/*	$NetBSD: pmap_bootstrap.c,v 1.88 2010/03/02 15:01:04 tsutsui Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.88 2010/03/02 15:01:04 tsutsui Exp $");

#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "zsc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/reboot.h>

#include <uvm/uvm_extern.h>

#include <machine/pte.h>
#include <machine/vmparam.h>
#include <machine/cpu.h>
#include <machine/pmap.h>
#include <machine/autoconf.h>
#include <machine/video.h>

#include <mac68k/mac68k/macrom.h>

/*
 * Convert a physical address in the kernel's load area to the kernel
 * virtual address it is (or will be) mapped at: kernel VAs are offset
 * from physical addresses by the load address `firstpa'.
 */
#define PA2VA(v, t)	(t)((u_int)(v) - firstpa)

extern char *etext;
extern char *extiobase;

extern paddr_t avail_start;
extern paddr_t avail_end;

#if NZSC > 0
extern int zsinited;
#endif

/*
 * These are used to map the RAM:
 */
int numranges;	/* = 0 == don't use the ranges */
u_long low[8];
u_long high[8];
u_long maxaddr;	/* PA of the last physical page */
int vidlen;
#define VIDMAPSIZE	btoc(vidlen)
static vaddr_t newvideoaddr;

extern void *ROMBase;

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
void *CADDR1, *CADDR2;
char *vmmap;
void *msgbufaddr;

void pmap_bootstrap(paddr_t, paddr_t);
void bootstrap_mac68k(int);

/*
 * Bootstrap the VM system.
 *
 * This is called with the MMU either on or off.  If it's on, we assume
 * that it's mapped with the same PA <=> LA mapping that we eventually
 * want.  The page sizes and the protections will be wrong, anyway.
 *
 * nextpa is the first address following the loaded kernel.  On an IIsi
 * on 12 May 1996, that was 0xf9000 beyond firstpa.
 */
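/*
 * The kernel virtual layout assembled below runs, roughly from low to
 * high: kernel text (read-only, except the pages below "start" which
 * the ROM must still be able to write), kernel data/bss plus the
 * memory allocated in this function, the internal I/O space (IOBase),
 * the ROM (ROMBase), the video frame buffer, and finally
 * virtual_avail.  Sysmap itself lives at SYSMAP_VA, at the end of
 * KVA space.
 */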
void
pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
{
	paddr_t kstpa, kptpa, kptmpa, lwp0upa;
	u_int nptpages, kstsize;
	paddr_t avail_next;
	int avail_remaining;
	int avail_range;
	int i;
	st_entry_t protoste, *ste, *este;
	pt_entry_t protopte, *pte, *epte;
	u_int stfree = 0;	/* XXX: gcc -Wuninitialized */
	extern char start[];

	vidlen = m68k_round_page(mac68k_video.mv_height *
	    mac68k_video.mv_stride + m68k_page_offset(mac68k_video.mv_phys));

	/*
	 * Calculate important physical addresses:
	 *
	 *	lwp0upa		lwp 0 u-area		UPAGES pages
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, and IIOMAPSIZE,
	 *   ROMMAPSIZE, and VIDMAPSIZE are the number of PTEs, hence we
	 *   need to round the total to a page boundary with IO maps at
	 *   the end. ]
	 *
	 */
	lwp0upa = nextpa;
	nextpa += USPACE;
	if (mmutype == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	kptpa = nextpa;
	nptpages = Sysptsize +
	    (IIOMAPSIZE + ROMMAPSIZE + VIDMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * PAGE_SIZE;

	for (i = 0; i < numranges; i++)
		if (low[i] <= firstpa && firstpa < high[i])
			break;
	if (i >= numranges || nextpa > high[i]) {
		if (mac68k_machine.do_graybars) {
			printf("Failure in NetBSD boot; ");
			if (i < numranges)
				printf("nextpa=0x%lx, high[%d]=0x%lx.\n",
				    nextpa, i, high[i]);
			else
				printf("can't find kernel RAM segment.\n");
			printf("You're hosed! Try booting with 32-bit ");
			printf("addressing enabled in the memory control ");
			printf("panel.\n");
			printf("Older machines may need Mode32 to get that ");
			printf("option.\n");
		}
		panic("Cannot work with the current memory mappings.");
	}

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
	if (mmutype == MMU_68040) {
		int nl1desc, nl2desc;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		ste = PA2VA(kstpa, st_entry_t *);
		este = &ste[kstsize * NPTEPG];
		while (ste < este)
			*ste++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		nl2desc = nptpages * (NPTEPG / SG4_LEV3SIZE);
		ste = PA2VA(kstpa, st_entry_t *);
		ste = &ste[SG4_LEV1SIZE];
		este = &ste[nl2desc];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	howmany(nl2desc, SG4_LEV2SIZE)
		 * level 1 descriptors to map the `nl2desc' level 2's.
		 */
		nl1desc = howmany(nl2desc, SG4_LEV2SIZE);
		ste = PA2VA(kstpa, u_int *);
		este = &ste[nl1desc];
		protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the next
		 * block of level 2 descriptors for Sysptmap.
		 */
		ste = PA2VA(kstpa, st_entry_t *);
		ste = &ste[SG4_LEV1SIZE - 1];
		*ste = protoste;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map Sysmap.
		 */
		i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE);
		ste = PA2VA(kstpa, st_entry_t *);
		ste = &ste[i + SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE)];
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Calculate the free level 2 descriptor mask
		 * noting that we have used:
		 *	0:		level 1 table
		 *	1 to nl1desc:	map page tables
		 *	nl1desc + 1:	maps kptmpa and last-page page table
		 */
		/* mark an entry for level 1 table */
		stfree = ~l2tobm(0);
		/* mark entries for map page tables */
		for (i = 1; i <= nl1desc; i++)
			stfree &= ~l2tobm(i);
		/* mark an entry for kptmpa and lkptpa */
		stfree &= ~l2tobm(i);
		/* mark entries not available */
		for (i = MAXKL2SIZE; i < sizeof(stfree) * NBBY; i++)
			stfree &= ~l2tobm(i);

		/*
		 * Initialize Sysptmap
		 */
		pte = PA2VA(kptmpa, pt_entry_t *);
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries.
		 */
		epte = PA2VA(kptmpa, pt_entry_t *);
		epte = &epte[TIB_SIZE];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last one to point to Sysptmap.
		 */
		pte = PA2VA(kptmpa, pt_entry_t *);
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = PA2VA(kstpa, st_entry_t *);
		pte = PA2VA(kptmpa, pt_entry_t *);
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries in both.
		 */
		este = PA2VA(kstpa, st_entry_t *);
		este = &este[TIA_SIZE];
		while (ste < este)
			*ste++ = SG_NV;
		epte = PA2VA(kptmpa, pt_entry_t *);
		epte = &epte[TIB_SIZE];
		while (pte < epte)
			*pte++ = PG_NV;
		/*
		 * Initialize the last one to point to Sysptmap.
		 */
		ste = PA2VA(kstpa, st_entry_t *);
		ste = &ste[SYSMAP_VA >> SEGSHIFT];
		*ste = kptmpa | SG_RW | SG_V;
		pte = PA2VA(kptmpa, pt_entry_t *);
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
	}

	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = PA2VA(kptpa, pt_entry_t *);
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * Validate PTEs for kernel text (RO).
	 * Pages up to "start" must be writable for the ROM.
	 */
	pte = PA2VA(kptpa, pt_entry_t *);
	pte = &pte[m68k_btop(KERNBASE)];
	/* XXX why KERNBASE relative? */
	epte = &pte[m68k_btop(m68k_round_page(start))];
	protopte = firstpa | PG_RW | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/* XXX why KERNBASE relative? */
	epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = (protopte & ~PG_PROT) | PG_RO;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for lwp0
	 * u-area and page table allocated below (RW).
	 */
	epte = PA2VA(kptpa, pt_entry_t *);
	epte = &epte[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (mmutype == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

/* Kernel VA mapped by the PTE at `pte' within the kernel page table. */
#define PTE2VA(pte)	m68k_ptob(pte - PA2VA(kptpa, pt_entry_t *))

	protopte = IOBase | PG_RW | PG_CI | PG_V;
	IOBase = PTE2VA(pte);
	epte = &pte[IIOMAPSIZE];
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	protopte = (pt_entry_t)ROMBase | PG_RO | PG_V;
	ROMBase = (void *)PTE2VA(pte);
	epte = &pte[ROMMAPSIZE];
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	if (vidlen) {
		protopte = m68k_trunc_page(mac68k_video.mv_phys) |
		    PG_RW | PG_V | PG_CI;
		newvideoaddr = PTE2VA(pte)
		    + m68k_page_offset(mac68k_video.mv_phys);
		epte = &pte[VIDMAPSIZE];
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
	}
	virtual_avail = PTE2VA(pte);

	/*
	 * Calculate important exported kernel addresses and related values.
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	Sysseg = PA2VA(kstpa, st_entry_t *);
	Sysseg_pa = PA2VA(kstpa, paddr_t);
#if defined(M68040)
	if (mmutype == MMU_68040)
		protostfree = stfree;
#endif
	/*
	 * Sysptmap: base of kernel page table map
	 */
	Sysptmap = PA2VA(kptmpa, pt_entry_t *);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Allocated at the end of KVA space.
	 */
	Sysmap = (pt_entry_t *)SYSMAP_VA;

	/*
	 * Remember the u-area address so it can be loaded into lwp0
	 * via uvm_lwp_setuarea() later in pmap_bootstrap_finalize().
	 */
	lwp0uarea = PA2VA(lwp0upa, vaddr_t);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.  Since the last page is used
	 * for rebooting the system (code is copied there and
	 * execution continues from copied code before the MMU
	 * is disabled), the msgbuf will get trounced between
	 * reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	avail_next = avail_start = m68k_round_page(nextpa);
	avail_remaining = 0;
	avail_range = -1;
	for (i = 0; i < numranges; i++) {
		if (low[i] <= avail_next && avail_next < high[i]) {
			avail_range = i;
			avail_remaining = high[i] - avail_next;
		} else if (avail_range != -1) {
			avail_remaining += (high[i] - low[i]);
		}
	}
	physmem = m68k_btop(avail_remaining + nextpa - firstpa);

	maxaddr = high[numranges - 1] - m68k_ptob(1);
	high[numranges - 1] -= (m68k_round_page(MSGBUFSIZE) + m68k_ptob(1));
	avail_end = high[numranges - 1];
	mem_size = m68k_ptob(physmem);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = virtual_avail;

		CADDR1 = (void *)va;
		va += PAGE_SIZE;
		CADDR2 = (void *)va;
		va += PAGE_SIZE;
		vmmap = (void *)va;
		va += PAGE_SIZE;
		msgbufaddr = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		virtual_avail = va;
	}
}

void
bootstrap_mac68k(int tc)
{
#if NZSC > 0
	extern void zs_init(void);
#endif
	extern int *esym;
	paddr_t nextpa;
	void *oldROMBase;

	if (mac68k_machine.do_graybars)
		printf("Bootstrapping NetBSD/mac68k.\n");

	oldROMBase = ROMBase;
	mac68k_video.mv_phys = mac68k_video.mv_kvaddr;

	if (((tc & 0x80000000) && (mmutype == MMU_68030)) ||
	    ((tc & 0x8000) && (mmutype == MMU_68040))) {
		if (mac68k_machine.do_graybars)
			printf("Getting mapping from MMU.\n");
		(void) get_mapping();
		if (mac68k_machine.do_graybars)
			printf("Done.\n");
	} else {
		/* MMU not enabled.  Fake up ranges. */
		numranges = 1;
		low[0] = 0;
		high[0] = mac68k_machine.mach_memsize * (1024 * 1024);
		if (mac68k_machine.do_graybars)
			printf("Faked range to byte 0x%lx.\n", high[0]);
	}
	nextpa = load_addr + m68k_round_page(esym);

	if (mac68k_machine.do_graybars)
		printf("Bootstrapping the pmap system.\n");

	pmap_bootstrap(nextpa, load_addr);

	if (mac68k_machine.do_graybars)
		printf("Pmap bootstrapped.\n");

	if (!vidlen)
		panic("Don't know how to relocate video!");

	if (mac68k_machine.do_graybars)
		printf("Moving ROMBase from %p to %p.\n", oldROMBase, ROMBase);

	mrg_fixupROMBase(oldROMBase, ROMBase);

	if (mac68k_machine.do_graybars)
		printf("Video address 0x%p -> 0x%p.\n",
		    (void *)mac68k_video.mv_kvaddr, (void *)newvideoaddr);

	mac68k_set_io_offsets(IOBase);

	/*
	 * If the serial ports are going (for console or 'echo'), then
	 * we need to make sure the IO change gets propagated properly.
	 * This resets the base addresses for the 8530 (serial) driver.
	 *
	 * WARNING!!! No printfs() (etc) BETWEEN zs_init() and the end
	 * of this function (where we start using the MMU, so the new
	 * address is correct).
	 */
#if NZSC > 0
	if (zsinited != 0)
		zs_init();
#endif

	mac68k_video.mv_kvaddr = newvideoaddr;
}