/*	$NetBSD: pmap_bootstrap.c,v 1.26 2009/12/11 22:23:08 tsutsui Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.26 2009/12/11 22:23:08 tsutsui Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/msgbuf.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/vmparam.h>
#include <machine/pte.h>

#include <uvm/uvm_extern.h>

#define	RELOC(v, t)	*((t*)((uintptr_t)&(v) + firstpa - KERNBASE))

extern char *etext;
extern paddr_t avail_start, avail_end;

void	pmap_bootstrap(paddr_t, paddr_t);

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufp:	kernel message buffer
 */
void	*CADDR1, *CADDR2;
char	*vmmap;
void	*msgbufaddr;

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!)  `nextpa' is the first available physical
 * memory address.  Returns an updated first PA reflecting the memory we
 * have allocated.  MMU is still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
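/*
 * A note on RELOC(), on which everything below depends: globals are
 * linked at KVA (KERNBASE-relative) addresses, but the MMU is still
 * off, so each one must be accessed at its physical load address
 * instead.  A sketch of the expansion:
 *
 *	RELOC(mmutype, int)
 *		==> *(int *)((uintptr_t)&mmutype + firstpa - KERNBASE)
 *
 * i.e. the symbol's link-time address rebased by the physical load
 * offset.
 */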
void
pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
{
	paddr_t kstpa, kptpa, kptmpa, lkptpa, lwp0upa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste, *este;
	pt_entry_t protopte, *pte, *epte;
	u_int stfree = 0;	/* XXX: gcc -Wuninitialized */

	/*
	 * Calculate important physical addresses:
	 *
	 *	lwp0upa		lwp 0 u-area		UPAGES pages
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	lwp0upa = nextpa;
	nextpa += USPACE;
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	lkptpa = nextpa;
	nextpa += PAGE_SIZE;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int);
	nextpa += nptpages * PAGE_SIZE;

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last two segments of KVA space (0xFF800000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.
	 * The first segment (0xFF800000 - 0xFFBFFFFF) is mapped
	 * for the kernel page tables.
	 *
	 * XXX: This looks like it was copied from hp300, and it is not
	 * XXX: clear whether the last physical page mapping is really
	 * XXX: needed on this port.
	 * The very last page (0xFFFFF000) in the second segment is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
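	/*
	 * Worked example for the 040 case (a sketch, assuming 4KB pages
	 * and 4-byte descriptors, i.e. NPTEPG == 1024, with
	 * SG4_LEV1SIZE == SG4_LEV2SIZE == 128): one segment table page
	 * holds 1024 descriptors, of which the first 128 form the level 1
	 * table, leaving 896 level 2 slots, i.e. 7 blocks of 128.  Each
	 * level 2 descriptor maps 256kb, so one block maps 32mb and one
	 * page of segment table covers 7 * 32mb = 224mb, the figure
	 * cited above.
	 */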
	if (RELOC(mmutype, int) == MMU_68040) {
		int nl1desc, nl2desc, i;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		ste = (st_entry_t *)kstpa;
		este = &ste[kstsize * NPTEPG];
		while (ste < este)
			*ste++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		nl2desc = nptpages * (NPTEPG / SG4_LEV3SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE];
		este = &ste[nl2desc];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize level 1 descriptors.  We need:
		 *	howmany(nl2desc, SG4_LEV2SIZE)
		 * level 1 descriptors to map the `nl2desc' level 2's.
		 */
		nl1desc = howmany(nl2desc, SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		este = &ste[nl1desc];
		protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize the final level 1 descriptor to map the next
		 * block of level 2 descriptors for Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE - 1];
		*ste = protoste;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map kptmpa and the "last PT page".
		 */
		i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[i + SG4_LEV2SIZE - NPTEPG / SG4_LEV3SIZE * 2];
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Calculate the free level 2 descriptor mask
		 * noting that we have used:
		 *	0:		level 1 table
		 *	1 to nl1desc:	map page tables
		 *	nl1desc + 1:	maps kptmpa and last-page page table
		 */
		/* mark an entry for level 1 table */
		stfree = ~l2tobm(0);
		/* mark entries for map page tables */
		for (i = 1; i <= nl1desc; i++)
			stfree &= ~l2tobm(i);
		/* mark an entry for kptmpa and lkptpa */
		stfree &= ~l2tobm(i);
		/* mark entries not available */
		for (i = MAXKL2SIZE; i < sizeof(stfree) * NBBY; i++)
			stfree &= ~l2tobm(i);
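		/*
		 * Sketch of the resulting mask in the common case where
		 * nl1desc == 1 (one block of level 2 descriptors covers
		 * all of the PT pages): bits 0-2 are clear (level 1
		 * table, the PT map block, and the kptmpa/lkptpa block),
		 * bits 3 through MAXKL2SIZE - 1 are set (free for later
		 * level 2 allocations), and bits at MAXKL2SIZE and above
		 * are clear since no segment table pages back them.
		 */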

		/*
		 * Initialize Sysptmap
		 */
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}

		/*
		 * Invalidate all remaining entries.
		 */
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last to point to kptmpa and the page
		 * table page allocated earlier.
		 */
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		pte++;		/* XXX should use [MAXADDR >> SEGSHIFT] */
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries in both.
		 */
		este = (st_entry_t *)kstpa;
		este = &este[TIA_SIZE];
		while (ste < este)
			*ste++ = SG_NV;
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte)
			*pte++ = PG_NV;
		/*
		 * Initialize the last to point to kptmpa and the page
		 * table page allocated earlier.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SYSMAP_VA >> SEGSHIFT];
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		ste++;		/* XXX should use [MAXADDR >> SEGSHIFT] */
		pte++;		/* XXX should use [MAXADDR >> SEGSHIFT] */
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all but the final entry in the last kernel PT page.
	 * The final entry maps the last page of physical memory to
	 * prepare a page that is PA == VA to turn on the MMU.
	 *
	 * XXX: This looks like it was copied from hp300 where PA != VA,
	 * XXX: but it is unclear whether this is also required on this
	 * XXX: port.
	 */
	pte = (pt_entry_t *)lkptpa;
	epte = &pte[NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (pt_entry_t *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * Validate PTEs for kernel text (RO).
	 */
	pte = (pt_entry_t *)kptpa;
	pte = &pte[m68k_btop(KERNBASE)];
	epte = &((u_int *)kptpa)[m68k_btop(m68k_trunc_page(&etext))];
	protopte = firstpa | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for lwp0
	 * u-area and page table allocated below (RW).
	 */
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(KERNBASE + nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;

	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	/*
	 * Calculate important exported kernel addresses and related values.
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) =
	    (st_entry_t *)(kstpa - firstpa + KERNBASE);
	RELOC(Sysseg_pa, paddr_t) = kstpa;
	if (RELOC(mmutype, int) == MMU_68040)
		RELOC(protostfree, u_int) = stfree;
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) =
	    (pt_entry_t *)(kptmpa - firstpa + KERNBASE);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Allocated at the end of KVA space.
	 */
	RELOC(Sysmap, pt_entry_t *) = (pt_entry_t *)SYSMAP_VA;
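	/*
	 * How these fit together (a sketch): Sysseg and Sysptmap are the
	 * kstpa and kptmpa pages rebased into KVA via the usual
	 * (PA - firstpa + KERNBASE) identity, while Sysmap is not a
	 * relocated PA at all: it is the fixed KVA (SYSMAP_VA) at which
	 * the kernel PT pages become visible once the MMU is on, via the
	 * kptmpa entry installed at [SYSMAP_VA >> SEGSHIFT] above.
	 */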

	/*
	 * Remember the u-area address so it can be loaded into lwp0
	 * via uvm_lwp_setuarea() later in pmap_bootstrap_finalize().
	 */
	RELOC(lwp0uarea, vaddr_t) = lwp0upa - firstpa + KERNBASE;

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.  Since the last page is used
	 * for rebooting the system (code is copied there and
	 * execution continues from the copied code before the MMU
	 * is disabled), the msgbuf will get trounced between
	 * reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	RELOC(avail_start, paddr_t) = nextpa;
	RELOC(avail_end, paddr_t) = firstpa
	    + m68k_ptob(RELOC(physmem, int))
	    - m68k_round_page(MSGBUFSIZE)
	    - PAGE_SIZE;	/* skip the last physical page; see note above */
	RELOC(virtual_avail, vaddr_t) =
	    KERNBASE + (nextpa - firstpa);
	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = RELOC(virtual_avail, vaddr_t);

		RELOC(CADDR1, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(vmmap, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, void *) = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vaddr_t) = va;
	}
}
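
/*
 * Resulting special-purpose VA layout at the former virtual_avail (a
 * sketch, per the block above): one page each for CADDR1, CADDR2 and
 * vmmap, then m68k_round_page(MSGBUFSIZE) bytes for msgbufaddr, after
 * which virtual_avail resumes.  Only the addresses are reserved here;
 * no PTEs are installed for them until their users map pages in later.
 */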