/*	$NetBSD: pmap_bootstrap.c,v 1.12 2007/10/17 19:54:07 garbled Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.12 2007/10/17 19:54:07 garbled Exp $");

#include <sys/param.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/vmparam.h>
#include <machine/pte.h>

#include <uvm/uvm_extern.h>

#define RELOC(v, t)	*((t*)((u_int)&(v) + firstpa - KERNBASE))

extern char *etext;
extern int Sysptsize;
extern char *proc0paddr;
extern st_entry_t *Sysseg;
extern pt_entry_t *Sysptmap, *Sysmap;

extern int physmem;
extern vm_offset_t avail_start, avail_end, virtual_avail, virtual_end;
extern int protection_codes[];

void pmap_bootstrap __P((vm_offset_t, vm_offset_t));

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
void	*CADDR1, *CADDR2;
char	*vmmap;
void	*msgbufaddr;

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!)  `nextpa' is the first available physical
 * memory address.  The first unused physical address after the memory we
 * allocate here is recorded in `avail_start'.  MMU is still off when we
 * return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
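 *
 * (Note: the RELOC() macro defined above is how those global references
 *  are relocated -- e.g. RELOC(physmem, int) accesses `physmem' through
 *  its physical address, (u_int)&physmem + firstpa - KERNBASE, since the
 *  variable's virtual address is not yet usable.)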
 */
void
pmap_bootstrap(nextpa, firstpa)
	vm_offset_t nextpa;
	vm_offset_t firstpa;
{
	vm_offset_t kstpa, kptpa, kptmpa, lkptpa, p0upa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste;
	pt_entry_t protopte, *pte, *epte;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	lkptpa = nextpa;
	nextpa += PAGE_SIZE;
	p0upa = nextpa;
	nextpa += USPACE;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int);
	nextpa += nptpages * PAGE_SIZE;

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0xFFF00000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.  0xFFF00000
	 * for UPAGES is used for mapping the current process u-area
	 * (u + kernel stack).  The very last page (0xFFFFF000) is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
	if (RELOC(mmutype, int) == MMU_68040) {
		int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = (u_int *)kstpa;
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;

		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
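		 *
		 * (For example, with 4kb pages each PT page holds
		 *  NPTEPG == 1024 PTEs covering 4mb of KVA, and each level 2
		 *  descriptor covers SG4_LEV3SIZE == 64 PTEs (256kb), so each
		 *  PT page consumes 1024/64 == 16 level 2 descriptors.)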
		 */
		num = nptpages * (NPTEPG / SG4_LEV3SIZE);
		pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = (u_int *)kstpa;
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1];
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map kptmpa and the "last PT page".
		 */
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE*2];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize Sysptmap
		 */
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}

		/*
		 * Invalidate all but the last two remaining entries.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-2];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last two to point to kptmpa and the page
		 * table page allocated earlier.
		 */
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		pte++;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = (u_int *)kstpa;
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all but the last two remaining entries in both.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-2];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last two to point to kptmpa and the page
		 * table page allocated earlier.
		 */
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		ste++;
		pte++;
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all but the final entry in the last kernel PT page
	 * (u-area PTEs will be validated later).  The final entry maps
	 * the last page of physical memory.
	 */
	pte = (u_int *)lkptpa;
	epte = &pte[NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
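	 *
	 * (Each PT page holds NPTEPG PTEs and so maps NPTEPG * PAGE_SIZE
	 *  bytes of KVA -- 4mb per PT page assuming 4kb pages -- so the
	 *  `nptpages' pages allocated above cover the kernel virtual range
	 *  starting at KERNBASE.)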
	 */
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * Validate PTEs for kernel text (RO).
	 */
	pte = &((u_int *)kptpa)[m68k_btop(KERNBASE)];
	epte = &((u_int *)kptpa)[m68k_btop(m68k_trunc_page(&etext))];
	protopte = firstpa | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &((u_int *)kptpa)[m68k_btop(KERNBASE + nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;

	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) =
		(st_entry_t *)(kstpa - firstpa + KERNBASE);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) =
		(pt_entry_t *)(kptmpa - firstpa + KERNBASE);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Allocated at the end of KVA space.
	 */
	RELOC(Sysmap, pt_entry_t *) =
		(pt_entry_t *)m68k_ptob((NPTEPG - 2) * NPTEPG);

	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + USPACE);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa + KERNBASE);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.  Since the last page is used
	 * for rebooting the system (code is copied there and
	 * execution continues from copied code before the MMU
	 * is disabled), the msgbuf will get trounced between
	 * reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	RELOC(avail_start, vm_offset_t) = nextpa;
	RELOC(avail_end, vm_offset_t) = firstpa
			+ m68k_ptob(RELOC(physmem, int))
			- m68k_round_page(MSGBUFSIZE)
			- PAGE_SIZE;	/* is that the start of the last page??? */
	RELOC(virtual_avail, vm_offset_t) =
		KERNBASE + (nextpa - firstpa);
	RELOC(virtual_end, vm_offset_t) = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
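	 * (Presumably because such a table would contain absolute virtual
	 * addresses, which cannot be relied upon while we are still running
	 * with the MMU off.)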
	 */
	{
		int *kp;

		kp = &RELOC(protection_codes, int);
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}

	/*
	 * Kernel page/segment table allocated above,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm = &RELOC(kernel_pmap_store, struct pmap);

		kpm->pm_stab = RELOC(Sysseg, st_entry_t *);
		kpm->pm_ptab = RELOC(Sysmap, pt_entry_t *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (st_entry_t *)kstpa;
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps kptmpa and last-page page table
		 */
		if (RELOC(mmutype, int) == MMU_68040) {
			int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE),
				      SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			for (num = MAXKL2SIZE;
			     num < sizeof(kpm->pm_stfree)*NBBY;
			     num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vm_offset_t va = RELOC(virtual_avail, vm_offset_t);

		RELOC(CADDR1, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(vmmap, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, void *) = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vm_offset_t) = va;
	}
}