/*	$NetBSD: pmap_bootstrap.c,v 1.16 2009/03/14 15:36:04 dsl Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.16 2009/03/14 15:36:04 dsl Exp $");

#include <sys/param.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/vmparam.h>
#include <machine/pte.h>

#include <uvm/uvm_extern.h>

#define RELOC(v, t)	*((t*)((uintptr_t)&(v) + firstpa - KERNBASE))
#define RELOCPTR(v, t)	((t)((uintptr_t)RELOC((v), t) + firstpa - KERNBASE))
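
/*
 * A note on the RELOC() macros (illustrative, not from the original
 * source): the kernel is linked at KERNBASE-relative virtual addresses
 * but executes at physical address `firstpa' while the MMU is off, so a
 * global symbol must be relocated by hand before it can be dereferenced.
 * For example,
 *
 *	RELOC(physmem, int)
 *
 * expands to
 *
 *	*(int *)((uintptr_t)&physmem + firstpa - KERNBASE)
 *
 * i.e. "the symbol's link-time VA, rebased to where we were actually
 * loaded".  RELOCPTR() additionally rebases the pointer *value* read
 * from such a variable, for globals that themselves hold kernel VAs.
 */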

extern char *etext;
extern char *proc0paddr;
extern paddr_t avail_start, avail_end;

extern int physmem;

void pmap_bootstrap(vm_offset_t, vm_offset_t);

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
void *CADDR1, *CADDR2;
char *vmmap;
void *msgbufaddr;

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off, so we must relocate all global references by
 * `firstpa' (don't call any functions here!).  `nextpa' is the first
 * available physical memory address.  On return, avail_start reflects
 * the memory we have allocated.  The MMU is still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(vm_offset_t nextpa, vm_offset_t firstpa)
{
	vm_offset_t kstpa, kptpa, kptmpa, lkptpa, p0upa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste;
	pt_entry_t protopte, *pte, *epte;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	lkptpa = nextpa;
	nextpa += PAGE_SIZE;
	p0upa = nextpa;
	nextpa += USPACE;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int);
	nextpa += nptpages * PAGE_SIZE;

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0xFFF00000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.  0xFFF00000
	 * for UPAGES is used for mapping the current process u-area
	 * (u + kernel stack).  The very last page (0xFFFFF000) is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
	if (RELOC(mmutype, int) == MMU_68040) {
		int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = (u_int *)kstpa;
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;
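
		/*
		 * Worked example of the 68040 tree geometry (illustrative,
		 * assuming 4KB pages, i.e. NPTEPG == 1024, SG4_LEV1SIZE ==
		 * SG4_LEV2SIZE == 128, SG4_LEV3SIZE == 64):
		 *
		 *	level 1 entry:	4GB / 128	= 32mb of VA
		 *	level 2 entry:	32mb / 128	= 256kb of VA (64 PTEs)
		 *	PT page:	1024 PTEs * 4KB	= 4mb of VA
		 *
		 * so each of the `nptpages' PT pages consumes
		 * NPTEPG/SG4_LEV3SIZE == 16 level 2 descriptors, which is
		 * where the descriptor counts below come from.
		 */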

		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		num = nptpages * (NPTEPG / SG4_LEV3SIZE);
		pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = (u_int *)kstpa;
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1];
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map kptmpa and the "last PT page".
		 */
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE*2];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize Sysptmap
		 */
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}

		/*
		 * Invalidate all but the last two remaining entries.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-2];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last two to point to kptmpa itself and
		 * to the last kernel PT page allocated earlier.
		 */
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		pte++;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = (u_int *)kstpa;
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all but the last two remaining entries in both.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-2];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last two to point to kptmpa itself and
		 * to the last kernel PT page allocated earlier.
		 */
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		ste++;
		pte++;
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all entries in the last kernel PT page
	 * (u-area PTEs will be validated later).
	 */
	pte = (u_int *)lkptpa;
	epte = &pte[NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
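
	/*
	 * A note on the layout just established (illustrative, assuming
	 * 4KB pages): each of the last two Sysptmap slots maps a 4mb
	 * segment of KVA.  Slot NPTEPG-2 points back at kptmpa itself, so
	 * the kernel PT pages become visible as ordinary memory starting
	 * at m68k_ptob((NPTEPG-2) * NPTEPG) (0xFF800000); this recursive
	 * mapping is what the Sysmap address computed below relies on.
	 * Slot NPTEPG-1 points at lkptpa, which backs the last segment
	 * (0xFFC00000 - 0xFFFFFFFF) holding the u-area mapping and the
	 * PA == VA page described above.
	 */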

	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * Validate PTEs for kernel text (RO).
	 */
	pte = &((u_int *)kptpa)[m68k_btop(KERNBASE)];
	epte = &((u_int *)kptpa)[m68k_btop(m68k_trunc_page(&etext))];
	protopte = firstpa | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &((u_int *)kptpa)[m68k_btop(KERNBASE + nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;

	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) =
		(st_entry_t *)(kstpa - firstpa + KERNBASE);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) =
		(pt_entry_t *)(kptmpa - firstpa + KERNBASE);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Allocated at the end of KVA space.
	 */
	RELOC(Sysmap, pt_entry_t *) =
		(pt_entry_t *)m68k_ptob((NPTEPG - 2) * NPTEPG);

	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + USPACE);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa + KERNBASE);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.  Since the last page is used
	 * for rebooting the system (code is copied there and
	 * execution continues from copied code before the MMU
	 * is disabled), the msgbuf will get trounced between
	 * reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	RELOC(avail_start, vm_offset_t) = nextpa;
	RELOC(avail_end, vm_offset_t) = firstpa
			+ m68k_ptob(RELOC(physmem, int))
			- m68k_round_page(MSGBUFSIZE)
			- PAGE_SIZE;	/* XXX: i.e. the start of the last page? */
	RELOC(virtual_avail, vm_offset_t) =
		KERNBASE + (nextpa - firstpa);
	RELOC(virtual_end, vm_offset_t) = VM_MAX_KERNEL_ADDRESS;
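
	/*
	 * A note on the indexing below (illustrative): VM_PROT_READ,
	 * VM_PROT_WRITE and VM_PROT_EXECUTE are distinct bit flags and
	 * VM_PROT_NONE is 0, so a vm_prot_t value selects one of the
	 * eight array slots directly; the "|VM_PROT_NONE" terms are zeros
	 * spelled out so every row lists all three rights.  Since the
	 * m68k PTE has no execute bit, any read-only or execute-only
	 * combination collapses to PG_RO and any writable combination
	 * to PG_RW.
	 */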

	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
	 */
	{
		u_int *kp;

		kp = &RELOC(protection_codes, u_int);
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}

	/*
	 * Kernel page/segment table allocated above,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm;

		kpm = RELOCPTR(kernel_pmap_ptr, struct pmap *);

		kpm->pm_stab = RELOC(Sysseg, st_entry_t *);
		kpm->pm_ptab = RELOC(Sysmap, pt_entry_t *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (st_entry_t *)kstpa;
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps kptmpa and last-page page table
		 */
		if (RELOC(mmutype, int) == MMU_68040) {
			int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE),
				      SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			for (num = MAXKL2SIZE;
			     num < sizeof(kpm->pm_stfree)*NBBY;
			     num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vm_offset_t va = RELOC(virtual_avail, vm_offset_t);

		RELOC(CADDR1, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(vmmap, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, void *) = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vm_offset_t) = va;
	}
}
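
/*
 * Call-site sketch (illustrative only; the actual call is made from the
 * port's assembly startup code, with address translation still disabled):
 *
 *	pmap_bootstrap(nextpa, firstpa);
 *
 * where `firstpa' is the physical address the kernel was loaded at and
 * `nextpa' is the first free physical page past the loaded image.  On
 * return the caller can consult avail_start/avail_end and enable the
 * MMU using the tables built above.
 */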