/*	$NetBSD: pmap_bootstrap.c,v 1.17 2009/08/11 17:04:15 matt Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.17 2009/08/11 17:04:15 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/vmparam.h>
#include <machine/pte.h>

#include <uvm/uvm_extern.h>

#define	RELOC(v, t)	*((t*)((uintptr_t)&(v) + firstpa - KERNBASE))
#define	RELOCPTR(v, t)	((t)((uintptr_t)RELOC((v), t) + firstpa - KERNBASE))

extern char *etext;
extern char *proc0paddr;
extern paddr_t avail_start, avail_end;

void	pmap_bootstrap(vm_offset_t, vm_offset_t);

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
void	*CADDR1, *CADDR2;
char	*vmmap;
void	*msgbufaddr;

/*
 * Bootstrap the VM system.
 *
 * Called with the MMU off, so we must relocate all global references by
 * `firstpa' (don't call any functions here!).  `nextpa' is the first
 * available physical memory address.  The updated first PA, reflecting
 * the memory we have allocated, is recorded in avail_start.  The MMU is
 * still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
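/*
 * A worked example of the RELOC() arithmetic (illustrative only, with
 * made-up addresses): the kernel is linked to run at KERNBASE but, with
 * the MMU off, actually executes at the physical address `firstpa' it
 * was loaded at.  If firstpa - KERNBASE == 0x04000000 (hypothetical), a
 * global linked at KVA 0x0000d000 really lives at physical 0x0400d000,
 * and
 *
 *	RELOC(mmutype, int)
 *
 * expands to *(int *)((uintptr_t)&mmutype + firstpa - KERNBASE), i.e. a
 * load through that physical address.  RELOCPTR() goes one step further
 * and also relocates the pointer *value* fetched from such a global,
 * for globals that themselves hold kernel virtual addresses.
 */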
void
pmap_bootstrap(vm_offset_t nextpa, vm_offset_t firstpa)
{
	vm_offset_t kstpa, kptpa, kptmpa, lkptpa, p0upa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste;
	pt_entry_t protopte, *pte, *epte;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, and IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	lkptpa = nextpa;
	nextpa += PAGE_SIZE;
	p0upa = nextpa;
	nextpa += USPACE;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int);
	nextpa += nptpages * PAGE_SIZE;

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits, so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0xFFF00000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.  The UPAGES
	 * pages starting at 0xFFF00000 are used for mapping the current
	 * process u-area (u + kernel stack).  The very last page
	 * (0xFFFFF000) is mapped to the last physical page of RAM to
	 * give us a region in which PA == VA.  We use the first part of
	 * this page for enabling and disabling mapping.  The last part
	 * of this page also contains info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
	if (RELOC(mmutype, int) == MMU_68040) {
		int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = (u_int *)kstpa;
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;
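		/*
		 * A sketch of the 68040 table geometry the loops below
		 * rely on (numbers assume 4KB pages, i.e. NPTEPG == 1024,
		 * SG4_LEV1SIZE == SG4_LEV2SIZE == 128, SG4_LEV3SIZE == 64):
		 *
		 *	level 1: 128 descriptors x 32MB  = 4GB
		 *	level 2: 128 descriptors x 256KB = 32MB
		 *	level 3:  64 PTEs        x 4KB   = 256KB
		 *
		 * so one page of PTEs (1024 entries) spans 4MB of KVA and
		 * needs 1024/64 == 16 level 2 descriptors to describe it.
		 */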
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		num = nptpages * (NPTEPG / SG4_LEV3SIZE);
		pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = (u_int *)kstpa;
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1];
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map kptmpa and the "last PT page".
		 */
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE*2];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize Sysptmap
		 */
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}

		/*
		 * Invalidate all but the last two remaining entries.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-2];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last two to point to kptmpa and the
		 * last kernel PT page allocated earlier.
		 */
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		pte++;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = (u_int *)kstpa;
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all but the last two remaining entries in both.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-2];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last two to point to kptmpa and the
		 * last kernel PT page allocated earlier.
		 */
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		ste++;
		pte++;
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all entries in the last kernel PT page
	 * (u-area PTEs will be validated later).
	 */
	pte = (u_int *)lkptpa;
	epte = &pte[NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
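	/*
	 * Why the self-referencing kptmpa entry above works (a sketch,
	 * again assuming 4KB pages and NPTEPG == 1024): entry NPTEPG-2
	 * of the PT map points back at the PT map itself, so once the
	 * MMU is on, the 4MB KVA chunk starting at
	 *
	 *	m68k_ptob((NPTEPG - 2) * NPTEPG) == 0xFF800000
	 *
	 * windows the kernel page tables as ordinary memory; that is
	 * exactly where Sysmap is placed below.  Entry NPTEPG-1 plays
	 * the same trick for lkptpa, putting the last kernel PT page at
	 * the top 4MB chunk (0xFFC00000 - 0xFFFFFFFF) of KVA.
	 */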
	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' pages we have allocated.
	 */
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * Validate PTEs for kernel text (RO).
	 */
	pte = &((u_int *)kptpa)[m68k_btop(KERNBASE)];
	epte = &((u_int *)kptpa)[m68k_btop(m68k_trunc_page(&etext))];
	protopte = firstpa | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &((u_int *)kptpa)[m68k_btop(KERNBASE + nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;

	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) =
		(st_entry_t *)(kstpa - firstpa + KERNBASE);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) =
		(pt_entry_t *)(kptmpa - firstpa + KERNBASE);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Allocated at the end of KVA space.
	 */
	RELOC(Sysmap, pt_entry_t *) =
		(pt_entry_t *)m68k_ptob((NPTEPG - 2) * NPTEPG);

	/*
	 * Set up the u-area for process 0.
	 */
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + USPACE);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded into the
	 * proc struct p_addr field later.
	 */
	RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa + KERNBASE);

	/*
	 * VM data structures are now initialized; set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.  Since the last page is used
	 * for rebooting the system (code is copied there and
	 * execution continues from the copied code before the MMU
	 * is disabled), the msgbuf would get trounced between
	 * reboots if it were placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	RELOC(avail_start, vm_offset_t) = nextpa;
	RELOC(avail_end, vm_offset_t) = firstpa
			+ m68k_ptob(RELOC(physmem, int))
			- m68k_round_page(MSGBUFSIZE)
			- PAGE_SIZE;	/* skip the last physical page (see note above) */
	RELOC(virtual_avail, vm_offset_t) =
		KERNBASE + (nextpa - firstpa);
	RELOC(virtual_end, vm_offset_t) = VM_MAX_KERNEL_ADDRESS;
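	/*
	 * The protection array below is indexed directly by a vm_prot_t
	 * value.  VM_PROT_READ, VM_PROT_WRITE and VM_PROT_EXECUTE are
	 * single-bit flags (0x1, 0x2 and 0x4), so OR-ing the three
	 * components yields an index in 0..7, and the ORs written out
	 * below simply enumerate all eight combinations.  A later
	 * lookup then reduces to something like
	 *
	 *	pte_proto |= protection_codes[prot];
	 *
	 * (illustrative use only, not code from this file).
	 */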
	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
	 */
	{
		u_int *kp;

		kp = &RELOC(protection_codes, u_int);
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}

	/*
	 * Kernel page/segment table allocated above,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm;

		kpm = RELOCPTR(kernel_pmap_ptr, struct pmap *);

		kpm->pm_stab = RELOC(Sysseg, st_entry_t *);
		kpm->pm_ptab = RELOC(Sysmap, pt_entry_t *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (st_entry_t *)kstpa;
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps kptmpa and last-page page table
		 */
		if (RELOC(mmutype, int) == MMU_68040) {
			int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE),
				      SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			for (num = MAXKL2SIZE;
			     num < sizeof(kpm->pm_stfree)*NBBY;
			     num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vm_offset_t va = RELOC(virtual_avail, vm_offset_t);

		RELOC(CADDR1, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(vmmap, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, void *) = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vm_offset_t) = va;
	}
}
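/*
 * Not part of this file, but for orientation: on m68k ports of this
 * vintage, pmap_bootstrap() is reached from the assembly startup code
 * (locore.s) before the MMU is enabled, roughly (hypothetical C
 * rendering of the assembly sequence; names are illustrative):
 *
 *	pmap_bootstrap(nextpa, firstpa);	// MMU still off
 *	// ...load Sysseg's PA into the MMU root pointer,
 *	// enable translation, and continue at the kernel's
 *	// linked (KERNBASE-relative) addresses...
 */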