/*	$NetBSD: pmap_bootstrap.c,v 1.14 2009/01/17 07:17:35 tsutsui Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.14 2009/01/17 07:17:35 tsutsui Exp $");

#include <sys/param.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/vmparam.h>
#include <machine/pte.h>

#include <uvm/uvm_extern.h>

#define RELOC(v, t)	*((t*)((uintptr_t)&(v) + firstpa - KERNBASE))
#define RELOCPTR(v, t)	((t)((uintptr_t)RELOC((v), t) + firstpa - KERNBASE))

extern char *etext;
extern char *proc0paddr;
extern paddr_t avail_start, avail_end;

extern int physmem;

void pmap_bootstrap __P((vm_offset_t, vm_offset_t));

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
void *CADDR1, *CADDR2;
char *vmmap;
void *msgbufaddr;
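/*
 * A minimal sketch (compiled out, not part of the build) of what the
 * RELOC() macro above does.  While the MMU is off the CPU issues
 * physical addresses, but every global in this file was linked at a
 * KERNBASE-relative virtual address, so each reference must be rebased
 * by (firstpa - KERNBASE) by hand.  The function name below is
 * hypothetical.
 */
#if 0
static int
reloc_example(vm_offset_t firstpa)
{
	/* physical address of `physmem' = link-time VA + load offset */
	int *pa = (int *)((uintptr_t)&physmem + firstpa - KERNBASE);

	return *pa;		/* same value as RELOC(physmem, int) */
}
#endif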
/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by
 * `firstpa' (don't call any functions here!).  `nextpa' is the first
 * available physical memory address.  The updated first available PA,
 * reflecting the memory we allocate here, is published through
 * `avail_start' below.  MMU is still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(nextpa, firstpa)
	vm_offset_t nextpa;
	vm_offset_t firstpa;
{
	vm_offset_t kstpa, kptpa, kptmpa, lkptpa, p0upa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste;
	pt_entry_t protopte, *pte, *epte;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, and IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	lkptpa = nextpa;
	nextpa += PAGE_SIZE;
	p0upa = nextpa;
	nextpa += USPACE;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int);
	nextpa += nptpages * PAGE_SIZE;

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0xFFF00000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.  0xFFF00000
	 * for UPAGES is used for mapping the current process u-area
	 * (u + kernel stack).  The very last page (0xFFFFF000) is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
	if (RELOC(mmutype, int) == MMU_68040) {
		int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = (u_int *)kstpa;
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;

		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
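		/*
		 * Worked example of the arithmetic below, assuming the
		 * usual m68k values (4KB pages, so NPTEPG = 1024 PTEs
		 * per PT page, and SG4_LEV3SIZE = 64 entries per 68040
		 * level 3 table): each PT page needs
		 * NPTEPG / SG4_LEV3SIZE = 1024 / 64 = 16 level 2
		 * descriptors, so `num' is 16 * nptpages.
		 */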
		num = nptpages * (NPTEPG / SG4_LEV3SIZE);
		pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = (u_int *)kstpa;
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1];
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map kptmpa and the "last PT page".
		 */
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE*2];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize Sysptmap
		 */
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}

		/*
		 * Invalidate all but the last two remaining entries.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-2];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last two to point to kptmpa and the
		 * last kernel PT page (lkptpa) allocated earlier.
		 */
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		pte++;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = (u_int *)kstpa;
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all but the last two remaining entries in both.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-2];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last two to point to kptmpa and the
		 * last kernel PT page (lkptpa) allocated earlier.
		 */
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		ste++;
		pte++;
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all entries in the last kernel PT page (u-area
	 * PTEs will be validated later).  The final entry, which maps
	 * the last page of physical memory (the PA == VA page described
	 * above), is also set up later.
	 */
	pte = (u_int *)lkptpa;
	epte = &pte[NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' pages we have allocated.
	 */
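	/*
	 * Size check on the loop below, assuming the usual m68k values
	 * (4KB pages, NPTEPG = 1024): each PT page holds 1024 PTEs and
	 * each PTE maps one 4KB page, so every PT page invalidated here
	 * covers 4MB of KVA, nptpages * 4MB in total.
	 */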
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * Validate PTEs for kernel text (RO).
	 */
	pte = &((u_int *)kptpa)[m68k_btop(KERNBASE)];
	epte = &((u_int *)kptpa)[m68k_btop(m68k_trunc_page(&etext))];
	protopte = firstpa | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for the
	 * proc0 u-area and page tables allocated above (RW).
	 */
	epte = &((u_int *)kptpa)[m68k_btop(KERNBASE + nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;

	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) =
	    (st_entry_t *)(kstpa - firstpa + KERNBASE);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) =
	    (pt_entry_t *)(kptmpa - firstpa + KERNBASE);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap).
	 * Allocated at the end of KVA space.
	 */
	RELOC(Sysmap, pt_entry_t *) =
	    (pt_entry_t *)m68k_ptob((NPTEPG - 2) * NPTEPG);

	/*
	 * Set up u-area for process 0.
	 */
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + USPACE);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa + KERNBASE);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.  Since the last page is used
	 * for rebooting the system (code is copied there and
	 * execution continues from the copied code before the MMU
	 * is disabled), the msgbuf will get trounced between
	 * reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	RELOC(avail_start, vm_offset_t) = nextpa;
	RELOC(avail_end, vm_offset_t) = firstpa
	    + m68k_ptob(RELOC(physmem, int))
	    - m68k_round_page(MSGBUFSIZE)
	    - PAGE_SIZE;	/* XXX: should this be the start of the last page? */
	RELOC(virtual_avail, vm_offset_t) =
	    KERNBASE + (nextpa - firstpa);
	RELOC(virtual_end, vm_offset_t) = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
	 */
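	/*
	 * The array stores one PTE protection value per VM_PROT_* bit
	 * combination.  Assuming the usual UVM encodings (VM_PROT_READ
	 * = 0x1, VM_PROT_WRITE = 0x2, VM_PROT_EXECUTE = 0x4, and
	 * VM_PROT_NONE = 0), each OR expression below is just an index
	 * in 0..7; the redundant VM_PROT_NONE terms only document which
	 * of the three bits are clear.  For example,
	 * VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE == 0x3.
	 */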
	{
		u_int *kp;

		kp = &RELOC(protection_codes, u_int);
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}

	/*
	 * Kernel page/segment table allocated above,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm;

		kpm = RELOCPTR(kernel_pmap_ptr, struct pmap *);

		kpm->pm_stab = RELOC(Sysseg, st_entry_t *);
		kpm->pm_ptab = RELOC(Sysmap, pt_entry_t *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (st_entry_t *)kstpa;
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps kptmpa and last-page page table
		 */
		if (RELOC(mmutype, int) == MMU_68040) {
			int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE),
			    SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			for (num = MAXKL2SIZE;
			    num < sizeof(kpm->pm_stfree)*NBBY;
			    num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vm_offset_t va = RELOC(virtual_avail, vm_offset_t);

		RELOC(CADDR1, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(vmmap, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, void *) = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vm_offset_t) = va;
	}
}
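/*
 * A minimal sketch of the calling convention, compiled out.  The real
 * call is made from early start-up code with the MMU still off; the
 * caller name and the zero load address below are hypothetical.
 */
#if 0
extern char *end;			/* end of the loaded kernel image */

void
start_c_example(void)
{
	vm_offset_t firstpa = 0;	/* PA the kernel was loaded at */
	vm_offset_t nextpa =
	    m68k_round_page((vm_offset_t)&end - KERNBASE + firstpa);

	/*
	 * Carves the segment table, Sysptmap, the last kernel PT page,
	 * the proc0 u-area and the kernel PT pages out of memory
	 * starting at nextpa, then records avail_start/avail_end and
	 * virtual_avail/virtual_end for the VM system.
	 */
	pmap_bootstrap(nextpa, firstpa);
}
#endif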