/*	$NetBSD: pmap_bootstrap.c,v 1.13 2008/12/28 05:15:59 tsutsui Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.13 2008/12/28 05:15:59 tsutsui Exp $");

#include <sys/param.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/vmparam.h>
#include <machine/pte.h>

#include <uvm/uvm_extern.h>

#define	RELOC(v, t)	*((t *)((uintptr_t)&(v) + firstpa - KERNBASE))
#define	RELOCPTR(v, t)	((t)((uintptr_t)RELOC((v), t) + firstpa - KERNBASE))

extern char *etext;
extern int Sysptsize;
extern char *proc0paddr;
extern st_entry_t *Sysseg;
extern pt_entry_t *Sysptmap, *Sysmap;

extern int physmem;
extern vm_offset_t avail_start, avail_end, virtual_avail, virtual_end;
extern int protection_codes[];

void pmap_bootstrap(vm_offset_t, vm_offset_t);

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
void *CADDR1, *CADDR2;
char *vmmap;
void *msgbufaddr;
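
/*
 * A note on the RELOC() idiom above: the kernel is linked at KERNBASE
 * but pmap_bootstrap() runs with the MMU off at physical address
 * `firstpa', so a bare reference to a global would go through an
 * as-yet-unmapped virtual address.  RELOC() adjusts the reference by
 * hand; for example,
 *
 *	RELOC(physmem, int)
 *
 * expands to
 *
 *	*(int *)((uintptr_t)&physmem + firstpa - KERNBASE)
 *
 * i.e. the physical copy of `physmem'.  RELOCPTR() additionally
 * relocates the pointer value fetched that way, for globals that are
 * themselves pointers into kernel virtual space.
 */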

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!)  `nextpa' is the first available physical
 * memory address.  The first PA still available after the memory we allocate
 * here is recorded in avail_start below rather than being returned.  MMU is
 * still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(vm_offset_t nextpa, vm_offset_t firstpa)
{
	vm_offset_t kstpa, kptpa, kptmpa, lkptpa, p0upa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste;
	pt_entry_t protopte, *pte, *epte;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	lkptpa = nextpa;
	nextpa += PAGE_SIZE;
	p0upa = nextpa;
	nextpa += USPACE;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int);
	nextpa += nptpages * PAGE_SIZE;

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0xFFF00000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.  0xFFF00000
	 * for UPAGES is used for mapping the current process u-area
	 * (u + kernel stack).  The very last page (0xFFFFF000) is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
	if (RELOC(mmutype, int) == MMU_68040) {
		int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = (u_int *)kstpa;
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;
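
		/*
		 * A worked example of the descriptor arithmetic below,
		 * assuming the usual m68k constants (4kb pages, hence
		 * NPTEPG == 1024, with SG4_LEV2SIZE == 128 and
		 * SG4_LEV3SIZE == 64): one PT page holds 1024 PTEs and
		 * each level 2 descriptor covers 64 of them, so a PT
		 * page consumes 1024/64 == 16 level 2 descriptors, and
		 * a full block of 128 level 2 descriptors spans 8 PT
		 * pages, i.e. the 32mb of address space per level 1
		 * descriptor described above.
		 */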

		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		num = nptpages * (NPTEPG / SG4_LEV3SIZE);
		pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = (u_int *)kstpa;
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1];
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map kptmpa and the "last PT page".
		 */
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE*2];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize Sysptmap
		 */
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}

		/*
		 * Invalidate all but the last two remaining entries.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-2];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last two entries to map Sysptmap itself
		 * (kptmpa) and the last kernel PT page (lkptpa).
		 */
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		pte++;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = (u_int *)kstpa;
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all but the last two remaining entries in both.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-2];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last two entries in both to map Sysptmap
		 * itself (kptmpa) and the last kernel PT page (lkptpa).
		 */
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		ste++;
		pte++;
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all entries in the last kernel PT page
	 * (u-area PTEs will be validated later).
	 */
	pte = (u_int *)lkptpa;
	epte = &pte[NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
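
	/*
	 * Note the self-reference established above: the second-to-last
	 * mapping entry (index NPTEPG-2) points back at the Sysptmap
	 * page itself, so once the MMU is on the kernel PT pages appear
	 * as one contiguous array of PTEs starting at KVA
	 * m68k_ptob((NPTEPG - 2) * NPTEPG), which is exactly the value
	 * assigned to Sysmap below.
	 */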

	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' pages that we have allocated.
	 */
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * Validate PTEs for kernel text (RO).
	 */
	pte = &((u_int *)kptpa)[m68k_btop(KERNBASE)];
	epte = &((u_int *)kptpa)[m68k_btop(m68k_trunc_page(&etext))];
	protopte = firstpa | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &((u_int *)kptpa)[m68k_btop(KERNBASE + nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;

	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) =
		(st_entry_t *)(kstpa - firstpa + KERNBASE);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) =
		(pt_entry_t *)(kptmpa - firstpa + KERNBASE);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Allocated at the end of KVA space.
	 */
	RELOC(Sysmap, pt_entry_t *) =
		(pt_entry_t *)m68k_ptob((NPTEPG - 2) * NPTEPG);

	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + USPACE);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa + KERNBASE);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.  Since the last page is used
	 * for rebooting the system (code is copied there and
	 * execution continues from copied code before the MMU
	 * is disabled), the msgbuf will get trounced between
	 * reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	RELOC(avail_start, vm_offset_t) = nextpa;
	RELOC(avail_end, vm_offset_t) = firstpa
			+ m68k_ptob(RELOC(physmem, int))
			- m68k_round_page(MSGBUFSIZE)
			- PAGE_SIZE;	/* XXX is this the start of the last page? */
	RELOC(virtual_avail, vm_offset_t) =
		KERNBASE + (nextpa - firstpa);
	RELOC(virtual_end, vm_offset_t) = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
	 */
	{
		int *kp;

		kp = &RELOC(protection_codes, int);
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}
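
	/*
	 * The table above works because VM_PROT_READ, VM_PROT_WRITE and
	 * VM_PROT_EXECUTE are distinct single-bit flags, so OR-ing any
	 * combination of them yields a dense index in the range 0-7.
	 * A later lookup such as
	 *
	 *	protection_codes[VM_PROT_READ|VM_PROT_WRITE]
	 *
	 * fetches the PG_RO/PG_RW bits directly, with no switch statement
	 * and hence no risk of the compiler emitting an absolute jump
	 * table, which would be branched through at its unmapped
	 * link-time (virtual) address while the MMU is still off.
	 */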

	/*
	 * Kernel page/segment table allocated above,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm;

		kpm = RELOCPTR(kernel_pmap_ptr, struct pmap *);

		kpm->pm_stab = RELOC(Sysseg, st_entry_t *);
		kpm->pm_ptab = RELOC(Sysmap, pt_entry_t *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (st_entry_t *)kstpa;
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps kptmpa and last-page page table
		 */
		if (RELOC(mmutype, int) == MMU_68040) {
			int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE),
				      SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			for (num = MAXKL2SIZE;
			     num < sizeof(kpm->pm_stfree)*NBBY;
			     num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vm_offset_t va = RELOC(virtual_avail, vm_offset_t);

		RELOC(CADDR1, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(vmmap, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, void *) = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vm_offset_t) = va;
	}
}