/*	$NetBSD: pmap_bootstrap.c,v 1.4 2003/07/15 01:29:20 lukem Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.4 2003/07/15 01:29:20 lukem Exp $");

#include <sys/param.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/vmparam.h>
#include <machine/pte.h>

#include <uvm/uvm_extern.h>

#define	RELOC(v, t)	*((t*)((u_int)&(v) + firstpa - KERNBASE))

extern char *etext;
extern int Sysptsize;
extern char *proc0paddr;
extern st_entry_t *Sysseg;
extern pt_entry_t *Sysptmap, *Sysmap;

extern int physmem;
extern vm_offset_t avail_start, avail_end, virtual_avail, virtual_end;
extern int protection_codes[];

void pmap_bootstrap __P((vm_offset_t, vm_offset_t));

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufp:	kernel message buffer
 */
caddr_t		CADDR1, CADDR2, vmmap;
extern caddr_t	msgbufaddr;

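/*
 * A note on RELOC, with a worked expansion for illustration: the kernel
 * is linked to run at KVA `KERNBASE' but is loaded at physical address
 * `firstpa', and pmap_bootstrap() below runs with the MMU off, so every
 * global must be accessed through its physical address.  For example,
 *
 *	RELOC(Sysptsize, int)
 *
 * expands to
 *
 *	*((int *)((u_int)&Sysptsize + firstpa - KERNBASE))
 *
 * i.e. the link-time address of Sysptsize shifted by the load offset.
 * This is also why the comment below warns against calling any functions
 * from here: without a PIC compiler, calls and their globals would
 * resolve to unrelocated (and unmapped) link-time addresses.
 */
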
/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by
 * `firstpa' (don't call any functions here!)  `nextpa' is the first
 * available physical memory address.  Nothing is returned; the first
 * PA beyond the memory we allocate is published via `avail_start'
 * below.  MMU is still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(nextpa, firstpa)
	vm_offset_t nextpa;
	vm_offset_t firstpa;
{
	vm_offset_t kstpa, kptpa, kptmpa, lkptpa, p0upa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste;
	pt_entry_t protopte, *pte, *epte;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 *	iiopa		internal IO space
	 *			PT pages		IIOMAPSIZE pages
	 *
	 *	eiopa		external IO space
	 *			PT pages		EIOMAPSIZE pages
	 *
	 * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int);
	nextpa += nptpages * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	lkptpa = nextpa;
	nextpa += PAGE_SIZE;
	p0upa = nextpa;
	nextpa += USPACE;

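	/*
	 * Sketch of the physical memory just carved out above (on entry
	 * `nextpa' pointed just past the kernel image loaded at `firstpa'):
	 *
	 *	kstpa	kstsize pages	kernel segment table
	 *	kptpa	nptpages pages	static kernel PT pages
	 *	kptmpa	1 page		kernel PT map (Sysptmap)
	 *	lkptpa	1 page		last kernel PT page
	 *	p0upa	USPACE bytes	proc 0 u-area
	 *
	 * `nextpa' now points past all of the above and becomes
	 * avail_start at the end of this function.
	 */
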
	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0xFFF00000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.  The first
	 * UPAGES pages starting at 0xFFF00000 are used for mapping the
	 * current process u-area (u + kernel stack).  The very last page
	 * (0xFFFFF000) is mapped to the last physical page of RAM to
	 * give us a region in which PA == VA.  We use the first part of
	 * this page for enabling and disabling mapping.  The last part
	 * of this page also contains info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
	if (RELOC(mmutype, int) == MMU_68040) {
		int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = (u_int *)kstpa;
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;

		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages+1
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		num = (nptpages + 1) * (NPTEPG / SG4_LEV3SIZE);
		pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = (u_int *)kstpa;
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1];
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map the "last PT page".
		 */
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}

		/*
		 * Initialize Sysptmap
		 */
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}

		/*
		 * Invalidate all but the last remaining entry.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-1];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last to point to the page
		 * table page allocated earlier.
		 */
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.  Note that Sysptmap is also
		 * considered a PT page hence the +1.
		 */
		ste = (u_int *)kstpa;
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all but the last remaining entries in both.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-1];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last to point to the page
		 * table page allocated earlier.
		 */
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all entries in the last kernel PT page
	 * (u-area PTEs will be validated later).
	 */
	pte = (u_int *)lkptpa;
	epte = &pte[NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

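	/*
	 * A note on the "proto" descriptor idiom used above and below:
	 * both PTEs and STEs are 32-bit words holding a physical address
	 * in their high bits and control flags in their low bits, so a
	 * prototype entry can be stepped across a physically contiguous
	 * region by repeatedly adding the size of the object each entry
	 * points at: PAGE_SIZE for entries mapping pages (or pointing at
	 * PT pages), and SG4_LEV[23]SIZE * sizeof(st_entry_t) for 040
	 * descriptors pointing at next-level tables.
	 */
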
	/*
	 * Validate PTEs for kernel text (RO).
	 */
	pte = &((u_int *)kptpa)[m68k_btop(KERNBASE)];
	epte = &((u_int *)kptpa)[m68k_btop(m68k_trunc_page(&etext))];
	protopte = firstpa | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &((u_int *)kptpa)[m68k_btop(KERNBASE + nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;

	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) =
	    (st_entry_t *)(kstpa - firstpa + KERNBASE);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) =
	    (pt_entry_t *)(kptmpa - firstpa + KERNBASE);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap).
	 * Immediately follows the VA range covered by the `nptpages'
	 * static kernel PT pages.
	 */
	RELOC(Sysmap, pt_entry_t *) =
	    (pt_entry_t *)m68k_ptob(nptpages * NPTEPG);

	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + USPACE);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa + KERNBASE);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.  Since the last page is used
	 * for rebooting the system (code is copied there and
	 * execution continues from copied code before the MMU
	 * is disabled), the msgbuf will get trounced between
	 * reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	RELOC(avail_start, vm_offset_t) = nextpa;
	RELOC(avail_end, vm_offset_t) = firstpa
	    + m68k_ptob(RELOC(physmem, int))
	    - m68k_round_page(MSGBUFSIZE)
	    - PAGE_SIZE;	/* skip the last physical page (reboot code) */
	RELOC(virtual_avail, vm_offset_t) =
	    KERNBASE + (nextpa - firstpa);
	RELOC(virtual_end, vm_offset_t) = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
	 */
	{
		int *kp;

		kp = &RELOC(protection_codes, int);
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}

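	/*
	 * The OR expressions above spell out the index bits of
	 * protection_codes[]: VM_PROT_NONE is 0 while VM_PROT_READ,
	 * VM_PROT_WRITE and VM_PROT_EXECUTE are the single bits 1, 2
	 * and 4, so the eight assignments cover indices 0 through 7.
	 * Writing every component, including the VM_PROT_NONE ones,
	 * keeps the table self-documenting.
	 */
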
	/*
	 * Kernel page/segment table allocated above,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm = &RELOC(kernel_pmap_store, struct pmap);

		kpm->pm_stab = RELOC(Sysseg, st_entry_t *);
		kpm->pm_ptab = RELOC(Sysmap, pt_entry_t *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (st_entry_t *)kstpa;
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps last-page page table
		 */
		if (RELOC(mmutype, int) == MMU_68040) {
			int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup((nptpages + 1) * (NPTEPG / SG4_LEV3SIZE),
			    SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			for (num = MAXKL2SIZE;
			    num < sizeof(kpm->pm_stfree)*NBBY;
			    num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vm_offset_t va = RELOC(virtual_avail, vm_offset_t);

		RELOC(CADDR1, caddr_t) = (caddr_t)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, caddr_t) = (caddr_t)va;
		va += PAGE_SIZE;
		RELOC(vmmap, caddr_t) = (caddr_t)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, caddr_t) = (caddr_t)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vm_offset_t) = va;
	}
}

void
pmap_init_md(void)
{
	/* Nothing here. */
}
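
/*
 * A sketch of the pm_stfree accounting in pmap_bootstrap() above (68040
 * case): pm_stfree is a bitmap with one bit per SG4_LEV2SIZE-descriptor
 * block of the segment table, l2tobm(n) yielding the mask for block `n'.
 * Starting from ~l2tobm(0) (everything free except block 0, which holds
 * the level 1 table), the loops then mark as in-use blocks 1 through
 * `num' (the level 2 descriptors mapping the kernel PT pages), block
 * MAXKL2SIZE-1 (the last-page PT mapping), and every bit past the end
 * of the table, leaving the remaining blocks for the pmap module to
 * allocate from.
 */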