/*	$NetBSD: pmap_bootstrap.c,v 1.29 2005/12/11 12:19:45 christos Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.29 2005/12/11 12:19:45 christos Exp $");

#include "opt_m680x0.h"

#include <sys/param.h>
#include <uvm/uvm_extern.h>
#include <machine/pte.h>
#include <machine/vmparam.h>
#include <machine/cpu.h>
#include <arch/x68k/x68k/iodevice.h>

#define RELOC(v, t)	*((t*)((caddr_t)&(v) + firstpa))

extern char *etext;
extern int Sysptsize;
extern char *proc0paddr;
extern st_entry_t *Sysseg;
extern pt_entry_t *Sysptmap, *Sysmap;

extern int maxmem, physmem;
extern paddr_t avail_start, avail_end;
extern vaddr_t virtual_avail, virtual_end;
extern psize_t mem_size;
extern int protection_codes[];

u_int8_t *intiobase = (u_int8_t *) PHYS_IODEV;

void pmap_bootstrap(paddr_t, paddr_t);

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
caddr_t CADDR1, CADDR2, vmmap;
extern caddr_t msgbufaddr;
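/*
 * A note on RELOC: pmap_bootstrap() below is entered with the MMU off,
 * while the globals above are linked at their kernel virtual addresses,
 * so every global reference must be rebiased by `firstpa' until the MMU
 * is turned on.  That is all RELOC() does; e.g.
 *
 *	RELOC(Sysptsize, int)
 *
 * reads Sysptsize through its physical address,
 * *(int *)((caddr_t)&Sysptsize + firstpa).
 */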
/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!)  `nextpa' is the first available physical
 * memory address.  Returns an updated first PA reflecting the memory we
 * have allocated.  MMU is still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
{
	paddr_t kstpa, kptpa, kptmpa, p0upa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste;
	pt_entry_t protopte, *pte, *epte;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, and IIOMAPSIZE
	 *   is the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	p0upa = nextpa;
	nextpa += USPACE;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int) +
		(IIOMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * PAGE_SIZE;

	/*
	 * Clear all PTEs to zero
	 */
	for (pte = (pt_entry_t *)kstpa; pte < (pt_entry_t *)nextpa; pte++)
		*pte = 0;

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040) {
		int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = (u_int *)kstpa;
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		num = nptpages * (NPTEPG / SG4_LEV3SIZE);
		pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
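		/*
		 * (For example, with 4KB pages NPTEPG is 1024 and
		 * SG4_LEV3SIZE is 64, so each page of PTEs consumes
		 * 1024/64 = 16 level 2 descriptors, each mapping
		 * 64 * PAGE_SIZE = 256kb as noted above.)
		 */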
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = (u_int *)kstpa;
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1];
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map kptmpa.
		 */
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize Sysptmap
		 */
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all but the last remaining entry.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-1];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last one to point to Sysptmap.
		 */
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
	} else
#endif /* M68040 || M68060 */
	{
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = (u_int *)kstpa;
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all but the last remaining entries in both.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-1];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last one to point to Sysptmap.
		 */
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
	}

	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Validate PTEs for kernel text (RO)
	 */
	pte = &((u_int *)kptpa)[m68k_btop(KERNBASE)];
	/* XXX why KERNBASE relative? */
	epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = firstpa | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (kstpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &((u_int *)kptpa)[m68k_btop(kstpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Map the kernel segment table cache invalidated for these
	 * machines (for the 68040 not strictly necessary, but
	 * recommended by Motorola; for the 68060 mandatory).
	 * XXX this includes p0upa.  why?
	 */
	epte = &((u_int *)kptpa)[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	if (RELOC(mmutype, int) == MMU_68040) {
		protopte &= ~PG_CCB;
		protopte |= PG_CIN;
	}
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
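	/*
	 * At this point all of the statically allocated region is
	 * mapped: text is RO, data/bss and early allocations are RW
	 * (copyback cached on the 040), and everything from the segment
	 * table through the kernel PT pages (including p0upa, per the
	 * XXX above) is RW and cache-inhibited on the 040.
	 */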

	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 */

#define	PTE2VA(pte)	m68k_ptob(pte - ((pt_entry_t *)kptpa))

	protopte = INTIOBASE | PG_RW | PG_CI | PG_V;
	epte = &pte[IIOMAPSIZE];
	RELOC(IODEVbase, char *) = (char *)PTE2VA(pte);
	RELOC(intiobase, u_int8_t *) = RELOC(IODEVbase, u_int8_t *); /* XXX */
	RELOC(intiolimit, char *) = (char *)PTE2VA(epte);
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	RELOC(virtual_avail, vaddr_t) = PTE2VA(pte);

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) =
		(st_entry_t *)(kstpa - firstpa);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) =
		(pt_entry_t *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap).
	 * Allocated at the end of KVA space, via the self-referencing
	 * last entry of Sysptmap set up above.
	 */
	RELOC(Sysmap, pt_entry_t *) =
		(pt_entry_t *)m68k_ptob((NPTEPG - 1) * NPTEPG);

	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + USPACE);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 */
	RELOC(avail_start, paddr_t) = nextpa;
	RELOC(avail_end, paddr_t) =
		m68k_ptob(RELOC(maxmem, int))
		/* XXX allow for msgbuf */
		- m68k_round_page(MSGBUFSIZE);
	RELOC(mem_size, psize_t) = m68k_ptob(RELOC(physmem, int));
	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
	 */
	{
		int *kp;

		kp = &RELOC(protection_codes, int);
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}
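	/*
	 * (protection_codes[] is indexed directly by combinations of
	 * the VM_PROT_READ, VM_PROT_WRITE and VM_PROT_EXECUTE bits, so
	 * the eight entries above cover every case.  The m68k PTE has
	 * no execute bit, so any executable combination degenerates to
	 * PG_RO or PG_RW.)
	 */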
	/*
	 * Kernel page/segment table allocated above,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm = &RELOC(kernel_pmap_store, struct pmap);

		kpm->pm_stab = RELOC(Sysseg, st_entry_t *);
		kpm->pm_ptab = RELOC(Sysmap, pt_entry_t *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (st_entry_t *)kstpa;
#if defined(M68040) || defined(M68060)
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps kptmpa and last-page page table
		 */
		if (RELOC(mmutype, int) == MMU_68040) {
			int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE),
				      SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			for (num = MAXKL2SIZE;
			     num < sizeof(kpm->pm_stfree)*NBBY;
			     num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
#endif
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = RELOC(virtual_avail, vaddr_t);

		RELOC(CADDR1, caddr_t) = (caddr_t)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, caddr_t) = (caddr_t)va;
		va += PAGE_SIZE;
		RELOC(vmmap, caddr_t) = (caddr_t)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, caddr_t) = (caddr_t)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vaddr_t) = va;
	}
}