/*	$NetBSD: pmap_bootstrap.c,v 1.16 1999/03/17 12:29:56 minoura Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/param.h>
#include <machine/pte.h>
#include <x68k/x68k/iodevice.h>
#include <machine/vmparam.h>
#include <machine/cpu.h>

#include <vm/vm.h>

/*
 * Access kernel globals while the MMU is still off: relocate a
 * link-time address into the image loaded at `firstpa'.  RELOC
 * yields the object itself, RELOCA just the relocated address.
 */
#define RELOC(v, t)	*((t*)((caddr_t)&(v) + firstpa))
#define RELOCA(a, t)	((t)((caddr_t)(a) + firstpa))

extern char *etext;
extern int Sysptsize;
extern char *extiobase, *proc0paddr;
extern st_entry_t *Sysseg;
extern pt_entry_t *Sysptmap, *Sysmap;

extern int maxmem, physmem;
extern paddr_t avail_start, avail_end;
extern vaddr_t virtual_avail, virtual_end;
extern psize_t mem_size;
extern int protection_codes[];
#ifdef M68K_MMU_HP
extern int pmap_aliasmask;
#endif

u_int8_t *intiobase = (u_int8_t *) PHYS_IODEV;

void pmap_bootstrap __P((paddr_t, paddr_t));

#ifdef EXTENDED_MEMORY
static int mem_exists __P((caddr_t, u_long));
static void setmemrange __P((paddr_t));

/*
 * These are used to map the non-contiguous memory.
 */
int numranges;		/* = 0 == don't use the ranges */
u_long low[8];
u_long high[8];
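/*
 * A hypothetical example of what setmemrange() computes: on a machine
 * with the full 12MB of base RAM and a TS-6BE16 board, it would leave
 *
 *	numranges = 2
 *	low[0] = 0x00000000	high[0] = 0x00C00000	(base RAM)
 *	low[1] = 0x01000000	high[1] = 0x02000000	(TS-6BE16)
 */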
#endif

#ifndef EIOMAPSIZE
#define EIOMAPSIZE 0
#endif

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
caddr_t		CADDR1, CADDR2, vmmap;
extern caddr_t	msgbufaddr;

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!).  `nextpa' is the first available physical
 * memory address.  Returns an updated first PA reflecting the memory we
 * have allocated.  MMU is still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(nextpa, firstpa)
	paddr_t nextpa;
	paddr_t firstpa;
{
	paddr_t kstpa, kptpa, iiopa, eiopa, kptmpa, lkptpa, p0upa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste;
	pt_entry_t protopte, *pte, *epte;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa	kernel segment table		1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa	statically allocated
	 *		kernel PT pages			Sysptsize+ pages
	 *
	 *	iiopa	internal IO space
	 *		PT pages			IIOMAPSIZE pages
	 *
	 *	eiopa	external IO space
	 *		PT pages			EIOMAPSIZE pages
	 *
	 *	[ Sysptsize is the number of pages of PT, IIOMAPSIZE and
	 *	  EIOMAPSIZE are the number of PTEs, hence we need to round
	 *	  the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa	kernel PT map			1 page
	 *
	 *	lkptpa	last kernel PT page		1 page
	 *
	 *	p0upa	proc 0 u-area			UPAGES pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * NBPG;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int) +
	    (IIOMAPSIZE + EIOMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * NBPG;
	eiopa = nextpa - EIOMAPSIZE * sizeof(pt_entry_t);
	iiopa = eiopa - IIOMAPSIZE * sizeof(pt_entry_t);
	kptmpa = nextpa;
	nextpa += NBPG;
	lkptpa = nextpa;
	nextpa += NBPG;
	p0upa = nextpa;
	nextpa += USPACE;
#ifdef EXTENDED_MEMORY
	setmemrange(firstpa);
#if 0
	if (nextpa > high[0]) {
		printf("Failure in BSD boot.  nextpa=0x%lx, high[0]=0x%lx.\n",
		    nextpa, high[0]);
		panic("You're hosed!\n");
	}
#endif
#endif
	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0xFFF00000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.  UPAGES pages
	 * starting at 0xFFF00000 are used for mapping the current process
	 * u-area (u + kernel stack).  The very last page (0xFFFFF000) is
	 * mapped to the last physical page of RAM to give us a region in
	 * which PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
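	/*
	 * (The 224mb figure: a single 4k segment table page holds the
	 * 512-byte level 1 table plus 3584 bytes of level 2 descriptors,
	 * i.e. 7 blocks of 128; each block maps 128 * 256kb = 32mb, so
	 * 7 * 32mb = 224mb of mappable address space.)
	 */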
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040) {
		int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = (u_int *)kstpa;
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages+1
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		num = (nptpages + 1) * (NPTEPG / SG4_LEV3SIZE);
		pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = (u_int *)kstpa;
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1];
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map the "last PT page".
		 */
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize Sysptmap
		 */
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += NBPG;
		}
		/*
		 * Invalidate all but the last remaining entry.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-1];
		while (pte < epte)
			*pte++ = PG_NV;
		/*
		 * Initialize the last one to point to the page
		 * table page allocated earlier.
		 */
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else
#endif /* M68040 || M68060 */
	{
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.  Note that Sysptmap is also
		 * considered a PT page hence the +1.
		 */
		ste = (u_int *)kstpa;
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += NBPG;
			protopte += NBPG;
		}
		/*
		 * Invalidate all but the last remaining entries in both.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-1];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last one to point to the page
		 * table page allocated earlier.
		 */
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
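	/*
	 * (In the two-level case each segment table entry points at one
	 * page of PTEs and thus maps NPTEPG * NBPG = 4mb of address
	 * space; the nptpages+1 entries set up above cover the static
	 * kernel page table plus Sysptmap itself.)
	 */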
	/*
	 * Invalidate all but the final entry in the last kernel PT page
	 * (u-area PTEs will be validated later).  The final entry maps
	 * the last page of physical memory.
	 */
	pte = (u_int *)lkptpa;
	epte = &pte[NPTEPG/*-1*/];
	while (pte < epte)
		*pte++ = PG_NV;
	/* *pte = MAXADDR | PG_RW | PG_CI | PG_V; */
	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' pages that we have allocated.
	 */
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Validate PTEs for kernel text (RO)
	 */
	pte = &((u_int *)kptpa)[m68k_btop(KERNBASE)];
	epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = firstpa | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &((u_int *)kptpa)[m68k_btop(kstpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Map the kernel segment table cache-inhibited.  (Not strictly
	 * necessary on the 68040, but recommended by Motorola; mandatory
	 * on the 68060.)
	 */
	epte = &((u_int *)kptpa)[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	if (RELOC(mmutype, int) == MMU_68040) {
		protopte &= ~PG_CCB;
		protopte |= PG_CIN;
	}
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 * We do this here since the 320/350 MMU registers (also
	 * used, but to a lesser extent, on other models) are mapped
	 * in this range and it would be nice to be able to access
	 * them after the MMU is turned on.
	 */
	pte = (u_int *)iiopa;
	epte = (u_int *)eiopa;
	protopte = INTIOBASE | PG_RW | PG_CI | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) =
	    (st_entry_t *)(kstpa - firstpa);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) =
	    (pt_entry_t *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Immediately follows `nptpages' of static kernel page table.
	 */
	RELOC(Sysmap, pt_entry_t *) =
	    (pt_entry_t *)m68k_ptob(nptpages * NPTEPG);
	/*
	 * IODEVbase, intiolimit: base and end of internal (DIO) IO space.
	 * IIOMAPSIZE pages prior to external IO space at end of static
	 * kernel page table.
	 */
	RELOC(IODEVbase, char *) =
	    (char *)m68k_ptob(nptpages*NPTEPG - (IIOMAPSIZE+EIOMAPSIZE));
	RELOC(intiobase, u_int8_t *) = RELOC(IODEVbase, u_int8_t *);	/* XXX */
	RELOC(intiolimit, char *) =
	    (char *)m68k_ptob(nptpages*NPTEPG - EIOMAPSIZE);
	/*
	 * extiobase: base of external (DIO-II) IO space.
	 * EIOMAPSIZE pages at the end of the static kernel page table.
	 */
	RELOC(extiobase, char *) =
	    (char *)m68k_ptob(nptpages*NPTEPG - EIOMAPSIZE);
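	/*
	 * The tail of the static kernel page table now looks like this
	 * (one PTE per page of KVA):
	 *
	 *	... kernel PTEs | IIOMAPSIZE PTEs | EIOMAPSIZE PTEs |
	 *	                ^                 ^                 ^
	 *	            IODEVbase/       intiolimit/     end of static
	 *	            intiobase        extiobase       kernel PT
	 */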
	/*
	 * Set up the u-area for process 0.
	 */
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + USPACE);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded into the
	 * proc struct p_addr field later.
	 */
	RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 */
	RELOC(avail_start, paddr_t) = nextpa;
	RELOC(avail_end, paddr_t) =
	    m68k_ptob(RELOC(maxmem, int))
	    /* XXX allow for msgbuf */
	    - m68k_round_page(MSGBUFSIZE);
#ifdef EXTENDED_MEMORY
	{
		int i;
		psize_t av_rem = 0;
		int av_rng = -1;
		int nranges = RELOC(numranges, int);
		u_long *l = RELOCA(low, u_long *);
		u_long *h = RELOCA(high, u_long *);

		for (i = 0; i < nranges; i++) {
			if (nextpa >= l[i] && nextpa < h[i]) {
				av_rng = i;
				av_rem = h[i] - nextpa;
			} else if (av_rng != -1) {
				av_rem += (h[i] - l[i]);
			}
		}

		RELOC(physmem, int) = m68k_btop(av_rem + nextpa - firstpa);
		av_rem -= m68k_round_page(MSGBUFSIZE);
		h[nranges - 1] -= m68k_round_page(MSGBUFSIZE);
		/* XXX -- this doesn't look correct to me. */
		while (h[nranges - 1] < l[nranges - 1]) {
			RELOC(numranges, int) = --nranges;
			h[nranges - 1] -= l[nranges] - h[nranges];
		}
		av_rem = m68k_trunc_page(av_rem);
		RELOC(avail_end, paddr_t) = nextpa + av_rem;
	}
#endif
	RELOC(mem_size, psize_t) = m68k_ptob(RELOC(physmem, int));
	RELOC(virtual_avail, vaddr_t) =
	    VM_MIN_KERNEL_ADDRESS + (nextpa - firstpa);
	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

#ifdef M68K_MMU_HP
	/*
	 * Determine VA aliasing distance if any
	 */
	if (RELOC(ectype, int) == EC_VIRT) {
		if (RELOC(machineid, int) == HP_320)
			RELOC(pmap_aliasmask, int) = 0x3fff;	/* 16k */
		else if (RELOC(machineid, int) == HP_350)
			RELOC(pmap_aliasmask, int) = 0x7fff;	/* 32k */
	}
#endif

	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
	 */
	{
		int *kp;

		kp = &RELOC(protection_codes, int);
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}
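	/*
	 * The array is indexed directly by a VM_PROT_* combination, so
	 * the pmap module can later convert a protection into hardware
	 * PTE bits with a plain lookup, e.g.
	 * protection_codes[VM_PROT_READ|VM_PROT_WRITE] yields PG_RW.
	 */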
	/*
	 * Kernel page/segment table allocated in locore,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm = &RELOC(kernel_pmap_store, struct pmap);

		kpm->pm_stab = RELOC(Sysseg, st_entry_t *);
		kpm->pm_ptab = RELOC(Sysmap, pt_entry_t *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (st_entry_t *)kstpa;
#if defined(M68040) || defined(M68060)
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps last-page page table
		 */
		if (RELOC(mmutype, int) == MMU_68040) {
			int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup((nptpages + 1) * (NPTEPG / SG4_LEV3SIZE),
			    SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			for (num = MAXKL2SIZE;
			     num < sizeof(kpm->pm_stfree)*NBBY;
			     num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
#endif
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = RELOC(virtual_avail, vaddr_t);

		RELOC(CADDR1, caddr_t) = (caddr_t)va;
		va += NBPG;
		RELOC(CADDR2, caddr_t) = (caddr_t)va;
		va += NBPG;
		RELOC(vmmap, caddr_t) = (caddr_t)va;
		va += NBPG;
		RELOC(msgbufaddr, caddr_t) = (caddr_t)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vaddr_t) = va;
	}
}

#ifdef EXTENDED_MEMORY
static struct memlist {
	caddr_t base;
	psize_t min;
	psize_t max;
} memlist[] = {
	{ (caddr_t)0x01000000, 0x01000000, 0x01000000 },
					/* TS-6BE16 16MB memory */
	{ (caddr_t)0x10000000, 0x00400000, 0x08000000 },
					/* 060turbo SIMM slot (4--128MB) */
};

/*
 * badaddr_nommu() probes the word at `a0' after temporarily pointing
 * the bus error vector at a local handler, so it is usable before the
 * MMU is turned on.  Returns 0 if the address responded, non-zero if
 * the access caused a bus error.
 */
asm("	.text\n\
	.even\n\
_badaddr_nommu:\n\
	movc	vbr,a1\n\
	addql	#8,a1		| bus error vector\n\
	movl	a1@,d0		| save original vector\n\
	movl	sp,d1		| save original sp\n\
	pea	pc@(Laddrbad)\n\
	movl	sp@+,a1@\n\
	tstw	a0@		| test address\n\
	movl	d0,a1@		| restore vector\n\
	clrl	d0\n\
	rts			| d0 == 0, ZF = 1\n\
Laddrbad:\n\
	movl	d1,sp		| restore sp\n\
	movl	d0,a1@		| restore vector\n\
	rts			| d0 != 0, ZF = 0\n\
");

#define badaddr_nommu(addr)						\
	({ register int val asm("d0");					\
	   register caddr_t a asm("a0") = addr;				\
	   asm("jbsr _badaddr_nommu" :					\
	       "=d"(val) : "a"(a) : "d1", "a1");			\
	   val; })
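/*
 * A probe address may also appear to respond simply because it wraps
 * around to base memory on machines that decode only 24 address bits.
 * mem_exists() therefore writes complementary patterns (0x55/0xAA) to
 * both the probe address and its 24-bit alias and reads them back,
 * distinguishing genuine extended memory from a mere alias.
 */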
/*
 * Check for memory existence.
 */
static int
mem_exists(mem, basemax)
	caddr_t mem;
	u_long basemax;
{
	/* NOTE: most variables must be in registers! */
	register volatile unsigned char *m, *b;
	register unsigned char save_m, save_b;
	register int baseismem;
	register int exists = 0;
	caddr_t base;
	caddr_t begin_check, end_check;

	if (badaddr_nommu(mem))
		return 0;

	/* only 24 bits are significant on normal X680x0 systems */
	base = (caddr_t)((u_long)mem & 0x00FFFFFF);

	/* This is somewhat paranoid -- avoid overwriting myself */
	asm("lea pc@(begin_check_mem),%0" : "=a"(begin_check));
	asm("lea pc@(end_check_mem),%0" : "=a"(end_check));
	if (base >= begin_check && base < end_check) {
		size_t off = end_check - begin_check;

		mem -= off;
		base -= off;
	}

	m = mem;
	b = base;

	/*
	 * We can't check by writing if the corresponding
	 * base address isn't memory.
	 *
	 * I hope this does no harm....
	 */
	baseismem = base < (caddr_t)basemax;

	/* save original values (base must be saved first) */
	if (baseismem)
		save_b = *b;
	save_m = *m;

	asm("begin_check_mem:");
	/*
	 * The stack and other data segment variables are unusable
	 * until end_check_mem, because they may be clobbered.
	 */

	/*
	 * Check memory by writing/reading.
	 */
	if (baseismem)
		*b = 0x55;
	*m = 0xAA;
	if ((baseismem && *b != 0x55) || *m != 0xAA)
		goto out;

	*m = 0x55;
	if (baseismem)
		*b = 0xAA;
	if (*m != 0x55 || (baseismem && *b != 0xAA))
		goto out;

	exists = 1;
out:
	*m = save_m;
	if (baseismem)
		*b = save_b;

	asm("end_check_mem:");

	return exists;
}

static void
setmemrange(firstpa)
	paddr_t firstpa;
{
	int i;
	psize_t s, min, max;
	u_long *l = RELOCA(low, u_long *);
	u_long *h = RELOCA(high, u_long *);
	struct memlist *mlist = RELOCA(memlist, struct memlist *);
	int nranges;

	/* first, x68k base memory */
	nranges = 0;
	l[nranges] = 0x00000000;
	h[nranges] = *(u_long *)0x00ED0008;	/* memory size from SRAM */
	nranges++;

	/* second, discover extended memory */
	for (i = 0; i < sizeof(memlist) / sizeof(memlist[0]); i++) {
		min = mlist[i].min;
		max = mlist[i].max;
		/*
		 * Normally, x68k hardware is NOT 32bit-clean,
		 * but some kinds of extended memory do live in the
		 * 32bit address space.  Check whether this one does.
		 */
		if (!mem_exists(mlist[i].base, h[0]))
			continue;
		l[nranges] = (u_long)mlist[i].base;
		h[nranges] = 0;
		/* range check */
		for (s = min; s <= max; s += 0x00100000) {
			if (!mem_exists(mlist[i].base + s - 4, h[0]))
				break;
			h[nranges] = (u_long)(mlist[i].base + s);
		}
		if (l[nranges] < h[nranges])
			nranges++;
	}

	RELOC(numranges, int) = nranges;
}
#endif