1 /* $NetBSD: pmap_bootstrap.c,v 1.17 1995/10/02 09:41:06 briggs Exp $ */ 2 3 /* 4 * Copyright (c) 1991, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * the Systems Programming Group of the University of Utah Computer 9 * Science Department. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the University of 22 * California, Berkeley and its contributors. 23 * 4. Neither the name of the University nor the names of its contributors 24 * may be used to endorse or promote products derived from this software 25 * without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 30 * ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/param.h>
#include <sys/msgbuf.h>
#include <sys/reboot.h>
#include <machine/pte.h>
#include <mac68k/mac68k/clockreg.h>
#include <machine/vmparam.h>
#include <machine/cpu.h>

#include <vm/vm.h>

/*
 * Translate a physical address within the loaded kernel image into the
 * address at which it is currently accessible, by subtracting the load
 * address `firstpa' (a local of pmap_bootstrap; this macro is only
 * usable inside that function).
 * NOTE(review): assumes the current mapping is VA == PA - firstpa while
 * bootstrapping -- see the comment above pmap_bootstrap().
 */
#define PA2VA(v, t)	(t)((u_int)(v) - firstpa)

extern char *etext;
extern int Sysptsize;
extern char *extiobase, *proc0paddr;
extern st_entry_t *Sysseg;
extern pt_entry_t *Sysptmap, *Sysmap;

extern int maxmem, physmem;
extern int avail_remaining, avail_range, avail_end;
extern vm_offset_t avail_start, avail_next;
extern vm_offset_t virtual_avail, virtual_end;
extern vm_size_t mem_size;
extern int protection_codes[];

extern vm_offset_t reserve_dumppages __P((vm_offset_t));

/*
 * These are used to map the RAM:
 * numranges == 0 means "don't use the ranges"; low[i]/high[i] bound
 * physical RAM range i.  The nb* arrays describe NuBus space (defined
 * elsewhere), and the mac68k_vid* variables describe internal video.
 */
int numranges;	/* = 0 == don't use the ranges */
u_long low[8];
u_long high[8];
extern int nbnumranges;
extern u_long nbphys[];
extern u_long nblog[];
extern signed long nblen[];
/* Number of PTEs needed to map the internal video buffer. */
#define VIDMAPSIZE	btoc(mac68k_round_page(mac68k_vidlen))
extern u_int32_t mac68k_vidlen;
extern u_int32_t mac68k_vidlog;
extern u_int32_t mac68k_vidphys;

extern caddr_t ROMBase;

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufp:	kernel message buffer
 */
caddr_t		CADDR1, CADDR2, vmmap;
struct msgbuf	*msgbufp;

/*
 * Bootstrap the VM system.
 *
 * This is called with the MMU either on or off.  If it's on, we assume
 * that it's mapped with the same PA <=> LA mapping that we eventually
 * want.  The page sizes and the protections will be wrong, anyway.
 *
 * nextpa:	first physical address free beyond the loaded kernel image
 *		(everything allocated here is carved from this point on).
 * firstpa:	physical address at which the kernel image was loaded.
 *
 * On return the kernel segment table, static kernel page tables, the
 * I/O / ROM / NuBus / video mappings, the proc0 u-area, and the pmap
 * module's exported variables (Sysseg, Sysptmap, Sysmap, avail_*,
 * virtual_*, protection_codes, kernel_pmap_store) are all initialized.
 */
void
pmap_bootstrap(nextpa, firstpa)
	vm_offset_t nextpa;
	register vm_offset_t firstpa;
{
	vm_offset_t kstpa, kptpa, vidpa, iiopa, nbpa, rompa;
	vm_offset_t kptmpa, lkptpa, p0upa;
	u_int nptpages, kstsize;
	int i;
	register st_entry_t protoste, *ste;
	register pt_entry_t protopte, *pte, *epte;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 *	vidpa		internal video space for some machines
	 *			PT pages		VIDMAPSIZE pages
	 *
	 *	nbpa		NuBus IO space
	 *			PT pages		NBMAPSIZE pages
	 *
	 *	rompa		ROM space
	 *			PT pages		ROMMAPSIZE pages
	 *
	 *	iiopa		internal IO space
	 *			PT pages		IIOMAPSIZE pages
	 *
	 * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and
	 *   NBMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 */
	if (mmutype == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * NBPG;
	kptpa = nextpa;
	nptpages = Sysptsize +
		(IIOMAPSIZE + NBMAPSIZE + ROMMAPSIZE + VIDMAPSIZE
		 + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * NBPG;
	/*
	 * The special-purpose PT ranges are packed at the tail of the
	 * static PT pages, in the order (low to high): iio, rom, nb, vid.
	 */
	vidpa = nextpa - VIDMAPSIZE * sizeof(pt_entry_t);
	nbpa = vidpa - NBMAPSIZE * sizeof(pt_entry_t);
	rompa = nbpa - ROMMAPSIZE * sizeof(pt_entry_t);
	iiopa = rompa - IIOMAPSIZE * sizeof(pt_entry_t);
	kptmpa = nextpa;
	nextpa += NBPG;
	lkptpa = nextpa;
	nextpa += NBPG;
	p0upa = nextpa;
	nextpa += USPACE;

	/*
	 * All of the above must fit within the first physical RAM range;
	 * otherwise we cannot boot.
	 */
	if (nextpa > high[0]) {
		printf("Failure in BSD boot.  nextpa=0x%x, high[0]=0x%x.\n",
			nextpa, high[0]);
		panic("You're hosed!\n");
	}

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
	if (mmutype == MMU_68040) {
		register int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = PA2VA(kstpa, u_int *);
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages+1
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		num = (nptpages + 1) * (NPTEPG / SG4_LEV3SIZE);
		pte = &(PA2VA(kstpa, u_int *))[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = PA2VA(kstpa, u_int *);
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &(PA2VA(kstpa, u_int*))[SG4_LEV1SIZE-1];
		pte = &(PA2VA(kstpa, u_int*))[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map the "last PT page".
		 */
		pte = &(PA2VA(kstpa, u_int*))
				[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize Sysptmap
		 */
		pte = PA2VA(kptmpa, u_int *);
		epte = &pte[nptpages+1];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += NBPG;
		}
		/*
		 * Invalidate all but the last remaining entry, which is
		 * set below to map the last kernel PT page.
		 */
		epte = &(PA2VA(kptmpa, u_int *))[NPTEPG-1];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		pte = &(PA2VA(kptmpa, u_int *))[NPTEPG-1];
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.  Note that Sysptmap is also
		 * considered a PT page hence the +1.
		 */
		ste = PA2VA(kstpa, u_int*);
		pte = PA2VA(kptmpa, u_int*);
		epte = &pte[nptpages+1];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += NBPG;
			protopte += NBPG;
		}
		/*
		 * Invalidate all but the last remaining entries in both.
		 */
		epte = &(PA2VA(kptmpa, u_int *))[NPTEPG-1];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last to point to the page
		 * table page allocated earlier.
		 */
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all but the final entry in the last kernel PT page
	 * (u-area PTEs will be validated later).  The final entry maps
	 * the last page of physical memory.
	 */
	pte = PA2VA(lkptpa, u_int *);
	epte = &pte[NPTEPG-1];
	while (pte < epte)
		*pte++ = PG_NV;
	*pte = (0xFFFFF000) | PG_RW | PG_CI | PG_V;	/* XXX */

	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = PA2VA(kptpa, u_int *);
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * Validate PTEs for kernel text (RO)
	 */
	pte = &(PA2VA(kptpa, u_int *))[mac68k_btop(KERNBASE)];
	epte = &pte[mac68k_btop(mac68k_trunc_page(&etext))];
#if defined(KGDB) || defined(DDB)
	protopte = firstpa | PG_RW | PG_V;	/* XXX RW for now */
#else
	protopte = firstpa | PG_RO | PG_V;
#endif
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &(PA2VA(kptpa, u_int *))[mac68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (mmutype == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 * We do this here since the 320/350 MMU registers (also
	 * used, but to a lesser extent, on other models) are mapped
	 * in this range and it would be nice to be able to access
	 * them after the MMU is turned on.
	 */
	pte = PA2VA(iiopa, u_int *);
	epte = PA2VA(rompa, u_int *);
	protopte = INTIOBASE | PG_RW | PG_CI | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}

	/*
	 * Map the ROM read-only (and cacheable).
	 */
	pte = PA2VA(rompa, u_int *);
	epte = PA2VA(nbpa, u_int *);
	protopte = ((u_int) ROMBase) | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}

	/*
	 * Map NuBus space RW and cache-inhibited (device space).
	 */
	pte = PA2VA(nbpa, u_int *);
	epte = pte + NBMAPSIZE;
	protopte = NBBASE | PG_RW | PG_V | PG_CI;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}

	/*
	 * Map internal video, if present, RW and cache-inhibited.
	 */
	if (mac68k_vidlog) {
		pte = PA2VA(vidpa, u_int *);
		epte = pte + VIDMAPSIZE;
		protopte = mac68k_vidphys | PG_RW | PG_V | PG_CI;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += NBPG;
		}
	}

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	Sysseg = PA2VA(kstpa, st_entry_t *);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	Sysptmap = PA2VA(kptmpa, pt_entry_t *);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Immediately follows `nptpages' of static kernel page table.
	 */
	Sysmap = (pt_entry_t *)mac68k_ptob(nptpages * NPTEPG);

	/*
	 * The VAs of the special ranges follow from their placement at
	 * the tail of the static PT pages (see the *pa computation above).
	 */
	IOBase = (u_long)mac68k_ptob(nptpages*NPTEPG -
			(IIOMAPSIZE + ROMMAPSIZE + NBMAPSIZE + VIDMAPSIZE));

	ROMBase = (char *)mac68k_ptob(nptpages*NPTEPG -
			(ROMMAPSIZE + NBMAPSIZE + VIDMAPSIZE));

	NuBusBase = (u_long)mac68k_ptob(nptpages*NPTEPG -
			(NBMAPSIZE + VIDMAPSIZE));

	/*
	 * Relocate the video base to its new virtual address, preserving
	 * the sub-page offset of the physical frame buffer.
	 */
	if (mac68k_vidlog)
		mac68k_vidlog = (u_int32_t)
				mac68k_ptob(nptpages*NPTEPG - VIDMAPSIZE)
				+ (mac68k_vidphys & PGOFSET);

	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = PA2VA(p0upa, u_int *);
	epte = (u_int *) (PA2VA(p0upa, u_int) + USPACE);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	proc0paddr = PA2VA(p0upa, char *);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 */
	avail_next = avail_start = mac68k_round_page(nextpa);
	avail_remaining = 0;
	avail_range = -1;
	/*
	 * Sum the free memory: all of the range containing avail_next
	 * beyond that point, plus every subsequent range in full.
	 * Ranges below avail_next contribute nothing.
	 */
	for (i = 0; i < numranges; i++) {
		if (avail_next >= low[i] && avail_next < high[i]) {
			avail_range = i;
			avail_remaining = high[i] - avail_next;
		} else if (avail_range != -1) {
			avail_remaining += (high[i] - low[i]);
		}
	}
	physmem = mac68k_btop(avail_remaining + nextpa - firstpa);
	/* Reserve space for the message buffer at the top of memory. */
	avail_remaining -= mac68k_round_page(sizeof(struct msgbuf));
	high[numranges - 1] -= mac68k_round_page(sizeof(struct msgbuf));

	/* XXX -- this doesn't look correct to me. */
	while (high[numranges - 1] < low[numranges - 1]) {
		numranges--;
		high[numranges - 1] -= low[numranges] - high[numranges];
	}

	avail_remaining = mac68k_trunc_page(avail_remaining);
	avail_end = avail_start + avail_remaining;
	avail_remaining = mac68k_btop(avail_remaining);

	mem_size = mac68k_ptob(physmem);
	virtual_avail = VM_MIN_KERNEL_ADDRESS + (nextpa - firstpa);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
	 */
	{
		register int *kp;

		kp = (int *) &protection_codes;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}

	/*
	 * Kernel page/segment table allocated in locore,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm = (struct pmap *)&kernel_pmap_store;

		kpm->pm_stab = Sysseg;
		kpm->pm_ptab = Sysmap;
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (st_entry_t *)kstpa;
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps last-page page table
		 */
		if (mmutype == MMU_68040) {
			register int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup((nptpages + 1) * (NPTEPG / SG4_LEV3SIZE),
				      SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			/* Mark bitmap bits beyond MAXKL2SIZE as in-use. */
			for (num = MAXKL2SIZE;
			     num < sizeof(kpm->pm_stfree)*NBBY;
			     num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vm_offset_t va = virtual_avail;

		CADDR1 = (caddr_t)va;
		va += NBPG;
		CADDR2 = (caddr_t)va;
		va += NBPG;
		vmmap = (caddr_t)va;
		va += NBPG;
		msgbufp = (struct msgbuf *)va;
		va += NBPG;
		virtual_avail = reserve_dumppages(va);
	}
}

/*
 * Machine-dependent bootstrap entry for mac68k: determine the physical
 * RAM ranges (from the MMU if it is already enabled, faked otherwise),
 * optionally load the MFS miniroot, run pmap_bootstrap(), and then
 * relocate the video and ROM base addresses to their new mappings.
 *
 * tc: the 68030 translation control register value handed up from the
 *     booter.  NOTE(review): the 0x80000000 test below appears to check
 *     the TC "enable" bit, i.e. whether the MMU is already on -- confirm
 *     against the booter/locore code.
 */
void
bootstrap_mac68k(tc)
	int tc;
{
	extern caddr_t esym;
	extern u_long videoaddr, boothowto;
	u_long newvideoaddr = 0;
	vm_offset_t nextpa;
	caddr_t oldROMBase;

	if (mac68k_machine.do_graybars)
		printf("Bootstrapping NetBSD/mac68k.\n");

	oldROMBase = ROMBase;

	if ((tc & 0x80000000) && (mmutype == MMU_68030)) {

		if (mac68k_machine.do_graybars)
			printf("Getting mapping from MMU.\n");
		get_mapping();
		if (mac68k_machine.do_graybars)
			printf("Done.\n");
	} else {
		/* MMU not enabled.  Fake up ranges. */
		nbnumranges = 0;
		numranges = 1;
		low[0] = 0;
		high[0] = mac68k_machine.mach_memsize * (1024 * 1024);
		if (mac68k_machine.do_graybars)
			printf("Faked range to byte 0x%x.\n", high[0]);
	}
	/*
	 * First free physical address: end of the symbol table, rounded
	 * up to a page.  NOTE(review): `+' binds tighter than `&', so
	 * this masks (load_addr + esym + NBPG - 1); that equals
	 * load_addr + ((esym + NBPG - 1) & PG_FRAME) only when load_addr
	 * is page-aligned -- confirm intent.
	 */
	nextpa = load_addr + ((int)esym + NBPG - 1) & PG_FRAME;

#if MFS
	/*
	 * If booting with a memory-filesystem miniroot, load it
	 * immediately after the kernel image and account for its size.
	 */
	if (boothowto & RB_MINIROOT) {
		int v;
		boothowto |= RB_DFLTROOT;
		nextpa = mac68k_round_page(nextpa);
		if ((v = mfs_initminiroot(nextpa-load_addr)) == 0) {
			printf("Error loading miniroot.\n");
		}
		printf("Loaded %d byte miniroot.\n", v);
		nextpa += v;
	}
#endif

	if (mac68k_machine.do_graybars)
		printf("Bootstrapping the pmap system.\n");

	pmap_bootstrap(nextpa, load_addr);

	if (mac68k_machine.do_graybars)
		printf("Pmap bootstrapped.\n");

	/*
	 * Figure out where the frame buffer lives after the remapping:
	 * internal video was relocated by pmap_bootstrap(); NuBus video
	 * is rebased from NBBASE to NuBusBase.
	 */
	if (mac68k_vidlog)
		newvideoaddr = mac68k_vidlog;
	else {
		if (NBBASE <= videoaddr && videoaddr <= NBTOP)
			newvideoaddr = videoaddr - NBBASE + NuBusBase;
		else
			panic("Don't know how to relocate video!\n");
	}

	if (mac68k_machine.do_graybars)
		printf("Moving ROMBase from 0x%x to 0x%x.\n",
			oldROMBase, ROMBase);

	/* Patch ROM glue pointers to reflect the new ROM mapping. */
	mrg_fixupROMBase(oldROMBase, ROMBase);

	if (mac68k_machine.do_graybars)
		printf("Video address 0x%x -> 0x%x.\n",
			videoaddr, newvideoaddr);

	mac68k_set_io_offsets(IOBase);
	videoaddr = newvideoaddr;
}