/*	$NetBSD: pmap_bootstrap.c,v 1.64 2005/12/11 12:18:03 christos Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.64 2005/12/11 12:18:03 christos Exp $");

#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "zsc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/reboot.h>

#include <uvm/uvm_extern.h>

#include <machine/pte.h>
#include <mac68k/mac68k/clockreg.h>
#include <machine/vmparam.h>
#include <machine/cpu.h>
#include <machine/pmap.h>
#include <machine/autoconf.h>

#include <ufs/mfs/mfs_extern.h>

#include <mac68k/mac68k/macrom.h>

#define PA2VA(v, t)	(t)((u_int)(v) - firstpa)

extern char *etext;
extern int Sysptsize;
extern char *extiobase, *proc0paddr;
extern st_entry_t *Sysseg;
extern pt_entry_t *Sysptmap, *Sysmap;

extern int physmem;
extern paddr_t avail_start;
extern paddr_t avail_end;
extern vaddr_t virtual_avail, virtual_end;
extern vsize_t mem_size;
extern int protection_codes[];

#if NZSC > 0
extern	int	zsinited;
#endif

/*
 * These are used to map the RAM:
 */
int	numranges;	/* = 0 == don't use the ranges */
u_long	low[8];
u_long	high[8];
u_long	maxaddr;	/* PA of the last physical page */
int	vidlen;
#define VIDMAPSIZE	btoc(vidlen)
extern u_int32_t	mac68k_vidphys;
extern u_int32_t	videoaddr;
extern u_int32_t	videorowbytes;
extern u_int32_t	videosize;
static u_int32_t	newvideoaddr;

extern caddr_t	ROMBase;

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
caddr_t		CADDR1, CADDR2, vmmap;
extern caddr_t	msgbufaddr;

void	pmap_bootstrap(paddr_t, paddr_t);
void	bootstrap_mac68k(int);

/*
 * Bootstrap the VM system.
 *
 * This is called with the MMU either on or off.  If it's on, we assume
 * that it's mapped with the same PA <=> LA mapping that we eventually
 * want.  The page sizes and the protections will be wrong, anyway.
 *
 * nextpa is the first address following the loaded kernel.  On a IIsi
 * on 12 May 1996, that was 0xf9000 beyond firstpa.
 */
void
pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
{
	paddr_t kstpa, kptpa, kptmpa, lkptpa, p0upa;
	u_int nptpages, kstsize;
	paddr_t avail_next;
	int avail_remaining;
	int avail_range;
	int i;
	st_entry_t protoste, *ste;
	pt_entry_t protopte, *pte, *epte;
	extern char start[];

	vidlen = m68k_round_page(((videosize >> 16) & 0xffff) * videorowbytes +
	    m68k_page_offset(mac68k_vidphys));

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT; IIOMAPSIZE,
	 *   ROMMAPSIZE and VIDMAPSIZE are numbers of PTEs, hence we need
	 *   to round the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 */
	if (mmutype == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	lkptpa = nextpa;
	nextpa += PAGE_SIZE;
	p0upa = nextpa;
	nextpa += USPACE;
	kptpa = nextpa;
	nptpages = Sysptsize +
	    (IIOMAPSIZE + ROMMAPSIZE + VIDMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * PAGE_SIZE;

	for (i = 0; i < numranges; i++)
		if (low[i] <= firstpa && firstpa < high[i])
			break;
	if (i >= numranges || nextpa > high[i]) {
		if (mac68k_machine.do_graybars) {
			printf("Failure in NetBSD boot; ");
			if (i < numranges)
				printf("nextpa=0x%lx, high[%d]=0x%lx.\n",
				    nextpa, i, high[i]);
			else
				printf("can't find kernel RAM segment.\n");
			printf("You're hosed! Try booting with 32-bit ");
			printf("addressing enabled in the memory control ");
			printf("panel.\n");
			printf("Older machines may need Mode32 to get that ");
			printf("option.\n");
		}
		panic("Cannot work with the current memory mappings.");
	}

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
	if (mmutype == MMU_68040) {
		int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = PA2VA(kstpa, u_int *);
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		num = nptpages * (NPTEPG / SG4_LEV3SIZE);
		pte = &(PA2VA(kstpa, u_int *))[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
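		 * (Illustrative arithmetic, assuming the 4KB page size in
		 * use here: NPTEPG is 1024 and SG4_LEV3SIZE is 64, so each
		 * PT page needs 1024/64 == 16 level 2 descriptors, and one
		 * level 1 descriptor spans SG4_LEV2SIZE == 128 level 2
		 * descriptors, i.e. 32mb of address space.)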
		 */
		pte = PA2VA(kstpa, u_int *);
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &(PA2VA(kstpa, u_int*))[SG4_LEV1SIZE-1];
		pte = &(PA2VA(kstpa, u_int*))[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map kptmpa and the "last PT page".
		 */
		pte = &(PA2VA(kstpa, u_int*))
		    [kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE*2];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize Sysptmap
		 */
		pte = PA2VA(kptmpa, u_int *);
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all but the last two remaining entries.
		 */
		epte = &(PA2VA(kptmpa, u_int *))[NPTEPG-2];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last ones to point to Sysptmap and the page
		 * table page allocated earlier.
		 */
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		pte++;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = PA2VA(kstpa, u_int*);
		pte = PA2VA(kptmpa, u_int*);
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all but the last two remaining entries in both.
		 */
		epte = &(PA2VA(kptmpa, u_int *))[NPTEPG-2];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last ones to point to Sysptmap and the page
		 * table page allocated earlier.
		 */
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		ste++;
		pte++;
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all entries in the last kernel PT page
	 * (u-area PTEs will be validated later).
	 */
	pte = PA2VA(lkptpa, u_int *);
	epte = &pte[NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = PA2VA(kptpa, u_int *);
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * Validate PTEs for kernel text (RO).
	 * Pages up to "start" must be writable for the ROM.
	 */
	pte = &(PA2VA(kptpa, u_int *))[m68k_btop(KERNBASE)];
	/* XXX why KERNBASE relative? */
	epte = &pte[m68k_btop(m68k_round_page(start))];
	protopte = firstpa | PG_RW | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/* XXX why KERNBASE relative? */
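	/* The rest of the text, up to etext, is mapped read-only below. */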
	epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = (protopte & ~PG_PROT) | PG_RO;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &(PA2VA(kptpa, u_int *))[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (mmutype == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

#define	PTE2VA(pte)	m68k_ptob(pte - PA2VA(kptpa, pt_entry_t *))

	protopte = IOBase | PG_RW | PG_CI | PG_V;
	IOBase = PTE2VA(pte);
	epte = &pte[IIOMAPSIZE];
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	protopte = (pt_entry_t)ROMBase | PG_RO | PG_V;
	ROMBase = (caddr_t)PTE2VA(pte);
	epte = &pte[ROMMAPSIZE];
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	if (vidlen) {
		protopte = m68k_trunc_page(mac68k_vidphys) |
		    PG_RW | PG_V | PG_CI;
		newvideoaddr = PTE2VA(pte)
		    + m68k_page_offset(mac68k_vidphys);
		epte = &pte[VIDMAPSIZE];
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
	}
	virtual_avail = PTE2VA(pte);

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	Sysseg = PA2VA(kstpa, st_entry_t *);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	Sysptmap = PA2VA(kptmpa, pt_entry_t *);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Immediately follows `nptpages' of static kernel page table.
	 */
	Sysmap = (pt_entry_t *)m68k_ptob((NPTEPG - 2) * NPTEPG);

	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = PA2VA(p0upa, u_int *);
	epte = (u_int *)(PA2VA(p0upa, u_int) + USPACE);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	proc0paddr = PA2VA(p0upa, char *);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.  Since the last page is used
	 * for rebooting the system (code is copied there and
	 * execution continues from copied code before the MMU
	 * is disabled), the msgbuf will get trounced between
	 * reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
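	 * (Concretely, the last range's high[] is trimmed below by
	 * m68k_round_page(MSGBUFSIZE) plus one page, and avail_end
	 * is set to the trimmed value.)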
	 */
	avail_next = avail_start = m68k_round_page(nextpa);
	avail_remaining = 0;
	avail_range = -1;
	for (i = 0; i < numranges; i++) {
		if (low[i] <= avail_next && avail_next < high[i]) {
			avail_range = i;
			avail_remaining = high[i] - avail_next;
		} else if (avail_range != -1) {
			avail_remaining += (high[i] - low[i]);
		}
	}
	physmem = m68k_btop(avail_remaining + nextpa - firstpa);

	maxaddr = high[numranges - 1] - m68k_ptob(1);
	high[numranges - 1] -= (m68k_round_page(MSGBUFSIZE) + m68k_ptob(1));
	avail_end = high[numranges - 1];
	mem_size = m68k_ptob(physmem);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
	 */
	{
		int *kp;

		kp = (int *)&protection_codes;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}

	/*
	 * Kernel page/segment table allocated above,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm = (struct pmap *)&kernel_pmap_store;

		kpm->pm_stab = Sysseg;
		kpm->pm_ptab = Sysmap;
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (st_entry_t *)kstpa;
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps kptmpa and last-page page table
		 */
		if (mmutype == MMU_68040) {
			int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE),
			    SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			for (num = MAXKL2SIZE;
			     num < sizeof(kpm->pm_stfree)*NBBY;
			     num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = virtual_avail;

		CADDR1 = (caddr_t)va;
		va += PAGE_SIZE;
		CADDR2 = (caddr_t)va;
		va += PAGE_SIZE;
		vmmap = (caddr_t)va;
		va += PAGE_SIZE;
		msgbufaddr = (caddr_t)va;
		va += m68k_round_page(MSGBUFSIZE);
		virtual_avail = va;
	}
}

void
bootstrap_mac68k(int tc)
{
#if NZSC > 0
	extern void zs_init(void);
#endif
	extern int *esym;
	paddr_t nextpa;
	caddr_t oldROMBase;

	if (mac68k_machine.do_graybars)
		printf("Bootstrapping NetBSD/mac68k.\n");

	oldROMBase = ROMBase;
	mac68k_vidphys = videoaddr;

	if (((tc & 0x80000000) && (mmutype == MMU_68030)) ||
	    ((tc & 0x8000) && (mmutype == MMU_68040))) {
		if (mac68k_machine.do_graybars)
			printf("Getting mapping from MMU.\n");
		(void) get_mapping();
		if (mac68k_machine.do_graybars)
			printf("Done.\n");
	} else {
		/* MMU not enabled.  Fake up ranges. */
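		/*
		 * Assume a single contiguous bank starting at physical
		 * address 0, mach_memsize megabytes long.
		 */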
		numranges = 1;
		low[0] = 0;
		high[0] = mac68k_machine.mach_memsize * (1024 * 1024);
		if (mac68k_machine.do_graybars)
			printf("Faked range to byte 0x%lx.\n", high[0]);
	}
	nextpa = load_addr + m68k_round_page(esym);

	if (mac68k_machine.do_graybars)
		printf("Bootstrapping the pmap system.\n");

	pmap_bootstrap(nextpa, load_addr);

	if (mac68k_machine.do_graybars)
		printf("Pmap bootstrapped.\n");

	if (!vidlen)
		panic("Don't know how to relocate video!");

	if (mac68k_machine.do_graybars)
		printf("Moving ROMBase from %p to %p.\n", oldROMBase, ROMBase);

	mrg_fixupROMBase(oldROMBase, ROMBase);

	if (mac68k_machine.do_graybars)
		printf("Video address 0x%lx -> 0x%lx.\n",
		    (unsigned long)videoaddr, (unsigned long)newvideoaddr);

	mac68k_set_io_offsets(IOBase);

	/*
	 * If the serial ports are going (for console or 'echo'), then
	 * we need to make sure the IO change gets propagated properly.
	 * This resets the base addresses for the 8530 (serial) driver.
	 *
	 * WARNING!!! No printfs() (etc) BETWEEN zs_init() and the end
	 * of this function (where we start using the MMU), so that the
	 * new address is correct.
	 */
#if NZSC > 0
	if (zsinited != 0)
		zs_init();
#endif

	videoaddr = newvideoaddr;
}