/*	$NetBSD: atari_init.c,v 1.36 1998/02/24 13:02:06 leo Exp $	*/

/*
 * Copyright (c) 1995 Leo Weppelman
 * Copyright (c) 1994 Michael L. Hitch
 * Copyright (c) 1993 Markus Wild
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Markus Wild.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <vm/vm.h>
#include <sys/user.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/tty.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/msgbuf.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/dkbad.h>
#include <sys/reboot.h>
#include <sys/exec.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <vm/pmap.h>

#include <machine/vmparam.h>
#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/iomap.h>
#include <machine/mfp.h>
#include <machine/scu.h>
#include <machine/acia.h>
#include <machine/kcore.h>

#include <m68k/cpu.h>
#include <m68k/cacheops.h>

#include <atari/atari/intr.h>
#include <atari/atari/stalloc.h>
#include <atari/dev/ym2149reg.h>

#include "pci.h"

void start_c __P((int, u_int, u_int, u_int, char *));
static void atari_hwinit __P((void));
static void cpu_init_kcorehdr __P((u_long));
static void initcpu __P((void));
static void mmu030_setup __P((st_entry_t *, u_int, pt_entry_t *, u_int,
			      pt_entry_t *, u_int, u_int));
static void map_io_areas __P((pt_entry_t *, u_int, u_int));
static void set_machtype __P((void));

#if defined(M68040) || defined(M68060)
static void mmu040_setup __P((st_entry_t *, u_int, pt_entry_t *, u_int,
			      pt_entry_t *, u_int, u_int));
#endif

/*
 * All info needed to generate a panic dump. All fields are setup by
 * start_c().
 * XXX: Should check usage of phys_segs. There is some unwanted overlap
 *      here.... Also, the name is badly chosen. Phys_segs contains the
 *      segment descriptions _after_ reservations are made.
 * XXX: 'lowram' is obsoleted by the new panicdump format
 */
static cpu_kcore_hdr_t cpu_kcore_hdr;

extern u_int	lowram;
extern u_int	Sysptsize, Sysseg_pa, proc0paddr;
extern pt_entry_t *Sysptmap;
extern st_entry_t *Sysseg;
u_int		*Sysmap;
int		machineid, mmutype, cputype, astpending;
char		*vmmap;
pv_entry_t	pv_table;
#if defined(M68040) || defined(M68060)
extern int	protostfree;
#endif

extern char	*esym;
extern struct pcb *curpcb;

/*
 * This is the virtual address of physical page 0. Used by 'do_boot()'.
 */
vm_offset_t	page_zero;

/*
 * Crude support for allocation in ST-ram. Currently only used to allocate
 * video ram.
 * The physical address is also returned because the video init needs it to
 * set up the controller at a time when the vm-system is not yet operational,
 * so 'kvtop()' cannot be used.
 */
#ifndef ST_POOL_SIZE
#define	ST_POOL_SIZE	40			/* XXX: enough? */
#endif

u_long	st_pool_size = ST_POOL_SIZE * NBPG;	/* Patchable	*/
u_long	st_pool_virt, st_pool_phys;
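
/*
 * Rough sizing sketch: assuming the 8KB page size this port programs into
 * the MMU (see the TC setup in start_c() below), the default of
 * ST_POOL_SIZE (40) pages amounts to roughly 320KB reserved at the top of
 * ST-ram for this pool.
 */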

/*
 * Are we relocating the kernel to TT-Ram if possible? It is faster, but
 * it is also reported not to work on all TT's. So the default is NO.
 */
#ifndef RELOC_KERNEL
#define	RELOC_KERNEL	0
#endif
int	reloc_kernel = RELOC_KERNEL;		/* Patchable	*/

/*
 * This is the C-level entry function, it's called from locore.s.
 * Preconditions:
 *	Interrupts are disabled
 *	PA == VA, we don't have to relocate addresses before enabling
 *		the MMU
 *	Exec is no longer available (because we're loaded all over
 *		low memory, no ExecBase is available anymore)
 *
 * Its purpose is:
 *	Do the things that are done in locore.s in the hp300 version,
 *	this includes allocation of kernel maps and enabling the MMU.
 *
 * Some of the code in here is `stolen' from Amiga MACH, and was
 * written by Bryan Ford and Niklas Hallqvist.
 *
 * Very crude 68040 support by Michael L. Hitch.
 */

void
start_c(id, ttphystart, ttphysize, stphysize, esym_addr)
int	id;			/* Machine id				*/
u_int	ttphystart, ttphysize;	/* Start address and size of TT-ram	*/
u_int	stphysize;		/* Size of ST-ram			*/
char	*esym_addr;		/* Address of kernel '_esym' symbol	*/
{
	extern char	end[];
	extern void	etext __P((void));
	extern u_long	protorp[2];
	u_int		pstart;		/* Next available physical address */
	u_int		vstart;		/* Next available virtual address */
	u_int		avail;
	pt_entry_t	*pt;
	u_int		ptsize, ptextra;
	u_int		tc, i;
	u_int		*pg;
	u_int		pg_proto;
	u_int		end_loaded;
	u_long		kbase;
	u_int		kstsize;

	boot_segs[0].start = 0;
	boot_segs[0].end   = stphysize;
	boot_segs[1].start = ttphystart;
	boot_segs[1].end   = ttphystart + ttphysize;
	boot_segs[2].start = boot_segs[2].end = 0;	/* End of segments! */

	/*
	 * The following is a hack. We do not know how much ST memory we
	 * really need until after configuration has finished. At this
	 * time I have no idea how to grab ST memory at that point.
	 * The round_page() call is meant to correct errors made by
	 * binpatching!
	 */
	st_pool_size = m68k_round_page(st_pool_size);
	st_pool_phys = stphysize - st_pool_size;
	stphysize    = st_pool_phys;

	machineid = id;
	esym      = esym_addr;

	/*
	 * the kernel ends at end() or esym.
	 */
	if(esym == NULL)
		end_loaded = (u_int)end;
	else end_loaded = (u_int)esym;

	/*
	 * If we have enough fast-memory to put the kernel in and the
	 * RELOC_KERNEL option is set, do it!
	 */
	if((reloc_kernel != 0) && (ttphysize >= end_loaded))
		kbase = ttphystart;
	else kbase = 0;
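
	/*
	 * A short note on 'kbase' (sketching what the test above decides):
	 * the kernel is loaded at physical/virtual address 0, so
	 * 'end_loaded' is also the size of the loaded image (kernel plus
	 * symbol table). The test therefore checks that the whole image
	 * fits in TT-ram before choosing to relocate, and 'kbase' becomes
	 * the offset added to the physical addresses of kernel structures
	 * further down (0 means: stay in ST-ram).
	 */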

	/*
	 * update these as soon as possible!
	 */
	PAGE_SIZE  = NBPG;
	PAGE_MASK  = NBPG-1;
	PAGE_SHIFT = PG_SHIFT;

	/*
	 * Determine the type of machine we are running on. This needs
	 * to be done early (and before initcpu())!
	 */
	set_machtype();

	/*
	 * Initialize cpu specific stuff
	 */
	initcpu();

	/*
	 * We run the kernel from ST memory at the moment.
	 * The kernel segment table is put just behind the loaded image.
	 * pstart: start of usable ST memory
	 * avail : size of ST memory available.
	 */
	pstart = (u_int)end_loaded;
	pstart = m68k_round_page(pstart);
	avail  = stphysize - pstart;

	/*
	 * Calculate the number of pages needed for Sysseg.
	 * For the 68030, we need 256 descriptors (segment-table-entries).
	 * This easily fits into one page.
	 * For the 68040, both the level-1 and level-2 descriptors are
	 * stored into Sysseg. We currently handle a maximum sum of
	 * MAXKL2SIZE level-1 & level-2 tables.
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
#endif
		kstsize = 1;
	/*
	 * allocate the kernel segment table
	 */
	Sysseg    = (st_entry_t *)pstart;
	Sysseg_pa = (u_int)Sysseg + kbase;
	pstart   += kstsize * NBPG;
	avail    -= kstsize * NBPG;

	/*
	 * Determine the number of pte's we need for extra's like
	 * ST I/O maps.
	 */
	ptextra = btoc(STIO_SIZE);

	/*
	 * If present, add pci areas
	 */
	if (machineid & ATARI_HADES)
		ptextra += btoc(PCI_CONF_SIZE + PCI_IO_SIZE + PCI_MEM_SIZE);

	/*
	 * The 'pt' (the initial kernel pagetable) has to map the kernel and
	 * the I/O areas. The various I/O areas are mapped (virtually) at
	 * the top of the address space mapped by 'pt' (ie. just below
	 * Sysmap).
	 */
	pt      = (pt_entry_t *)pstart;
	ptsize  = (Sysptsize + howmany(ptextra, NPTEPG)) << PGSHIFT;
	pstart += ptsize;
	avail  -= ptsize;

	/*
	 * allocate kernel page table map
	 */
	Sysptmap = (pt_entry_t *)pstart;
	pstart  += NBPG;
	avail   -= NBPG;

	/*
	 * Set Sysmap; mapped after page table pages. Because I too (LWP)
	 * didn't understand the reason for this, I borrowed the following
	 * (slightly modified) comment from mac68k/locore.s:
	 * LAK:  There seems to be some confusion here about the next line,
	 * so I'll explain.  The kernel needs some way of dynamically
	 * modifying the page tables for its own virtual memory.  What it
	 * does is that it has a page table map.  This page table map is
	 * mapped right after the kernel itself (in our implementation; in
	 * HP's it was after the I/O space). Therefore, the first three (or
	 * so) entries in the segment table point to the first three pages
	 * of the page tables (which point to the kernel) and the next entry
	 * in the segment table points to the page table map (this is done
	 * later).  Therefore, the value of the pointer "Sysmap" will be
	 * something like 16M*3 = 48M.  When the kernel addresses this
	 * pointer (e.g., Sysmap[0]), it will get the first longword of the
	 * first page map (== pt[0]).  Since the page map mirrors the
	 * segment table, addressing any index of Sysmap will give you a
	 * PTE of the page maps which map the kernel.
	 */
	Sysmap = (u_int *)(ptsize << (SEGSHIFT - PGSHIFT));
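
	/*
	 * A sketch of the arithmetic above (assuming, as elsewhere in this
	 * file, that one page of PTEs spans NPTEPG * NBPG == 1 << SEGSHIFT
	 * bytes of virtual space): shifting the byte size of 'pt' left by
	 * (SEGSHIFT - PGSHIFT) multiplies it by NPTEPG, which yields the
	 * first virtual address beyond everything 'pt' maps. That is the
	 * address at which the page table pages themselves become visible,
	 * i.e. Sysmap.
	 */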

	/*
	 * Initialize segment tables
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		mmu040_setup(Sysseg, kstsize, pt, ptsize, Sysptmap, 1, kbase);
	else
#endif /* defined(M68040) || defined(M68060) */
		mmu030_setup(Sysseg, kstsize, pt, ptsize, Sysptmap, 1, kbase);

	/*
	 * initialize kernel page table page(s).
	 * Assume load at VA 0.
	 *	- Text pages are RO
	 *	- Page zero is invalid
	 */
	pg_proto = (0 + kbase) | PG_RO | PG_V;
	pg       = pt;
	*pg++ = PG_NV; pg_proto += NBPG;
	for(i = NBPG; i < (u_int)etext; i += NBPG, pg_proto += NBPG)
		*pg++ = pg_proto;

	/*
	 * data, bss and dynamic tables are read/write
	 */
	pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V;

#if defined(M68040) || defined(M68060)
	/*
	 * Map the kernel segment table cache invalidated for
	 * these machines (for the 68040 not strictly necessary, but
	 * recommended by Motorola; for the 68060 mandatory)
	 */
	if (mmutype == MMU_68040) {
		for (; i < (u_int)Sysseg; i += NBPG, pg_proto += NBPG)
			*pg++ = pg_proto;
		pg_proto = (pg_proto & ~PG_CCB) | PG_CI;
		for (; i < (u_int)&Sysseg[kstsize * NPTEPG]; i += NBPG,
		    pg_proto += NBPG)
			*pg++ = pg_proto;
		pg_proto = (pg_proto & ~PG_CI) | PG_CCB;
	}
#endif /* defined(M68040) || defined(M68060) */

	/*
	 * go till end of data allocated so far
	 * plus proc0 u-area (to be allocated)
	 */
	for(; i < pstart + USPACE; i += NBPG, pg_proto += NBPG)
		*pg++ = pg_proto;

	/*
	 * invalidate remainder of kernel PT
	 */
	while(pg < &pt[ptsize/sizeof(pt_entry_t)])
		*pg++ = PG_NV;

	/*
	 * Map various I/O areas
	 */
	map_io_areas(pt, ptsize, ptextra);

	/*
	 * Save KVA of proc0 user-area and allocate it
	 */
	proc0paddr = pstart;
	pstart    += USPACE;
	avail     -= USPACE;

	/*
	 * At this point, virtual and physical allocation start to diverge.
	 */
	vstart = pstart;

	/*
	 * Map the allocated space in ST-ram now. In the contig-case, there
	 * is no need to make a distinction between virtual and physical
	 * addresses. But I make it anyway to be prepared.
	 * Physical space is already reserved!
	 */
	st_pool_virt = vstart;
	pg           = &pt[vstart / NBPG];
	pg_proto     = st_pool_phys | PG_RW | PG_CI | PG_V;
	vstart      += st_pool_size;
	while(pg_proto < (st_pool_phys + st_pool_size)) {
		*pg++     = pg_proto;
		pg_proto += NBPG;
	}

	/*
	 * Map physical page_zero and page-zero+1 (First ST-ram page). We
	 * need to reference it in the reboot code. Two pages are mapped,
	 * because we must make sure 'doboot()' is contained in it (see the
	 * tricky copying there....).
	 */
	page_zero = vstart;
	pg        = &pt[vstart / NBPG];
	*pg++     = PG_RW | PG_CI | PG_V;
	vstart   += NBPG;
	*pg       = PG_RW | PG_CI | PG_V | NBPG;
	vstart   += NBPG;

	lowram = 0 >> PGSHIFT;	/* XXX */

	/*
	 * Fill in usable segments. The page indexes will be initialized
	 * later when all reservations are made.
	 */
	usable_segs[0].start = 0;
	usable_segs[0].end   = stphysize;
	usable_segs[1].start = ttphystart;
	usable_segs[1].end   = ttphystart + ttphysize;
	usable_segs[2].start = usable_segs[2].end = 0; /* End of segments! */

	if(kbase) {
		/*
		 * First page of ST-ram is unusable, reserve the space
		 * for the kernel in the TT-ram segment.
		 * Note: Because physical page-zero is partially mapped to
		 *       ROM by hardware, it is unusable.
		 */
		usable_segs[0].start  = NBPG;
		usable_segs[1].start += pstart;
	}
	else usable_segs[0].start += pstart;

	/*
	 * As all segment sizes are now valid, calculate page indexes and
	 * available physical memory.
	 */
	usable_segs[0].first_page = 0;
	for (i = 1; usable_segs[i].start; i++) {
		usable_segs[i].first_page  = usable_segs[i-1].first_page;
		usable_segs[i].first_page +=
		    (usable_segs[i-1].end - usable_segs[i-1].start) / NBPG;
	}
	for (i = 0, physmem = 0; usable_segs[i].start; i++)
		physmem += usable_segs[i].end - usable_segs[i].start;
	physmem >>= PGSHIFT;

	/*
	 * get the pmap module in sync with reality.
	 */
	pmap_bootstrap(vstart, stio_addr, ptextra);

	/*
	 * Prepare to enable the MMU.
	 * Setup and load SRP nolimit, share global, 4 byte PTE's
	 */
	protorp[0] = 0x80000202;
	protorp[1] = (u_int)Sysseg + kbase;	/* + segtable address */
	Sysseg_pa  = (u_int)Sysseg + kbase;

	cpu_init_kcorehdr(kbase);

	/*
	 * copy over the kernel (and all now initialized variables)
	 * to fastram. DONT use bcopy(), this beast is much larger
	 * than 128k!
	 */
	if(kbase) {
		register u_long	*lp, *le, *fp;

		lp = (u_long *)0;
		le = (u_long *)pstart;
		fp = (u_long *)kbase;
		while(lp < le)
			*fp++ = *lp++;
	}
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040) {
		/*
		 * movel Sysseg_pa,a0;
		 * movec a0,SRP;
		 * pflusha;
		 * movel #$0xc000,d0;
		 * movec d0,TC
		 */
		if (cputype == CPU_68060) {
			/* XXX: Need the branch cache be cleared? */
			asm volatile (".word 0x4e7a,0x0002;"
				      "orl #0x400000,d0;"
				      ".word 0x4e7b,0x0002" : : : "d0");
		}
		asm volatile ("movel %0,a0;"
			      ".word 0x4e7b,0x8807" : : "a" (Sysseg_pa) : "a0");
		asm volatile (".word 0xf518" : : );
		asm volatile ("movel #0xc000,d0;"
			      ".word 0x4e7b,0x0003" : : : "d0" );
	} else
#endif
	{
		asm volatile ("pmove %0@,srp" : : "a" (&protorp[0]));
		/*
		 * setup and load TC register.
		 * enable_cpr, enable_srp, pagesize=8k,
		 * A = 8 bits, B = 11 bits
		 */
		tc = 0x82d08b00;
		asm volatile ("pmove %0@,tc" : : "a" (&tc));
	}
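
	/*
	 * A decode of the TC value above, as I read the 68030 TC layout
	 * (for reference only): 0x82d08b00 sets E (translation enable) and
	 * SRE (use the supervisor root pointer), PS = 13 for 8KB pages,
	 * IS = 0, TIA = 8 and TIB = 11 index bits, TIC = TID = 0.
	 * 8 + 11 + 13 = 32, so the two table levels plus the page offset
	 * cover the whole 32-bit address, matching the comment above.
	 */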

	/* Is this to fool the optimizer?? */
	i = *(int *)proc0paddr;
	*(volatile int *)proc0paddr = i;

	/*
	 * Initialize the "u-area" pages.
	 * Must initialize p_addr before autoconfig or the
	 * fault handler will get a NULL reference.
	 */
	bzero((u_char *)proc0paddr, USPACE);
	proc0.p_addr = (struct user *)proc0paddr;
	curproc      = &proc0;
	curpcb       = &((struct user *)proc0paddr)->u_pcb;

	/*
	 * Get the hardware into a defined state
	 */
	atari_hwinit();

	/*
	 * Initialize stmem allocator
	 */
	init_stmem();

	/*
	 * Initialize interrupt mapping.
	 */
	intr_init();
}

/*
 * Try to figure out on what type of machine we are running.
 * Note: This module runs *before* the io-mapping is set up!
 */
static void
set_machtype()
{
	stio_addr = 0xff8000;	/* XXX: For TT & Falcon only */
	if(badbaddr((caddr_t)&MFP2->mf_gpip, sizeof(char))) {
		/*
		 * Watch out! We can also have a Hades with < 16Mb
		 * RAM here...
		 */
		if(!badbaddr((caddr_t)&MFP->mf_gpip, sizeof(char))) {
			machineid |= ATARI_FALCON;
			return;
		}
	}
	if(!badbaddr((caddr_t)(PCI_CONFB_PHYS + PCI_CONFM_PHYS), sizeof(char)))
		machineid |= ATARI_HADES;
	else machineid |= ATARI_TT;
}
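
/*
 * The probe logic above, summarized: no second MFP but a responding first
 * MFP means a Falcon; otherwise, a responding PCI configuration area means
 * a Hades, and anything else is taken to be a TT.
 */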

static void
atari_hwinit()
{
	/*
	 * Initialize the sound chip
	 */
	ym2149_init();

	/*
	 * Make sure that the midi acia will not generate an interrupt
	 * unless something attaches to it. We cannot do this for the
	 * keyboard acia because this breaks the '-d' option of the
	 * booter...
	 */
	MDI->ac_cs = 0;

	/*
	 * Initialize both MFP chips (if both present!) to generate
	 * auto-vectored interrupts with EOI. The active-edge registers are
	 * set up. The interrupt enable registers are set to disable all
	 * interrupts.
	 */
	MFP->mf_iera = MFP->mf_ierb = 0;
	MFP->mf_imra = MFP->mf_imrb = 0;
	MFP->mf_aer  = MFP->mf_ddr  = 0;
	MFP->mf_vr   = 0x40;
	if(machineid & (ATARI_TT|ATARI_HADES)) {
		MFP2->mf_iera = MFP2->mf_ierb = 0;
		MFP2->mf_imra = MFP2->mf_imrb = 0;
		MFP2->mf_aer  = 0x80;
		MFP2->mf_vr   = 0x50;
	}
	if(machineid & ATARI_TT) {
		/*
		 * Initialize the SCU, to enable interrupts on the SCC (ipl5),
		 * MFP (ipl6) and softints (ipl1).
		 */
		SCU->sys_mask = SCU_SYS_SOFT;
		SCU->vme_mask = SCU_MFP | SCU_SCC;
#ifdef DDB
		/*
		 * This allows people with the correct hardware modification
		 * to drop into the debugger from an NMI.
		 */
		SCU->sys_mask |= SCU_IRQ7;
#endif
	}

#if NPCI > 0
	if(machineid & ATARI_HADES) {
		/*
		 * Configure PCI-bus
		 */
		init_pci_bus();
	}
#endif

}

/*
 * Do the dull work of mapping the various I/O areas. They MUST be Cache
 * inhibited!
 * All I/O areas are virtually mapped at the end of the pt-table.
 */
static void
map_io_areas(pt, ptsize, ptextra)
pt_entry_t	*pt;
u_int		ptsize;		/* Size of 'pt' in bytes	*/
u_int		ptextra;	/* #of additional I/O pte's	*/
{
	vm_offset_t	ioaddr;
	pt_entry_t	*pg, *epg;
	pt_entry_t	pg_proto;
	u_long		mask;

	ioaddr = ((ptsize / sizeof(pt_entry_t)) - ptextra) * NBPG;

	/*
	 * Map ST-IO area
	 */
	stio_addr = ioaddr;
	ioaddr   += STIO_SIZE;
	pg        = &pt[stio_addr / NBPG];
	epg       = &pg[btoc(STIO_SIZE)];
	pg_proto  = STIO_PHYS | PG_RW | PG_CI | PG_V;
	while(pg < epg) {
		*pg++     = pg_proto;
		pg_proto += NBPG;
	}

	/*
	 * Map PCI areas
	 */
	if (machineid & ATARI_HADES) {

		pci_conf_addr = ioaddr;
		ioaddr       += PCI_CONF_SIZE;
		pg            = &pt[pci_conf_addr / NBPG];
		epg           = &pg[btoc(PCI_CONF_SIZE)];
		mask          = PCI_CONFM_PHYS;
		pg_proto      = PCI_CONFB_PHYS | PG_RW | PG_CI | PG_V;
		for(; pg < epg; mask >>= 1)
			*pg++ = pg_proto | mask;

		pci_io_addr = ioaddr;
		ioaddr     += PCI_IO_SIZE;
		epg         = &pg[btoc(PCI_IO_SIZE)];
		pg_proto    = PCI_IO_PHYS | PG_RW | PG_CI | PG_V;
		while(pg < epg) {
			*pg++     = pg_proto;
			pg_proto += NBPG;
		}

		pci_mem_addr = ioaddr;
		ioaddr      += PCI_MEM_SIZE;
		epg          = &pg[btoc(PCI_MEM_SIZE)];
		pg_proto     = PCI_MEM_PHYS | PG_RW | PG_CI | PG_V;
		while(pg < epg) {
			*pg++     = pg_proto;
			pg_proto += NBPG;
		}
	}
}
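
/*
 * A note on the PCI configuration mapping above: unlike the other areas,
 * its pages are not mapped to consecutive physical addresses. Each page
 * gets PCI_CONFB_PHYS with a different bit from PCI_CONFM_PHYS (shifted
 * right one position per page) OR'ed in, so every page of the area selects
 * a different device's configuration space (presumably one address line
 * per IDSEL, as is usual for type-0 configuration cycles).
 */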

/*
 * Used by dumpconf() to get the size of the machine-dependent panic-dump
 * header in disk blocks.
 */
int
cpu_dumpsize()
{
	int	size;

	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
	return (btodb(roundup(size, dbtob(1))));
}

/*
 * Called by dumpsys() to dump the machine-dependent header.
 * XXX: Assumes that it will all fit in one disk block.
 */
int
cpu_dump(dump, p_blkno)
int	(*dump) __P((dev_t, daddr_t, caddr_t, size_t));
daddr_t	*p_blkno;
{
	int		buf[dbtob(1)/sizeof(int)];
	int		error;
	kcore_seg_t	*kseg_p;
	cpu_kcore_hdr_t	*chdr_p;

	kseg_p = (kcore_seg_t *)buf;
	chdr_p = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*kseg_p)) / sizeof(int)];

	/*
	 * Generate a segment header
	 */
	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg_p->c_size = dbtob(1) - ALIGN(sizeof(*kseg_p));

	/*
	 * Add the md header
	 */
	*chdr_p = cpu_kcore_hdr;
	error = dump(dumpdev, *p_blkno, (caddr_t)buf, dbtob(1));
	*p_blkno += 1;
	return (error);
}

#if (M68K_NPHYS_RAM_SEGS < NMEM_SEGS)
#error "Configuration error: M68K_NPHYS_RAM_SEGS < NMEM_SEGS"
#endif
/*
 * Initialize the cpu_kcore_header.
 */
static void
cpu_init_kcorehdr(kbase)
u_long	kbase;
{
	cpu_kcore_hdr_t	*h = &cpu_kcore_hdr;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	extern char	end[];
	extern char	machine[];
	int		i;

	bzero(&cpu_kcore_hdr, sizeof(cpu_kcore_hdr));

	/*
	 * Initialize the `dispatcher' portion of the header.
	 */
	strcpy(h->name, machine);
	h->page_size = NBPG;
	h->kernbase  = KERNBASE;

	/*
	 * Fill in information about our MMU configuration.
	 */
	m->mmutype	= mmutype;
	m->sg_v		= SG_V;
	m->sg_frame	= SG_FRAME;
	m->sg_ishift	= SG_ISHIFT;
	m->sg_pmask	= SG_PMASK;
	m->sg40_shift1	= SG4_SHIFT1;
	m->sg40_mask2	= SG4_MASK2;
	m->sg40_shift2	= SG4_SHIFT2;
	m->sg40_mask3	= SG4_MASK3;
	m->sg40_shift3	= SG4_SHIFT3;
	m->sg40_addr1	= SG4_ADDR1;
	m->sg40_addr2	= SG4_ADDR2;
	m->pg_v		= PG_V;
	m->pg_frame	= PG_FRAME;

	/*
	 * Initialize pointer to kernel segment table.
	 */
	m->sysseg_pa = (u_int)Sysseg + kbase;

	/*
	 * Initialize relocation value such that:
	 *
	 *	pa = (va - KERNBASE) + reloc
	 */
	m->reloc = kbase;

	/*
	 * Define the end of the relocatable range.
	 */
	m->relocend = (u_int32_t)end;

	for (i = 0; i < NMEM_SEGS; i++) {
		m->ram_segs[i].start = boot_segs[i].start;
		m->ram_segs[i].size  = boot_segs[i].end -
		    boot_segs[i].start;
	}
}

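/*
 * mmu030_setup: build the initial (68030, two-level) kernel mapping of the
 * page-table pages. Every page of 'pt', plus Sysptmap itself, gets one
 * segment-table entry and one cache-inhibited PTE in Sysptmap; the
 * remaining entries of both tables are invalidated.
 */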
void
mmu030_setup(sysseg, kstsize, pt, ptsize, sysptmap, sysptsize, kbase)
st_entry_t	*sysseg;	/* System segment table		*/
u_int		kstsize;	/* size of 'sysseg' in pages	*/
pt_entry_t	*pt;		/* Kernel page table		*/
u_int		ptsize;		/* size of 'pt' in bytes	*/
pt_entry_t	*sysptmap;	/* System page table		*/
u_int		sysptsize;	/* size of 'sysptmap' in pages	*/
u_int		kbase;
{
	st_entry_t	sg_proto, *sg;
	pt_entry_t	pg_proto, *pg, *epg;

	sg_proto = ((u_int)pt + kbase) | SG_RW | SG_V;
	pg_proto = ((u_int)pt + kbase) | PG_RW | PG_CI | PG_V;

	/*
	 * Map the page table pages in both the HW segment table
	 * and the software Sysptmap. Note that Sysptmap is also
	 * considered a PT page, hence the +sysptsize.
	 */
	sg  = sysseg;
	pg  = sysptmap;
	epg = &pg[(ptsize >> PGSHIFT) + sysptsize];
	while(pg < epg) {
		*sg++ = sg_proto;
		*pg++ = pg_proto;
		sg_proto += NBPG;
		pg_proto += NBPG;
	}

	/*
	 * invalidate the remainder of the tables
	 */
	epg = &sysptmap[sysptsize * NPTEPG];
	while(pg < epg) {
		*sg++ = SG_NV;
		*pg++ = PG_NV;
	}
}

#if defined(M68040) || defined(M68060)
void
mmu040_setup(sysseg, kstsize, pt, ptsize, sysptmap, sysptsize, kbase)
st_entry_t	*sysseg;	/* System segment table		*/
u_int		kstsize;	/* size of 'sysseg' in pages	*/
pt_entry_t	*pt;		/* Kernel page table		*/
u_int		ptsize;		/* size of 'pt' in bytes	*/
pt_entry_t	*sysptmap;	/* System page table		*/
u_int		sysptsize;	/* size of 'sysptmap' in pages	*/
u_int		kbase;
{
	int		i;
	st_entry_t	sg_proto, *sg, *esg;
	pt_entry_t	pg_proto;

	/*
	 * First invalidate the entire "segment table" pages
	 * (levels 1 and 2 have the same "invalid" values).
	 */
	sg  = sysseg;
	esg = &sg[kstsize * NPTEPG];
	while (sg < esg)
		*sg++ = SG_NV;

	/*
	 * Initialize level 2 descriptors (which immediately
	 * follow the level 1 table). These should map 'pt' + 'sysptmap'.
	 * We need:
	 *	NPTEPG / SG4_LEV3SIZE
	 * level 2 descriptors to map each of the nptpages + 1
	 * pages of PTEs. Note that we set the "used" bit
	 * now to save the HW the expense of doing it.
	 */
	i   = ((ptsize >> PGSHIFT) + sysptsize) * (NPTEPG / SG4_LEV3SIZE);
	sg  = &sysseg[SG4_LEV1SIZE];
	esg = &sg[i];
	sg_proto = ((u_int)pt + kbase) | SG_U | SG_RW | SG_V;
	while (sg < esg) {
		*sg++ = sg_proto;
		sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t));
	}

	/*
	 * Initialize level 1 descriptors. We need:
	 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
	 * level 1 descriptors to map the 'num' level 2's.
	 */
	i = roundup(i, SG4_LEV2SIZE) / SG4_LEV2SIZE;
	protostfree = (-1 << (i + 1)) /* & ~(-1 << MAXKL2SIZE) */;
	sg  = sysseg;
	esg = &sg[i];
	sg_proto = ((u_int)&sg[SG4_LEV1SIZE] + kbase) | SG_U | SG_RW | SG_V;
	while (sg < esg) {
		*sg++ = sg_proto;
		sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t));
	}

	/*
	 * Initialize sysptmap
	 */
	sg  = sysptmap;
	esg = &sg[(ptsize >> PGSHIFT) + sysptsize];
	pg_proto = ((u_int)pt + kbase) | PG_RW | PG_CI | PG_V;
	while (sg < esg) {
		*sg++ = pg_proto;
		pg_proto += NBPG;
	}
	/*
	 * Invalidate rest of Sysptmap page
	 */
	esg = &sysptmap[sysptsize * NPTEPG];
	while (sg < esg)
		*sg++ = SG_NV;
}
#endif /* defined(M68040) || defined(M68060) */

#if defined(M68060)
int m68060_pcr_init = 0x21;	/* make this patchable */
#endif

static void
initcpu()
{
	typedef void trapfun __P((void));

	switch (cputype) {

#if defined(M68060)
	case CPU_68060:
		{
			extern trapfun	*vectab[256];
			extern trapfun	buserr60, addrerr4060, fpfault;
#if defined(M060SP)
			extern u_int8_t	FP_CALL_TOP[], I_CALL_TOP[];
#else
			extern trapfun	illinst;
#endif

			asm volatile ("movl %0,d0; .word 0x4e7b,0x0808" : :
			    "d"(m68060_pcr_init):"d0" );

			/* bus/addrerr vectors */
			vectab[2] = buserr60;
			vectab[3] = addrerr4060;

#if defined(M060SP)
			/* integer support */
			vectab[61] = (trapfun *)&I_CALL_TOP[128 + 0x00];

			/* floating point support */
			/*
			 * XXX maybe we really should run-time check for the
			 * stack frame format here:
			 */
			vectab[11] = (trapfun *)&FP_CALL_TOP[128 + 0x30];

			vectab[55] = (trapfun *)&FP_CALL_TOP[128 + 0x38];
			vectab[60] = (trapfun *)&FP_CALL_TOP[128 + 0x40];

			vectab[54] = (trapfun *)&FP_CALL_TOP[128 + 0x00];
			vectab[52] = (trapfun *)&FP_CALL_TOP[128 + 0x08];
			vectab[53] = (trapfun *)&FP_CALL_TOP[128 + 0x10];
			vectab[51] = (trapfun *)&FP_CALL_TOP[128 + 0x18];
			vectab[50] = (trapfun *)&FP_CALL_TOP[128 + 0x20];
			vectab[49] = (trapfun *)&FP_CALL_TOP[128 + 0x28];
#else
			vectab[61] = illinst;
#endif
			vectab[48] = fpfault;
		}
		break;
#endif /* defined(M68060) */
#if defined(M68040)
	case CPU_68040:
		{
			extern trapfun	*vectab[256];
			extern trapfun	buserr40, addrerr4060;

			/* bus/addrerr vectors */
			vectab[2] = buserr40;
			vectab[3] = addrerr4060;
		}
		break;
#endif /* defined(M68040) */
#if defined(M68030) || defined(M68020)
	case CPU_68030:
	case CPU_68020:
		{
			extern trapfun	*vectab[256];
			extern trapfun	buserr2030, addrerr2030;

			/* bus/addrerr vectors */
			vectab[2] = buserr2030;
			vectab[3] = addrerr2030;
		}
		break;
#endif /* defined(M68030) || defined(M68020) */
	}

	DCIS();
}

#ifdef DEBUG
void
dump_segtable(stp)
u_int	*stp;
{
	u_int	*s, *es;
	int	shift, i;

	s = stp;
	{
		es    = s + (ATARI_STSIZE >> 2);
		shift = SG_ISHIFT;
	}

	/*
	 * XXX need changes for 68040
	 */
	for (i = 0; s < es; s++, i++)
		if (*s & SG_V)
			printf("$%08lx: $%08lx\t", i << shift, *s & SG_FRAME);
	printf("\n");
}

void
dump_pagetable(ptp, i, n)
u_int	*ptp, i, n;
{
	u_int	*p, *ep;

	p  = ptp + i;
	ep = p + n;
	for (; p < ep; p++, i++)
		if (*p & PG_V)
			printf("$%08lx -> $%08lx\t", i, *p & PG_FRAME);
	printf("\n");
}

u_int
vmtophys(ste, vm)
u_int	*ste, vm;
{
	ste  = (u_int *)(*(ste + (vm >> SEGSHIFT)) & SG_FRAME);
	ste += (vm & SG_PMASK) >> PGSHIFT;
	return((*ste & -NBPG) | (vm & (NBPG - 1)));
}

#endif