/*	$NetBSD: atari_init.c,v 1.57 2003/04/01 23:47:01 thorpej Exp $	*/

/*
 * Copyright (c) 1995 Leo Weppelman
 * Copyright (c) 1994 Michael L. Hitch
 * Copyright (c) 1993 Markus Wild
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Markus Wild.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ddb.h"
#include "opt_mbtype.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/tty.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/msgbuf.h>
#include <sys/mbuf.h>
#include <sys/extent.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/dkbad.h>
#include <sys/reboot.h>
#include <sys/exec.h>
#include <sys/core.h>
#include <sys/kcore.h>

#include <uvm/uvm_extern.h>

#include <machine/vmparam.h>
#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/iomap.h>
#include <machine/mfp.h>
#include <machine/scu.h>
#include <machine/acia.h>
#include <machine/kcore.h>

#include <m68k/cpu.h>
#include <m68k/cacheops.h>

#include <atari/atari/intr.h>
#include <atari/atari/stalloc.h>
#include <atari/dev/ym2149reg.h>

#include "pci.h"

void start_c __P((int, u_int, u_int, u_int, char *));
static void atari_hwinit __P((void));
static void cpu_init_kcorehdr __P((u_long));
static void initcpu __P((void));
static void mmu030_setup __P((st_entry_t *, u_int, pt_entry_t *, u_int,
			      pt_entry_t *, u_int, u_int));
static void map_io_areas __P((pt_entry_t *, u_int, u_int));
static void set_machtype __P((void));

#if defined(M68040) || defined(M68060)
static void mmu040_setup __P((st_entry_t *, u_int, pt_entry_t *, u_int,
			      pt_entry_t *, u_int, u_int));
#endif

/*
 * Extent maps to manage all memory space, including I/O ranges.  Allocate
 * storage for 8 regions in each, initially.  Later, iomem_malloc_safe
 * will indicate that it's safe to use malloc() to dynamically allocate
 * region descriptors.
 * This means that the fixed static storage is only used for registering
 * the found memory regions and the bus-mapping of the console.
 *
 * The extent maps are not static!  They are used for bus address space
 * allocation.
 */
static long iomem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
struct extent *iomem_ex;
int iomem_malloc_safe;

/*
 * All info needed to generate a panic dump.  All fields are setup by
 * start_c().
 * XXX: Should check usage of phys_segs.  There is some unwanted overlap
 *      here....  Also, the name is badly chosen.  Phys_segs contains the
 *      segment descriptions _after_ reservations are made.
 * XXX: 'lowram' is obsoleted by the new panicdump format
 */
static cpu_kcore_hdr_t cpu_kcore_hdr;

extern u_int 	lowram;
extern u_int	Sysptsize, Sysseg_pa, proc0paddr;
extern pt_entry_t *Sysptmap;
extern st_entry_t *Sysseg;
u_int		*Sysmap;
int		machineid, mmutype, cputype, astpending;
char		*vmmap;
pv_entry_t	pv_table;
#if defined(M68040) || defined(M68060)
extern int	protostfree;
#endif

extern char		*esym;
extern struct pcb	*curpcb;

/*
 * This is the virtual address of physical page 0.  Used by 'do_boot()'.
 */
vaddr_t	page_zero;

/*
 * Crude support for allocation in ST-ram.  Currently only used to allocate
 * video ram.
 * The physical address is also returned because the video init needs it to
 * setup the controller at the time the vm-system is not yet operational so
 * 'kvtop()' cannot be used.
 */
#ifndef ST_POOL_SIZE
#define	ST_POOL_SIZE	40			/* XXX: enough? */
#endif

u_long	st_pool_size = ST_POOL_SIZE * PAGE_SIZE; /* Patchable	*/
u_long	st_pool_virt, st_pool_phys;

/*
 * Are we relocating the kernel to TT-Ram if possible?  It is faster, but
 * it is also reported not to work on all TT's.  So the default is NO.
 */
#ifndef	RELOC_KERNEL
#define	RELOC_KERNEL	0
#endif
int	reloc_kernel = RELOC_KERNEL;		/* Patchable	*/

/*
 * this is the C-level entry function, it's called from locore.s.
 * Preconditions:
 *	Interrupts are disabled
 *	PA == VA, we don't have to relocate addresses before enabling
 *		the MMU
 *	Exec is no longer available (because we're loaded all over
 *		low memory, no ExecBase is available anymore)
 *
 * Its purpose is:
 *	Do the things that are done in locore.s in the hp300 version,
 *		this includes allocation of kernel maps and enabling the MMU.
 *
 * Some of the code in here is `stolen' from Amiga MACH, and was
 * written by Bryan Ford and Niklas Hallqvist.
 *
 * Very crude 68040 support by Michael L. Hitch.
 */
int kernel_copyback = 1;

void
start_c(id, ttphystart, ttphysize, stphysize, esym_addr)
int	id;			/* Machine id			*/
u_int	ttphystart, ttphysize;	/* Start address and size of TT-ram */
u_int	stphysize;		/* Size of ST-ram		*/
char	*esym_addr;		/* Address of kernel '_esym' symbol */
{
	extern char	end[];
	extern void	etext __P((void));
	extern u_long	protorp[2];
	u_int		pstart;		/* Next available physical address */
	u_int		vstart;		/* Next available virtual address */
	u_int		avail;
	pt_entry_t	*pt;
	u_int		ptsize, ptextra;
	u_int		tc, i;
	u_int		*pg;
	u_int		pg_proto;
	u_int		end_loaded;
	u_long		kbase;
	u_int		kstsize;

#if defined(_MILANHW_)
	/* XXX
	 * XXX The right place to do this is probably the booter (Leo)
	 * XXX More than 16MB memory is not yet supported on the Milan!
	 * The Milan lies about the presence of TT-RAM.  If you insert
	 * 16MB it is split in 14MB ST starting at address 0 and 2MB TT RAM,
	 * starting at address 16MB.
	 */
	stphysize += ttphysize;
	ttphysize  = ttphystart = 0;
#endif
	boot_segs[0].start       = 0;
	boot_segs[0].end         = stphysize;
	boot_segs[1].start       = ttphystart;
	boot_segs[1].end         = ttphystart + ttphysize;
	boot_segs[2].start       = boot_segs[2].end = 0; /* End of segments! */

	/*
	 * The following is a hack.  We do not know how much ST memory we
	 * really need until after configuration has finished.  At this
	 * time I have no idea how to grab ST memory at that time.
	 * The round_page() call is meant to correct errors made by
	 * binpatching!
	 */
	st_pool_size   = m68k_round_page(st_pool_size);
	st_pool_phys   = stphysize - st_pool_size;
	stphysize      = st_pool_phys;

	machineid      = id;
	esym           = esym_addr;

	/*
	 * the kernel ends at end() or esym.
	 */
	if (esym == NULL)
		end_loaded = (u_int)end;
	else
		end_loaded = (u_int)esym;

	/*
	 * If we have enough fast-memory to put the kernel in and the
	 * RELOC_KERNEL option is set, do it!
	 */
	if ((reloc_kernel != 0) && (ttphysize >= end_loaded))
		kbase = ttphystart;
	else
		kbase = 0;

	/*
	 * Determine the type of machine we are running on.  This needs
	 * to be done early (and before initcpu())!
	 */
	set_machtype();

	/*
	 * Initialize cpu specific stuff
	 */
	initcpu();

	/*
	 * We run the kernel from ST memory at the moment.
	 * The kernel segment table is put just behind the loaded image.
	 * pstart: start of usable ST memory
	 * avail : size of ST memory available.
	 */
	pstart = (u_int)end_loaded;
	pstart = m68k_round_page(pstart);
	avail  = stphysize - pstart;

	/*
	 * Calculate the number of pages needed for Sysseg.
	 * For the 68030, we need 256 descriptors (segment-table-entries).
	 * This easily fits into one page.
	 * For the 68040, both the level-1 and level-2 descriptors are
	 * stored into Sysseg.  We currently handle a maximum sum of
	 * MAXKL2SIZE level-1 & level-2 tables.
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
#endif
		kstsize = 1;
	/*
	 * allocate the kernel segment table
	 */
	Sysseg  = (st_entry_t *)pstart;
	Sysseg_pa = (u_int)Sysseg + kbase;
	pstart += kstsize * PAGE_SIZE;
	avail  -= kstsize * PAGE_SIZE;

	/*
	 * Determine the number of pte's we need for extra's like
	 * ST I/O map's.
	 */
	ptextra = btoc(STIO_SIZE);

	/*
	 * If present, add pci areas
	 */
	if (machineid & ATARI_HADES)
		ptextra += btoc(PCI_CONF_SIZE + PCI_IO_SIZE + PCI_MEM_SIZE);
	if (machineid & ATARI_MILAN)
		ptextra += btoc(PCI_IO_SIZE + PCI_MEM_SIZE);
	ptextra += btoc(BOOTM_VA_POOL);

	/*
	 * The 'pt' (the initial kernel pagetable) has to map the kernel and
	 * the I/O areas.  The various I/O areas are mapped (virtually) at
	 * the top of the address space mapped by 'pt' (ie. just below Sysmap).
	 */
	pt      = (pt_entry_t *)pstart;
	ptsize  = (Sysptsize + howmany(ptextra, NPTEPG)) << PGSHIFT;
	pstart += ptsize;
	avail  -= ptsize;

	/*
	 * allocate kernel page table map
	 */
	Sysptmap = (pt_entry_t *)pstart;
	pstart  += PAGE_SIZE;
	avail   -= PAGE_SIZE;

	/*
	 * Set Sysmap; mapped after page table pages.  Because I too (LWP)
	 * didn't understand the reason for this, I borrowed the following
	 * (slightly modified) comment from mac68k/locore.s:
	 * LAK:  There seems to be some confusion here about the next line,
	 * so I'll explain.  The kernel needs some way of dynamically modifying
	 * the page tables for its own virtual memory.  What it does is that it
	 * has a page table map.  This page table map is mapped right after the
	 * kernel itself (in our implementation; in HP's it was after the I/O
	 * space).  Therefore, the first three (or so) entries in the segment
	 * table point to the first three pages of the page tables (which
	 * point to the kernel) and the next entry in the segment table points
	 * to the page table map (this is done later).  Therefore, the value
	 * of the pointer "Sysmap" will be something like 16M*3 = 48M.  When
	 * the kernel addresses this pointer (e.g., Sysmap[0]), it will get
	 * the first longword of the first page map (== pt[0]).  Since the
	 * page map mirrors the segment table, addressing any index of Sysmap
	 * will give you a PTE of the page maps which map the kernel.
	 */
	Sysmap = (u_int *)(ptsize << (SEGSHIFT - PGSHIFT));

	/*
	 * Initialize segment tables
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		mmu040_setup(Sysseg, kstsize, pt, ptsize, Sysptmap, 1, kbase);
	else
#endif /* defined(M68040) || defined(M68060) */
		mmu030_setup(Sysseg, kstsize, pt, ptsize, Sysptmap, 1, kbase);

	/*
	 * initialize kernel page table page(s).
	 * Assume load at VA 0.
	 *	- Text pages are RO
	 *	- Page zero is invalid
	 */
	pg_proto = (0 + kbase) | PG_RO | PG_V;
	pg       = pt;
	*pg++ = PG_NV; pg_proto += PAGE_SIZE;
	for (i = PAGE_SIZE; i < (u_int)etext;
	    i += PAGE_SIZE, pg_proto += PAGE_SIZE)
		*pg++ = pg_proto;

	/*
	 * data, bss and dynamic tables are read/write
	 */
	pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V;

#if defined(M68040) || defined(M68060)
	/*
	 * Map the kernel segment table cache invalidated for
	 * these machines (for the 68040 not strictly necessary, but
	 * recommended by Motorola; for the 68060 mandatory)
	 */
	if (mmutype == MMU_68040) {

		if (kernel_copyback)
			pg_proto |= PG_CCB;

		for (; i < (u_int)Sysseg; i += PAGE_SIZE, pg_proto += PAGE_SIZE)
			*pg++ = pg_proto;

		pg_proto = (pg_proto & ~PG_CCB) | PG_CI;
		for (; i < pstart; i += PAGE_SIZE, pg_proto += PAGE_SIZE)
			*pg++ = pg_proto;
		pg_proto = (pg_proto & ~PG_CI);
		if (kernel_copyback)
			pg_proto |= PG_CCB;
	}
#endif /* defined(M68040) || defined(M68060) */

	/*
	 * go till end of data allocated so far
	 * plus proc0 u-area (to be allocated)
	 */
	for (; i < pstart + USPACE; i += PAGE_SIZE, pg_proto += PAGE_SIZE)
		*pg++ = pg_proto;

	/*
	 * invalidate remainder of kernel PT
	 */
	while (pg < &pt[ptsize/sizeof(pt_entry_t)])
		*pg++ = PG_NV;

	/*
	 * Map various I/O areas
	 */
	map_io_areas(pt, ptsize, ptextra);

	/*
	 * Save KVA of proc0 user-area and allocate it
	 */
	proc0paddr = pstart;
	pstart    += USPACE;
	avail     -= USPACE;

	/*
	 * At this point, virtual and physical allocation starts to divert.
	 */
	vstart     = pstart;

	/*
	 * Map the allocated space in ST-ram now.  In the contig-case, there
	 * is no need to make a distinction between virtual and physical
	 * addresses.  But I make it anyway to be prepared.
	 * Physical space is already reserved!
	 */
	st_pool_virt = vstart;
	pg           = &pt[vstart / PAGE_SIZE];
	pg_proto     = st_pool_phys | PG_RW | PG_CI | PG_V;
	vstart      += st_pool_size;
	while (pg_proto < (st_pool_phys + st_pool_size)) {
		*pg++     = pg_proto;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * Map physical page_zero and page-zero+1 (First ST-ram page).  We need
	 * to reference it in the reboot code.  Two pages are mapped, because
	 * we must make sure 'doboot()' is contained in it (see the tricky
	 * copying there....).
	 */
	page_zero  = vstart;
	pg         = &pt[vstart / PAGE_SIZE];
	*pg++      = PG_RW | PG_CI | PG_V;
	vstart    += PAGE_SIZE;
	*pg        = PG_RW | PG_CI | PG_V | PAGE_SIZE;
	vstart    += PAGE_SIZE;

	lowram  = 0 >> PGSHIFT;	/* XXX */

	/*
	 * Fill in usable segments.  The page indexes will be initialized
	 * later when all reservations are made.
	 */
	usable_segs[0].start = 0;
	usable_segs[0].end   = stphysize;
	usable_segs[1].start = ttphystart;
	usable_segs[1].end   = ttphystart + ttphysize;
	usable_segs[2].start = usable_segs[2].end = 0; /* End of segments! */

	if (kbase) {
		/*
		 * First page of ST-ram is unusable, reserve the space
		 * for the kernel in the TT-ram segment.
		 * Note: Because physical page-zero is partially mapped to ROM
		 *       by hardware, it is unusable.
		 */
		usable_segs[0].start  = PAGE_SIZE;
		usable_segs[1].start += pstart;
	}
	else
		usable_segs[0].start += pstart;

	/*
	 * As all segment sizes are now valid, calculate page indexes and
	 * available physical memory.
	 */
	usable_segs[0].first_page = 0;
	for (i = 1; usable_segs[i].start; i++) {
		usable_segs[i].first_page  = usable_segs[i-1].first_page;
		usable_segs[i].first_page +=
			(usable_segs[i-1].end - usable_segs[i-1].start) / PAGE_SIZE;
	}
	for (i = 0, physmem = 0; usable_segs[i].start; i++)
		physmem += usable_segs[i].end - usable_segs[i].start;
	physmem >>= PGSHIFT;

	/*
	 * get the pmap module in sync with reality.
	 */
	pmap_bootstrap(vstart, stio_addr, ptextra);

	/*
	 * Prepare to enable the MMU.
	 * Setup and load SRP nolimit, share global, 4 byte PTE's
	 */
	protorp[0] = 0x80000202;
	protorp[1] = (u_int)Sysseg + kbase;	/* + segtable address */
	Sysseg_pa  = (u_int)Sysseg + kbase;

	cpu_init_kcorehdr(kbase);

	/*
	 * copy over the kernel (and all now initialized variables)
	 * to fastram.  DONT use bcopy(), this beast is much larger
	 * than 128k!
	 */
	if (kbase) {
		register u_long	*lp, *le, *fp;

		lp = (u_long *)0;
		le = (u_long *)pstart;
		fp = (u_long *)kbase;
		while (lp < le)
			*fp++ = *lp++;
	}
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040) {
		/*
		 * movel Sysseg_pa,a0;
		 * movec a0,SRP;
		 * pflusha;
		 * movel #$0xc000,d0;
		 * movec d0,TC
		 */
		if (cputype == CPU_68060) {
			/* XXX: Need the branch cache be cleared? */
			asm volatile (".word 0x4e7a,0x0002;"
				      "orl #0x400000,%%d0;"
				      ".word 0x4e7b,0x0002" : : : "d0");
		}
		asm volatile ("movel %0,%%a0;"
			      ".word 0x4e7b,0x8807" : : "a" (Sysseg_pa) : "a0");
		asm volatile (".word 0xf518" : : );
		asm volatile ("movel #0xc000,%%d0;"
			      ".word 0x4e7b,0x0003" : : : "d0" );
	} else
#endif
	{
		asm volatile ("pmove %0@,%%srp" : : "a" (&protorp[0]));
		/*
		 * setup and load TC register.
		 * enable_cpr, enable_srp, pagesize=8k,
		 * A = 8 bits, B = 11 bits
		 */
		tc = 0x82d08b00;
		asm volatile ("pmove %0@,%%tc" : : "a" (&tc));
	}

	/* Is this to fool the optimizer?? */
	i = *(int *)proc0paddr;
	*(volatile int *)proc0paddr = i;

	/*
	 * Initialize the "u-area" pages.
	 * Must initialize p_addr before autoconfig or the
	 * fault handler will get a NULL reference.
	 */
	bzero((u_char *)proc0paddr, USPACE);
	lwp0.l_addr = (struct user *)proc0paddr;
	curlwp      = &lwp0;
	curpcb      = &((struct user *)proc0paddr)->u_pcb;

	/*
	 * Get the hardware into a defined state
	 */
	atari_hwinit();

	/*
	 * Initialize stmem allocator
	 */
	init_stmem();

	/*
	 * Initialize the I/O mem extent map.
	 * Note: we don't have to check the return value since
	 * creation of a fixed extent map will never fail (since
	 * descriptor storage has already been allocated).
	 *
	 * N.B. The iomem extent manages _all_ physical addresses
	 * on the machine.  When the amount of RAM is found, all
	 * extents of RAM are allocated from the map.
	 */
	iomem_ex = extent_create("iomem", 0x0, 0xffffffff, M_DEVBUF,
	    (caddr_t)iomem_ex_storage, sizeof(iomem_ex_storage),
	    EX_NOCOALESCE|EX_NOWAIT);

	/*
	 * Allocate the physical RAM from the extent map
	 */
	for (i = 0; boot_segs[i].end != 0; i++) {
		if (extent_alloc_region(iomem_ex, boot_segs[i].start,
		    boot_segs[i].end - boot_segs[i].start, EX_NOWAIT)) {
			/* XXX: Ahum, should not happen ;-) */
			printf("Warning: Cannot allocate boot memory from"
			       " extent map!?\n");
		}
	}

	/*
	 * Initialize interrupt mapping.
	 */
	intr_init();
}

/*
 * Try to figure out on what type of machine we are running
 * Note: This module runs *before* the io-mapping is setup!
 */
static void
set_machtype()
{
#ifdef _MILANHW_
	machineid |= ATARI_MILAN;

#else
	stio_addr = 0xff8000;	/* XXX: For TT & Falcon only */
	if (badbaddr((caddr_t)&MFP2->mf_gpip, sizeof(char))) {
		/*
		 * Watch out!  We can also have a Hades with < 16Mb
		 * RAM here...
		 */
		if (!badbaddr((caddr_t)&MFP->mf_gpip, sizeof(char))) {
			machineid |= ATARI_FALCON;
			return;
		}
	}
	if (!badbaddr((caddr_t)(PCI_CONFB_PHYS + PCI_CONFM_PHYS), sizeof(char)))
		machineid |= ATARI_HADES;
	else
		machineid |= ATARI_TT;
#endif /* _MILANHW_ */
}

/*
 * Bring the interrupt-generating hardware (sound chip, ACIA's, MFP's,
 * SCU, PCI bus) into a known, quiet state before autoconfiguration.
 */
static void
atari_hwinit()
{
#if defined(_ATARIHW_)
	/*
	 * Initialize the sound chip
	 */
	ym2149_init();

	/*
	 * Make sure that the midi acia will not generate an interrupt
	 * unless something attaches to it.  We cannot do this for the
	 * keyboard acia because this breaks the '-d' option of the
	 * booter...
	 */
	MDI->ac_cs = 0;
#endif /* defined(_ATARIHW_) */

	/*
	 * Initialize both MFP chips (if both present!) to generate
	 * auto-vectored interrupts with EOI.  The active-edge registers are
	 * set up.  The interrupt enable registers are set to disable all
	 * interrupts.
	 */
	MFP->mf_iera  = MFP->mf_ierb = 0;
	MFP->mf_imra  = MFP->mf_imrb = 0;
	MFP->mf_aer   = MFP->mf_ddr  = 0;
	MFP->mf_vr    = 0x40;

#if defined(_ATARIHW_)
	if (machineid & (ATARI_TT|ATARI_HADES)) {
		MFP2->mf_iera = MFP2->mf_ierb = 0;
		MFP2->mf_imra = MFP2->mf_imrb = 0;
		MFP2->mf_aer  = 0x80;
		MFP2->mf_vr   = 0x50;
	}

	if (machineid & ATARI_TT) {
		/*
		 * Initialize the SCU, to enable interrupts on the SCC (ipl5),
		 * MFP (ipl6) and softints (ipl1).
		 */
		SCU->sys_mask = SCU_SYS_SOFT;
		SCU->vme_mask = SCU_MFP | SCU_SCC;
#ifdef DDB
		/*
		 * This allows people with the correct hardware modification
		 * to drop into the debugger from an NMI.
		 */
		SCU->sys_mask |= SCU_IRQ7;
#endif
	}
#endif /* defined(_ATARIHW_) */

#if NPCI > 0
	if (machineid & (ATARI_HADES|ATARI_MILAN)) {
		/*
		 * Configure PCI-bus
		 */
		init_pci_bus();
	}
#endif

}

/*
 * Do the dull work of mapping the various I/O areas.  They MUST be Cache
 * inhibited!
 * All I/O areas are virtually mapped at the end of the pt-table.
 */
static void
map_io_areas(pt, ptsize, ptextra)
pt_entry_t	*pt;
u_int		ptsize;		/* Size of 'pt' in bytes	*/
u_int		ptextra;	/* #of additional I/O pte's	*/
{
	extern void	bootm_init __P((vaddr_t, pt_entry_t *, u_long));
	vaddr_t		ioaddr;
	pt_entry_t	*pg, *epg;
	pt_entry_t	pg_proto;
	u_long		mask;

	ioaddr = ((ptsize / sizeof(pt_entry_t)) - ptextra) * PAGE_SIZE;

	/*
	 * Map ST-IO area
	 */
	stio_addr = ioaddr;
	ioaddr   += STIO_SIZE;
	pg        = &pt[stio_addr / PAGE_SIZE];
	epg       = &pg[btoc(STIO_SIZE)];
#ifdef _MILANHW_
	/*
	 * Turn on byte swaps in the ST I/O area.  On the Milan, the
	 * U0 signal of the MMU controls the BigEndian signal
	 * of the PLX9080.  We use this setting so we can read/write the
	 * PLX registers (and PCI-config space) in big-endian mode.
	 */
	pg_proto  = STIO_PHYS | PG_RW | PG_CI | PG_V | 0x100;
#else
	pg_proto  = STIO_PHYS | PG_RW | PG_CI | PG_V;
#endif
	while (pg < epg) {
		*pg++     = pg_proto;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * Map PCI areas
	 */
	if (machineid & ATARI_HADES) {
		/*
		 * Only Hades maps the PCI-config space!
		 */
		pci_conf_addr = ioaddr;
		ioaddr       += PCI_CONF_SIZE;
		pg            = &pt[pci_conf_addr / PAGE_SIZE];
		epg           = &pg[btoc(PCI_CONF_SIZE)];
		mask          = PCI_CONFM_PHYS;
		pg_proto      = PCI_CONFB_PHYS | PG_RW | PG_CI | PG_V;
		for (; pg < epg; mask <<= 1)
			*pg++ = pg_proto | mask;
	}
	else
		pci_conf_addr = 0;	/* XXX: should crash */

	if (machineid & (ATARI_HADES|ATARI_MILAN)) {
		pci_io_addr   = ioaddr;
		ioaddr       += PCI_IO_SIZE;
		pg	      = &pt[pci_io_addr / PAGE_SIZE];
		epg           = &pg[btoc(PCI_IO_SIZE)];
		pg_proto      = PCI_IO_PHYS | PG_RW | PG_CI | PG_V;
		while (pg < epg) {
			*pg++     = pg_proto;
			pg_proto += PAGE_SIZE;
		}

		pci_mem_addr  = ioaddr;
		/* Provide an uncached PCI address for the MILAN */
		pci_mem_uncached = ioaddr;
		ioaddr       += PCI_MEM_SIZE;
		epg           = &pg[btoc(PCI_MEM_SIZE)];
		pg_proto      = PCI_VGA_PHYS | PG_RW | PG_CI | PG_V;
		while (pg < epg) {
			*pg++     = pg_proto;
			pg_proto += PAGE_SIZE;
		}
	}

	bootm_init(ioaddr, pg, BOOTM_VA_POOL);
	/*
	 * ioaddr += BOOTM_VA_POOL;
	 * pg = &pg[btoc(BOOTM_VA_POOL)];
	 */
}

/*
 * Used by dumpconf() to get the size of the machine-dependent panic-dump
 * header in disk blocks.
 */
int
cpu_dumpsize()
{
	int	size;

	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
	return (btodb(roundup(size, dbtob(1))));
}

/*
 * Called by dumpsys() to dump the machine-dependent header.
 * XXX: Assumes that it will all fit in one diskblock.
 */
int
cpu_dump(dump, p_blkno)
int	(*dump) __P((dev_t, daddr_t, caddr_t, size_t));
daddr_t	*p_blkno;
{
	int		buf[dbtob(1)/sizeof(int)];
	int		error;
	kcore_seg_t	*kseg_p;
	cpu_kcore_hdr_t	*chdr_p;

	kseg_p = (kcore_seg_t *)buf;
	chdr_p = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*kseg_p)) / sizeof(int)];

	/*
	 * Generate a segment header
	 */
	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg_p->c_size = dbtob(1) - ALIGN(sizeof(*kseg_p));

	/*
	 * Add the md header
	 */
	*chdr_p = cpu_kcore_hdr;
	error = dump(dumpdev, *p_blkno, (caddr_t)buf, dbtob(1));
	*p_blkno += 1;
	return (error);
}

#if (M68K_NPHYS_RAM_SEGS < NMEM_SEGS)
#error "Configuration error: M68K_NPHYS_RAM_SEGS < NMEM_SEGS"
#endif
/*
 * Initialize the cpu_kcore_header.
 */
static void
cpu_init_kcorehdr(kbase)
u_long	kbase;
{
	cpu_kcore_hdr_t	*h = &cpu_kcore_hdr;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	extern char	end[];
	int		i;

	bzero(&cpu_kcore_hdr, sizeof(cpu_kcore_hdr));

	/*
	 * Initialize the `dispatcher' portion of the header.
	 */
	strcpy(h->name, machine);
	h->page_size = PAGE_SIZE;
	h->kernbase = KERNBASE;

	/*
	 * Fill in information about our MMU configuration.
	 */
	m->mmutype	= mmutype;
	m->sg_v		= SG_V;
	m->sg_frame	= SG_FRAME;
	m->sg_ishift	= SG_ISHIFT;
	m->sg_pmask	= SG_PMASK;
	m->sg40_shift1	= SG4_SHIFT1;
	m->sg40_mask2	= SG4_MASK2;
	m->sg40_shift2	= SG4_SHIFT2;
	m->sg40_mask3	= SG4_MASK3;
	m->sg40_shift3	= SG4_SHIFT3;
	m->sg40_addr1	= SG4_ADDR1;
	m->sg40_addr2	= SG4_ADDR2;
	m->pg_v		= PG_V;
	m->pg_frame	= PG_FRAME;

	/*
	 * Initialize pointer to kernel segment table.
	 */
	m->sysseg_pa = (u_int)Sysseg + kbase;

	/*
	 * Initialize relocation value such that:
	 *
	 *	pa = (va - KERNBASE) + reloc
	 */
	m->reloc = kbase;

	/*
	 * Define the end of the relocatable range.
	 */
	m->relocend = (u_int32_t)end;

	for (i = 0; i < NMEM_SEGS; i++) {
		m->ram_segs[i].start = boot_segs[i].start;
		m->ram_segs[i].size  = boot_segs[i].end -
					boot_segs[i].start;
	}
}

/*
 * Build the 68030 kernel segment table and software Sysptmap: one
 * level of segment descriptors pointing at the kernel page table pages.
 */
void
mmu030_setup(sysseg, kstsize, pt, ptsize, sysptmap, sysptsize, kbase)
st_entry_t	*sysseg;	/* System segment table		*/
u_int		kstsize;	/* size of 'sysseg' in pages	*/
pt_entry_t	*pt;		/* Kernel page table		*/
u_int		ptsize;		/* size	of 'pt' in bytes	*/
pt_entry_t	*sysptmap;	/* System page table		*/
u_int		sysptsize;	/* size of 'sysptmap' in pages	*/
u_int		kbase;
{
	st_entry_t	sg_proto, *sg;
	pt_entry_t	pg_proto, *pg, *epg;

	sg_proto = ((u_int)pt + kbase) | SG_RW | SG_V;
	pg_proto = ((u_int)pt + kbase) | PG_RW | PG_CI | PG_V;

	/*
	 * Map the page table pages in both the HW segment table
	 * and the software Sysptmap.  Note that Sysptmap is also
	 * considered a PT page, hence the +sysptsize.
	 */
	sg  = sysseg;
	pg  = sysptmap;
	epg = &pg[(ptsize >> PGSHIFT) + sysptsize];
	while (pg < epg) {
		*sg++ = sg_proto;
		*pg++ = pg_proto;
		sg_proto += PAGE_SIZE;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * invalidate the remainder of the tables
	 */
	epg = &sysptmap[sysptsize * NPTEPG];
	while (pg < epg) {
		*sg++ = SG_NV;
		*pg++ = PG_NV;
	}
}

#if defined(M68040) || defined(M68060)
/*
 * Build the two-level (level-1 + level-2) 68040/68060 kernel segment
 * table and the software Sysptmap for the kernel page table pages.
 */
void
mmu040_setup(sysseg, kstsize, pt, ptsize, sysptmap, sysptsize, kbase)
st_entry_t	*sysseg;	/* System segment table		*/
u_int		kstsize;	/* size of 'sysseg' in pages	*/
pt_entry_t	*pt;		/* Kernel page table		*/
u_int		ptsize;		/* size	of 'pt' in bytes	*/
pt_entry_t	*sysptmap;	/* System page table		*/
u_int		sysptsize;	/* size of 'sysptmap' in pages	*/
u_int		kbase;
{
	int		i;
	st_entry_t	sg_proto, *sg, *esg;
	pt_entry_t	pg_proto;

	/*
	 * First invalidate the entire "segment table" pages
	 * (levels 1 and 2 have the same "invalid" values).
	 */
	sg  = sysseg;
	esg = &sg[kstsize * NPTEPG];
	while (sg < esg)
		*sg++ = SG_NV;

	/*
	 * Initialize level 2 descriptors (which immediately
	 * follow the level 1 table).  These should map 'pt' + 'sysptmap'.
	 * We need:
	 *	NPTEPG / SG4_LEV3SIZE
	 * level 2 descriptors to map each of the nptpages + 1
	 * pages of PTEs.  Note that we set the "used" bit
	 * now to save the HW the expense of doing it.
	 */
	i   = ((ptsize >> PGSHIFT) + sysptsize) * (NPTEPG / SG4_LEV3SIZE);
	sg  = &sysseg[SG4_LEV1SIZE];
	esg = &sg[i];
	sg_proto = ((u_int)pt + kbase) | SG_U | SG_RW | SG_V;
	while (sg < esg) {
		*sg++     = sg_proto;
		sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t));
	}

	/*
	 * Initialize level 1 descriptors.  We need:
	 *	roundup(num, SG4_LEV2SIZE) / SG4_LEVEL2SIZE
	 * level 1 descriptors to map the 'num' level 2's.
	 */
	i   = roundup(i, SG4_LEV2SIZE) / SG4_LEV2SIZE;
	protostfree = (-1 << (i + 1)) /* & ~(-1 << MAXKL2SIZE) */;
	sg  = sysseg;
	esg = &sg[i];
	sg_proto = ((u_int)&sg[SG4_LEV1SIZE] + kbase) | SG_U | SG_RW |SG_V;
	while (sg < esg) {
		*sg++     = sg_proto;
		sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t));
	}

	/*
	 * Initialize sysptmap
	 */
	sg  = sysptmap;
	esg = &sg[(ptsize >> PGSHIFT) + sysptsize];
	pg_proto = ((u_int)pt + kbase) | PG_RW | PG_CI | PG_V;
	while (sg < esg) {
		*sg++     = pg_proto;
		pg_proto += PAGE_SIZE;
	}
	/*
	 * Invalidate rest of Sysptmap page
	 */
	esg = &sysptmap[sysptsize * NPTEPG];
	while (sg < esg)
		*sg++ = SG_NV;
}
#endif /* M68040 */

#if defined(M68060)
int m68060_pcr_init = 0x21;	/* make this patchable */
#endif

/*
 * CPU-type specific initialization: set the 68060 PCR and hook the
 * per-CPU bus-error/address-error (and 68060 FP/integer emulation)
 * vectors into the vector table.
 */
static void
initcpu()
{
	typedef void trapfun __P((void));

	switch (cputype) {

#if defined(M68060)
	case CPU_68060:
		{
			extern trapfun	*vectab[256];
			extern trapfun	buserr60, addrerr4060, fpfault;
#if defined(M060SP)
			extern u_int8_t	FP_CALL_TOP[], I_CALL_TOP[];
#else
			extern trapfun illinst;
#endif

			asm volatile ("movl %0,%%d0; .word 0x4e7b,0x0808" : :
					"d"(m68060_pcr_init):"d0" );

			/* bus/addrerr vectors */
			vectab[2] = buserr60;
			vectab[3] = addrerr4060;

#if defined(M060SP)
			/* integer support */
			vectab[61] = (trapfun *)&I_CALL_TOP[128 + 0x00];

			/* floating point support */
			/*
			 * XXX maybe we really should run-time check for the
			 * stack frame format here:
			 */
			vectab[11] = (trapfun *)&FP_CALL_TOP[128 + 0x30];

			vectab[55] = (trapfun *)&FP_CALL_TOP[128 + 0x38];
			vectab[60] = (trapfun *)&FP_CALL_TOP[128 + 0x40];

			vectab[54] = (trapfun *)&FP_CALL_TOP[128 + 0x00];
			vectab[52] = (trapfun *)&FP_CALL_TOP[128 + 0x08];
			vectab[53] = (trapfun *)&FP_CALL_TOP[128 + 0x10];
			vectab[51] = (trapfun *)&FP_CALL_TOP[128 + 0x18];
			vectab[50] = (trapfun *)&FP_CALL_TOP[128 + 0x20];
			vectab[49] = (trapfun *)&FP_CALL_TOP[128 + 0x28];
#else
			vectab[61] = illinst;
#endif
			vectab[48] = fpfault;
		}
		break;
#endif /* defined(M68060) */
#if defined(M68040)
	case CPU_68040:
		{
			extern trapfun	*vectab[256];
			extern trapfun	buserr40, addrerr4060;

			/* bus/addrerr vectors */
			vectab[2] = buserr40;
			vectab[3] = addrerr4060;
		}
		break;
#endif /* defined(M68040) */
#if defined(M68030) || defined(M68020)
	case CPU_68030:
	case CPU_68020:
		{
			extern trapfun	*vectab[256];
			extern trapfun	buserr2030, addrerr2030;

			/* bus/addrerr vectors */
			vectab[2] = buserr2030;
			vectab[3] = addrerr2030;
		}
		break;
#endif /* defined(M68030) || defined(M68020) */
	}

	DCIS();
}

#ifdef DEBUG
void	dump_segtable __P((u_int *));
void	dump_pagetable __P((u_int *, u_int, u_int));
u_int	vmtophys __P((u_int *, u_int));

/*
 * Debug helper: print all valid entries of a segment table.
 */
void
dump_segtable(stp)
	u_int *stp;
{
	u_int *s, *es;
	int shift, i;

	s = stp;
	{
		es = s + (ATARI_STSIZE >> 2);
		shift = SG_ISHIFT;
	}

	/*
	 * XXX need changes for 68040
	 */
	for (i = 0; s < es; s++, i++)
		if (*s & SG_V)
			printf("$%08x: $%08x\t", i << shift, *s & SG_FRAME);
	printf("\n");
}

/*
 * Debug helper: print 'n' valid page-table entries starting at index 'i'.
 */
void
dump_pagetable(ptp, i, n)
	u_int *ptp, i, n;
{
	u_int *p, *ep;

	p = ptp + i;
	ep = p + n;
	for (; p < ep; p++, i++)
		if (*p & PG_V)
			printf("$%08x -> $%08x\t", i, *p & PG_FRAME);
	printf("\n");
}

/*
 * Debug helper: translate virtual address 'vm' to a physical address by
 * walking the given segment table (68030-style two-level walk).
 */
u_int
vmtophys(ste, vm)
	u_int *ste, vm;
{
	ste = (u_int *) (*(ste + (vm >> SEGSHIFT)) & SG_FRAME);
	ste += (vm & SG_PMASK) >> PGSHIFT;
	return((*ste & -PAGE_SIZE) | (vm & (PAGE_SIZE - 1)));
}

#endif