/*	$NetBSD: atari_init.c,v 1.58 2003/06/23 11:01:09 martin Exp $	*/

/*
 * Copyright (c) 1995 Leo Weppelman
 * Copyright (c) 1994 Michael L. Hitch
 * Copyright (c) 1993 Markus Wild
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Markus Wild.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ddb.h"
#include "opt_mbtype.h"
#include "opt_m060sp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/tty.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/msgbuf.h>
#include <sys/mbuf.h>
#include <sys/extent.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/dkbad.h>
#include <sys/reboot.h>
#include <sys/exec.h>
#include <sys/core.h>
#include <sys/kcore.h>

#include <uvm/uvm_extern.h>

#include <machine/vmparam.h>
#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/iomap.h>
#include <machine/mfp.h>
#include <machine/scu.h>
#include <machine/acia.h>
#include <machine/kcore.h>

#include <m68k/cpu.h>
#include <m68k/cacheops.h>

#include <atari/atari/intr.h>
#include <atari/atari/stalloc.h>
#include <atari/dev/ym2149reg.h>

#include "pci.h"

void		start_c __P((int, u_int, u_int, u_int, char *));
static void	atari_hwinit __P((void));
static void	cpu_init_kcorehdr __P((u_long));
static void	initcpu __P((void));
static void	mmu030_setup __P((st_entry_t *, u_int, pt_entry_t *, u_int,
				  pt_entry_t *, u_int, u_int));
static void	map_io_areas __P((pt_entry_t *, u_int, u_int));
static void	set_machtype __P((void));

#if defined(M68040) || defined(M68060)
static void	mmu040_setup __P((st_entry_t *, u_int, pt_entry_t *, u_int,
				  pt_entry_t *, u_int, u_int));
#endif

/*
 * Extent maps to manage all memory space, including I/O ranges.
 * Allocate storage for 8 regions in each, initially.  Later,
 * iomem_malloc_safe will indicate that it is safe to use malloc()
 * to dynamically allocate region descriptors.
 * This means that the fixed static storage is only used for registering
 * the found memory regions and the bus-mapping of the console.
 *
 * The extent maps are not static!  They are used for bus address space
 * allocation.
 */
static long iomem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
struct extent *iomem_ex;
int iomem_malloc_safe;

/*
 * All info needed to generate a panic dump.  All fields are set up by
 * start_c().
 * XXX: Should check usage of phys_segs.  There is some unwanted overlap
 *      here....  Also, the name is badly chosen.  Phys_segs contains the
 *      segment descriptions _after_ reservations are made.
 * XXX: 'lowram' is obsoleted by the new panicdump format.
 */
static cpu_kcore_hdr_t cpu_kcore_hdr;

extern u_int	lowram;
extern u_int	Sysptsize, Sysseg_pa, proc0paddr;
extern pt_entry_t *Sysptmap;
extern st_entry_t *Sysseg;
u_int		*Sysmap;
int		machineid, mmutype, cputype, astpending;
char		*vmmap;
pv_entry_t	pv_table;
#if defined(M68040) || defined(M68060)
extern int	protostfree;
#endif

extern char	*esym;
extern struct pcb *curpcb;

/*
 * This is the virtual address of physical page 0.  Used by 'do_boot()'.
 */
vaddr_t	page_zero;

/*
 * Crude support for allocation in ST-ram.  Currently only used to allocate
 * video ram.
 * The physical address is also returned because the video init needs it to
 * set up the controller at a time when the VM system is not yet operational,
 * so 'kvtop()' cannot be used.
 */
#ifndef ST_POOL_SIZE
#define	ST_POOL_SIZE	40		/* XXX: enough? */
#endif

u_long	st_pool_size = ST_POOL_SIZE * PAGE_SIZE;	/* Patchable	*/
u_long	st_pool_virt, st_pool_phys;

/*
 * Are we relocating the kernel to TT-RAM if possible?  It is faster, but
 * it is also reported not to work on all TTs.  So the default is NO.
 */
#ifndef RELOC_KERNEL
#define	RELOC_KERNEL	0
#endif
int	reloc_kernel = RELOC_KERNEL;	/* Patchable	*/

/*
 * This is the C-level entry function; it is called from locore.s.
 * Preconditions:
 *	Interrupts are disabled.
 *	PA == VA, we don't have to relocate addresses before enabling
 *		the MMU.
 *	Exec is no longer available (because we're loaded all over
 *		low memory, no ExecBase is available anymore).
 *
 * Its purpose is:
 *	Do the things that are done in locore.s in the hp300 version;
 *	this includes allocation of kernel maps and enabling the MMU.
 *
 * Some of the code in here is `stolen' from Amiga MACH, and was
 * written by Bryan Ford and Niklas Hallqvist.
 *
 * Very crude 68040 support by Michael L. Hitch.
 */
int kernel_copyback = 1;

void
start_c(id, ttphystart, ttphysize, stphysize, esym_addr)
	int	id;			/* Machine id			*/
	u_int	ttphystart, ttphysize;	/* Start address and size of TT-ram */
	u_int	stphysize;		/* Size of ST-ram		*/
	char	*esym_addr;		/* Address of kernel '_esym' symbol */
{
	extern char	end[];
	extern void	etext __P((void));
	extern u_long	protorp[2];
	u_int		pstart;		/* Next available physical address */
	u_int		vstart;		/* Next available virtual address  */
	u_int		avail;
	pt_entry_t	*pt;
	u_int		ptsize, ptextra;
	u_int		tc, i;
	u_int		*pg;
	u_int		pg_proto;
	u_int		end_loaded;
	u_long		kbase;
	u_int		kstsize;

#if defined(_MILANHW_)
	/* XXX
	 * XXX The right place to do this is probably the booter (Leo).
	 * XXX More than 16MB memory is not yet supported on the Milan!
	 * The Milan lies about the presence of TT-RAM.  If you insert
	 * 16MB it is split into 14MB of ST-RAM starting at address 0 and
	 * 2MB of TT-RAM starting at address 16MB.
	 */
	stphysize += ttphysize;
	ttphysize = ttphystart = 0;
#endif
	boot_segs[0].start = 0;
	boot_segs[0].end = stphysize;
	boot_segs[1].start = ttphystart;
	boot_segs[1].end = ttphystart + ttphysize;
	boot_segs[2].start = boot_segs[2].end = 0;	/* End of segments! */

	/*
	 * The following is a hack.  We do not know how much ST memory we
	 * really need until after configuration has finished.  At the moment
	 * I have no idea how to grab ST memory at that time.
	 * The round_page() call is meant to correct errors made by
	 * binpatching!
	 */
	st_pool_size = m68k_round_page(st_pool_size);
	st_pool_phys = stphysize - st_pool_size;
	stphysize = st_pool_phys;

	machineid = id;
	esym = esym_addr;

	/*
	 * The kernel ends at end() or esym.
	 */
	if (esym == NULL)
		end_loaded = (u_int)end;
	else
		end_loaded = (u_int)esym;

	/*
	 * If we have enough fast memory to put the kernel in and the
	 * RELOC_KERNEL option is set, do it!
	 */
	if ((reloc_kernel != 0) && (ttphysize >= end_loaded))
		kbase = ttphystart;
	else
		kbase = 0;

	/*
	 * Determine the type of machine we are running on.  This needs
	 * to be done early (and before initcpu())!
	 */
	set_machtype();

	/*
	 * Initialize cpu specific stuff.
	 */
	initcpu();

	/*
	 * We run the kernel from ST memory at the moment.
	 * The kernel segment table is put just behind the loaded image.
	 * pstart: start of usable ST memory
	 * avail : size of ST memory available.
	 */
	pstart = (u_int)end_loaded;
	pstart = m68k_round_page(pstart);
	avail = stphysize - pstart;

	/*
	 * Calculate the number of pages needed for Sysseg.
	 * For the 68030, we need 256 descriptors (segment-table-entries).
	 * This easily fits into one page.
	 * For the 68040, both the level-1 and level-2 descriptors are
	 * stored in Sysseg.  We currently handle a maximum sum of MAXKL2SIZE
	 * level-1 & level-2 tables.
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
#endif
		kstsize = 1;
	/*
	 * Allocate the kernel segment table.
	 */
	Sysseg = (st_entry_t *)pstart;
	Sysseg_pa = (u_int)Sysseg + kbase;
	pstart += kstsize * PAGE_SIZE;
	avail -= kstsize * PAGE_SIZE;

	/*
	 * Determine the number of PTEs we need for extras like the
	 * ST I/O maps.
	 */
	ptextra = btoc(STIO_SIZE);

	/*
	 * If present, add the PCI areas.
	 */
	if (machineid & ATARI_HADES)
		ptextra += btoc(PCI_CONF_SIZE + PCI_IO_SIZE + PCI_MEM_SIZE);
	if (machineid & ATARI_MILAN)
		ptextra += btoc(PCI_IO_SIZE + PCI_MEM_SIZE);
	ptextra += btoc(BOOTM_VA_POOL);

	/*
	 * The 'pt' (the initial kernel page table) has to map the kernel and
	 * the I/O areas.  The various I/O areas are mapped (virtually) at
	 * the top of the address space mapped by 'pt' (i.e. just below Sysmap).
	 */
	pt = (pt_entry_t *)pstart;
	ptsize = (Sysptsize + howmany(ptextra, NPTEPG)) << PGSHIFT;
	pstart += ptsize;
	avail -= ptsize;

	/*
	 * Allocate the kernel page table map.
	 */
	Sysptmap = (pt_entry_t *)pstart;
	pstart += PAGE_SIZE;
	avail -= PAGE_SIZE;

	/*
	 * Set Sysmap; mapped after page table pages.  Because I too (LWP)
	 * didn't understand the reason for this, I borrowed the following
	 * (slightly modified) comment from mac68k/locore.s:
	 * LAK: There seems to be some confusion here about the next line,
	 * so I'll explain.  The kernel needs some way of dynamically modifying
	 * the page tables for its own virtual memory.  What it does is that it
	 * has a page table map.  This page table map is mapped right after the
	 * kernel itself (in our implementation; in HP's it was after the I/O
	 * space).  Therefore, the first three (or so) entries in the segment
	 * table point to the first three pages of the page tables (which
	 * point to the kernel) and the next entry in the segment table points
	 * to the page table map (this is done later).  Therefore, the value
	 * of the pointer "Sysmap" will be something like 16M*3 = 48M.  When
	 * the kernel addresses this pointer (e.g., Sysmap[0]), it will get
	 * the first longword of the first page map (== pt[0]).  Since the
	 * page map mirrors the segment table, addressing any index of Sysmap
	 * will give you a PTE of the page maps which map the kernel.
	 */
	Sysmap = (u_int *)(ptsize << (SEGSHIFT - PGSHIFT));

	/*
	 * Initialize segment tables.
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		mmu040_setup(Sysseg, kstsize, pt, ptsize, Sysptmap, 1, kbase);
	else
#endif /* defined(M68040) || defined(M68060) */
		mmu030_setup(Sysseg, kstsize, pt, ptsize, Sysptmap, 1, kbase);

	/*
	 * Initialize kernel page table page(s).
	 * Assume load at VA 0.
	 * - Text pages are RO
	 * - Page zero is invalid
	 */
	pg_proto = (0 + kbase) | PG_RO | PG_V;
	pg = pt;
	*pg++ = PG_NV;
	pg_proto += PAGE_SIZE;
	for (i = PAGE_SIZE; i < (u_int)etext;
	     i += PAGE_SIZE, pg_proto += PAGE_SIZE)
		*pg++ = pg_proto;

	/*
	 * Data, bss and dynamic tables are read/write.
	 */
	pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V;

#if defined(M68040) || defined(M68060)
	/*
	 * Map the kernel segment table cache-inhibited on these machines
	 * (for the 68040 not strictly necessary, but recommended by
	 * Motorola; for the 68060 mandatory).
	 */
	if (mmutype == MMU_68040) {

		if (kernel_copyback)
			pg_proto |= PG_CCB;

		for (; i < (u_int)Sysseg;
		     i += PAGE_SIZE, pg_proto += PAGE_SIZE)
			*pg++ = pg_proto;

		pg_proto = (pg_proto & ~PG_CCB) | PG_CI;
		for (; i < pstart; i += PAGE_SIZE, pg_proto += PAGE_SIZE)
			*pg++ = pg_proto;
		pg_proto = (pg_proto & ~PG_CI);
		if (kernel_copyback)
			pg_proto |= PG_CCB;
	}
#endif /* defined(M68040) || defined(M68060) */

	/*
	 * Go till the end of the data allocated so far,
	 * plus the proc0 u-area (to be allocated).
	 */
	for (; i < pstart + USPACE; i += PAGE_SIZE, pg_proto += PAGE_SIZE)
		*pg++ = pg_proto;

	/*
	 * Invalidate the remainder of the kernel PT.
	 */
	while (pg < &pt[ptsize/sizeof(pt_entry_t)])
		*pg++ = PG_NV;

	/*
	 * Map various I/O areas.
	 */
	map_io_areas(pt, ptsize, ptextra);

	/*
	 * Save KVA of the proc0 user-area and allocate it.
	 */
	proc0paddr = pstart;
	pstart += USPACE;
	avail -= USPACE;

	/*
	 * At this point, virtual and physical allocation start to diverge.
	 */
	vstart = pstart;

	/*
	 * Map the allocated space in ST-ram now.  In the contiguous case,
	 * there is no need to make a distinction between virtual and
	 * physical addresses, but I make it anyway to be prepared.
	 * Physical space is already reserved!
	 */
	st_pool_virt = vstart;
	pg = &pt[vstart / PAGE_SIZE];
	pg_proto = st_pool_phys | PG_RW | PG_CI | PG_V;
	vstart += st_pool_size;
	while (pg_proto < (st_pool_phys + st_pool_size)) {
		*pg++ = pg_proto;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * Map physical page zero and page zero+1 (the first ST-ram pages).
	 * We need to reference them in the reboot code.  Two pages are
	 * mapped because we must make sure 'doboot()' is contained in them
	 * (see the tricky copying there....).
	 */
	page_zero = vstart;
	pg = &pt[vstart / PAGE_SIZE];
	*pg++ = PG_RW | PG_CI | PG_V;
	vstart += PAGE_SIZE;
	*pg = PG_RW | PG_CI | PG_V | PAGE_SIZE;
	vstart += PAGE_SIZE;

	lowram = 0 >> PGSHIFT;	/* XXX */

	/*
	 * Fill in the usable segments.  The page indexes will be initialized
	 * later, when all reservations are made.
	 */
	usable_segs[0].start = 0;
	usable_segs[0].end = stphysize;
	usable_segs[1].start = ttphystart;
	usable_segs[1].end = ttphystart + ttphysize;
	usable_segs[2].start = usable_segs[2].end = 0;	/* End of segments! */

	if (kbase) {
		/*
		 * The first page of ST-ram is unusable; reserve the space
		 * for the kernel in the TT-ram segment.
		 * Note: Because physical page zero is partially mapped to ROM
		 *       by hardware, it is unusable.
		 */
		usable_segs[0].start = PAGE_SIZE;
		usable_segs[1].start += pstart;
	} else
		usable_segs[0].start += pstart;

	/*
	 * As all segment sizes are now valid, calculate page indexes and
	 * available physical memory.
	 */
	usable_segs[0].first_page = 0;
	for (i = 1; usable_segs[i].start; i++) {
		usable_segs[i].first_page = usable_segs[i-1].first_page;
		usable_segs[i].first_page +=
		    (usable_segs[i-1].end - usable_segs[i-1].start) / PAGE_SIZE;
	}
	for (i = 0, physmem = 0; usable_segs[i].start; i++)
		physmem += usable_segs[i].end - usable_segs[i].start;
	physmem >>= PGSHIFT;

	/*
	 * Get the pmap module in sync with reality.
	 */
	pmap_bootstrap(vstart, stio_addr, ptextra);

	/*
	 * Prepare to enable the MMU.
	 * Set up and load the SRP: no limit, share global, 4-byte PTEs.
	 */
	protorp[0] = 0x80000202;
	protorp[1] = (u_int)Sysseg + kbase;	/* + segtable address */
	Sysseg_pa = (u_int)Sysseg + kbase;

	cpu_init_kcorehdr(kbase);

	/*
	 * Copy the kernel (and all now-initialized variables) over to
	 * fastram.  DON'T use bcopy(); this beast is much larger than 128k!
	 */
	if (kbase) {
		register u_long	*lp, *le, *fp;

		lp = (u_long *)0;
		le = (u_long *)pstart;
		fp = (u_long *)kbase;
		while (lp < le)
			*fp++ = *lp++;
	}
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040) {
		/*
		 * movel Sysseg_pa,a0;
		 * movec a0,SRP;
		 * pflusha;
		 * movel #$0xc000,d0;
		 * movec d0,TC
		 */
		if (cputype == CPU_68060) {
			/* XXX: Does the branch cache need to be cleared? */
			asm volatile (".word 0x4e7a,0x0002;"
				      "orl #0x400000,%%d0;"
				      ".word 0x4e7b,0x0002" : : : "d0");
		}
		asm volatile ("movel %0,%%a0;"
			      ".word 0x4e7b,0x8807" : : "a" (Sysseg_pa) : "a0");
		asm volatile (".word 0xf518" : : );
		asm volatile ("movel #0xc000,%%d0;"
			      ".word 0x4e7b,0x0003" : : : "d0" );
	} else
#endif
	{
		asm volatile ("pmove %0@,%%srp" : : "a" (&protorp[0]));
		/*
		 * Set up and load the TC register:
		 * enable_cpr, enable_srp, pagesize=8k,
		 * A = 8 bits, B = 11 bits
		 */
		tc = 0x82d08b00;
		asm volatile ("pmove %0@,%%tc" : : "a" (&tc));
	}

	/* Is this to fool the optimizer?? */
	i = *(int *)proc0paddr;
	*(volatile int *)proc0paddr = i;

	/*
	 * Initialize the "u-area" pages.
	 * Must initialize p_addr before autoconfig or the
	 * fault handler will get a NULL reference.
	 */
	bzero((u_char *)proc0paddr, USPACE);
	lwp0.l_addr = (struct user *)proc0paddr;
	curlwp = &lwp0;
	curpcb = &((struct user *)proc0paddr)->u_pcb;

	/*
	 * Get the hardware into a defined state.
	 */
	atari_hwinit();

	/*
	 * Initialize the stmem allocator.
	 */
	init_stmem();

	/*
	 * Initialize the I/O mem extent map.
	 * Note: we don't have to check the return value since
	 * creation of a fixed extent map will never fail (because
	 * descriptor storage has already been allocated).
	 *
	 * N.B. The iomem extent manages _all_ physical addresses
	 * on the machine.  When the amount of RAM is found, all
	 * extents of RAM are allocated from the map.
	 */
	iomem_ex = extent_create("iomem", 0x0, 0xffffffff, M_DEVBUF,
	    (caddr_t)iomem_ex_storage, sizeof(iomem_ex_storage),
	    EX_NOCOALESCE|EX_NOWAIT);

	/*
	 * Allocate the physical RAM from the extent map.
	 */
	for (i = 0; boot_segs[i].end != 0; i++) {
		if (extent_alloc_region(iomem_ex, boot_segs[i].start,
		    boot_segs[i].end - boot_segs[i].start, EX_NOWAIT)) {
			/* XXX: Ahum, should not happen ;-) */
			printf("Warning: Cannot allocate boot memory from"
			    " extent map!?\n");
		}
	}

	/*
	 * Initialize interrupt mapping.
	 */
	intr_init();
}

/*
 * Try to figure out what type of machine we are running on.
 * Note: This module runs *before* the I/O mapping is set up!
 */
static void
set_machtype()
{
#ifdef _MILANHW_
	machineid |= ATARI_MILAN;

#else
	stio_addr = 0xff8000;	/* XXX: For TT & Falcon only */
	if (badbaddr((caddr_t)&MFP2->mf_gpip, sizeof(char))) {
		/*
		 * Watch out!  We can also have a Hades with < 16Mb
		 * RAM here...
		 */
		if (!badbaddr((caddr_t)&MFP->mf_gpip, sizeof(char))) {
			machineid |= ATARI_FALCON;
			return;
		}
	}
	if (!badbaddr((caddr_t)(PCI_CONFB_PHYS + PCI_CONFM_PHYS), sizeof(char)))
		machineid |= ATARI_HADES;
	else
		machineid |= ATARI_TT;
#endif /* _MILANHW_ */
}

static void
atari_hwinit()
{
#if defined(_ATARIHW_)
	/*
	 * Initialize the sound chip.
	 */
	ym2149_init();

	/*
	 * Make sure that the MIDI ACIA will not generate an interrupt
	 * unless something attaches to it.  We cannot do this for the
	 * keyboard ACIA because this breaks the '-d' option of the
	 * booter...
	 */
	MDI->ac_cs = 0;
#endif /* defined(_ATARIHW_) */

	/*
	 * Initialize both MFP chips (if both are present!) to generate
	 * auto-vectored interrupts with EOI.  The active-edge registers are
	 * set up.  The interrupt enable registers are set to disable all
	 * interrupts.
	 */
	MFP->mf_iera = MFP->mf_ierb = 0;
	MFP->mf_imra = MFP->mf_imrb = 0;
	MFP->mf_aer = MFP->mf_ddr = 0;
	MFP->mf_vr = 0x40;

#if defined(_ATARIHW_)
	if (machineid & (ATARI_TT|ATARI_HADES)) {
		MFP2->mf_iera = MFP2->mf_ierb = 0;
		MFP2->mf_imra = MFP2->mf_imrb = 0;
		MFP2->mf_aer = 0x80;
		MFP2->mf_vr = 0x50;
	}

	if (machineid & ATARI_TT) {
		/*
		 * Initialize the SCU to enable interrupts on the SCC (ipl5),
		 * MFP (ipl6) and softints (ipl1).
		 */
		SCU->sys_mask = SCU_SYS_SOFT;
		SCU->vme_mask = SCU_MFP | SCU_SCC;
#ifdef DDB
		/*
		 * This allows people with the correct hardware modification
		 * to drop into the debugger from an NMI.
		 */
		SCU->sys_mask |= SCU_IRQ7;
#endif
	}
#endif /* defined(_ATARIHW_) */

#if NPCI > 0
	if (machineid & (ATARI_HADES|ATARI_MILAN)) {
		/*
		 * Configure the PCI bus.
		 */
		init_pci_bus();
	}
#endif

}

/*
 * Do the dull work of mapping the various I/O areas.  They MUST be
 * cache-inhibited!
 * All I/O areas are virtually mapped at the end of the pt-table.
 */
static void
map_io_areas(pt, ptsize, ptextra)
	pt_entry_t	*pt;
	u_int		ptsize;		/* Size of 'pt' in bytes	*/
	u_int		ptextra;	/* # of additional I/O PTEs	*/
{
	extern void	bootm_init __P((vaddr_t, pt_entry_t *, u_long));
	vaddr_t		ioaddr;
	pt_entry_t	*pg, *epg;
	pt_entry_t	pg_proto;
	u_long		mask;

	ioaddr = ((ptsize / sizeof(pt_entry_t)) - ptextra) * PAGE_SIZE;

	/*
	 * Map the ST-IO area.
	 */
	stio_addr = ioaddr;
	ioaddr += STIO_SIZE;
	pg = &pt[stio_addr / PAGE_SIZE];
	epg = &pg[btoc(STIO_SIZE)];
#ifdef _MILANHW_
	/*
	 * Turn on byte swaps in the ST I/O area.  On the Milan, the
	 * U0 signal of the MMU controls the BigEndian signal
	 * of the PLX9080.  We use this setting so we can read/write the
	 * PLX registers (and PCI-config space) in big-endian mode.
	 */
	pg_proto = STIO_PHYS | PG_RW | PG_CI | PG_V | 0x100;
#else
	pg_proto = STIO_PHYS | PG_RW | PG_CI | PG_V;
#endif
	while (pg < epg) {
		*pg++ = pg_proto;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * Map PCI areas
	 */
	if (machineid & ATARI_HADES) {
		/*
		 * Only Hades maps the PCI-config space!
		 */
		pci_conf_addr = ioaddr;
		ioaddr += PCI_CONF_SIZE;
		pg = &pt[pci_conf_addr / PAGE_SIZE];
		epg = &pg[btoc(PCI_CONF_SIZE)];
		mask = PCI_CONFM_PHYS;
		pg_proto = PCI_CONFB_PHYS | PG_RW | PG_CI | PG_V;
		for (; pg < epg; mask <<= 1)
			*pg++ = pg_proto | mask;
	} else
		pci_conf_addr = 0;	/* XXX: should crash */

	if (machineid & (ATARI_HADES|ATARI_MILAN)) {
		pci_io_addr = ioaddr;
		ioaddr += PCI_IO_SIZE;
		pg = &pt[pci_io_addr / PAGE_SIZE];
		epg = &pg[btoc(PCI_IO_SIZE)];
		pg_proto = PCI_IO_PHYS | PG_RW | PG_CI | PG_V;
		while (pg < epg) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
		}

		pci_mem_addr = ioaddr;
		/* Provide an uncached PCI address for the MILAN */
		pci_mem_uncached = ioaddr;
		ioaddr += PCI_MEM_SIZE;
		epg = &pg[btoc(PCI_MEM_SIZE)];
		pg_proto = PCI_VGA_PHYS | PG_RW | PG_CI | PG_V;
		while (pg < epg) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
		}
	}

	bootm_init(ioaddr, pg, BOOTM_VA_POOL);
	/*
	 * ioaddr += BOOTM_VA_POOL;
	 * pg = &pg[btoc(BOOTM_VA_POOL)];
	 */
}

/*
 * Used by dumpconf() to get the size of the machine-dependent panic-dump
 * header in disk blocks.
 */
int
cpu_dumpsize()
{
	int	size;

	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
	return (btodb(roundup(size, dbtob(1))));
}

/*
 * Called by dumpsys() to dump the machine-dependent header.
 * XXX: Assumes that it will all fit in one diskblock.
 */
int
cpu_dump(dump, p_blkno)
	int	(*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	daddr_t	*p_blkno;
{
	int		buf[dbtob(1)/sizeof(int)];
	int		error;
	kcore_seg_t	*kseg_p;
	cpu_kcore_hdr_t	*chdr_p;

	kseg_p = (kcore_seg_t *)buf;
	chdr_p = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*kseg_p)) / sizeof(int)];

	/*
	 * Generate a segment header
	 */
	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg_p->c_size = dbtob(1) - ALIGN(sizeof(*kseg_p));

	/*
	 * Add the md header
	 */
	*chdr_p = cpu_kcore_hdr;
	error = dump(dumpdev, *p_blkno, (caddr_t)buf, dbtob(1));
	*p_blkno += 1;
	return (error);
}

#if (M68K_NPHYS_RAM_SEGS < NMEM_SEGS)
#error "Configuration error: M68K_NPHYS_RAM_SEGS < NMEM_SEGS"
#endif
/*
 * Initialize the cpu_kcore_header.
 */
static void
cpu_init_kcorehdr(kbase)
	u_long	kbase;
{
	cpu_kcore_hdr_t		*h = &cpu_kcore_hdr;
	struct m68k_kcore_hdr	*m = &h->un._m68k;
	extern char		end[];
	int			i;

	bzero(&cpu_kcore_hdr, sizeof(cpu_kcore_hdr));

	/*
	 * Initialize the `dispatcher' portion of the header.
	 */
	strcpy(h->name, machine);
	h->page_size = PAGE_SIZE;
	h->kernbase = KERNBASE;

	/*
	 * Fill in information about our MMU configuration.
	 */
	m->mmutype	= mmutype;
	m->sg_v		= SG_V;
	m->sg_frame	= SG_FRAME;
	m->sg_ishift	= SG_ISHIFT;
	m->sg_pmask	= SG_PMASK;
	m->sg40_shift1	= SG4_SHIFT1;
	m->sg40_mask2	= SG4_MASK2;
	m->sg40_shift2	= SG4_SHIFT2;
	m->sg40_mask3	= SG4_MASK3;
	m->sg40_shift3	= SG4_SHIFT3;
	m->sg40_addr1	= SG4_ADDR1;
	m->sg40_addr2	= SG4_ADDR2;
	m->pg_v		= PG_V;
	m->pg_frame	= PG_FRAME;

	/*
	 * Initialize pointer to kernel segment table.
	 */
	m->sysseg_pa = (u_int)Sysseg + kbase;

	/*
	 * Initialize relocation value such that:
	 *
	 *	pa = (va - KERNBASE) + reloc
	 */
	m->reloc = kbase;

	/*
	 * Define the end of the relocatable range.
	 */
	m->relocend = (u_int32_t)end;

	for (i = 0; i < NMEM_SEGS; i++) {
		m->ram_segs[i].start = boot_segs[i].start;
		m->ram_segs[i].size = boot_segs[i].end - boot_segs[i].start;
	}
}

void
mmu030_setup(sysseg, kstsize, pt, ptsize, sysptmap, sysptsize, kbase)
	st_entry_t	*sysseg;	/* System segment table		*/
	u_int		kstsize;	/* size of 'sysseg' in pages	*/
	pt_entry_t	*pt;		/* Kernel page table		*/
	u_int		ptsize;		/* size of 'pt' in bytes	*/
	pt_entry_t	*sysptmap;	/* System page table		*/
	u_int		sysptsize;	/* size of 'sysptmap' in pages	*/
	u_int		kbase;
{
	st_entry_t	sg_proto, *sg;
	pt_entry_t	pg_proto, *pg, *epg;

	sg_proto = ((u_int)pt + kbase) | SG_RW | SG_V;
	pg_proto = ((u_int)pt + kbase) | PG_RW | PG_CI | PG_V;

	/*
	 * Map the page table pages in both the HW segment table
	 * and the software Sysptmap.  Note that Sysptmap is also
	 * considered a PT page, hence the +sysptsize.
	 */
	sg = sysseg;
	pg = sysptmap;
	epg = &pg[(ptsize >> PGSHIFT) + sysptsize];
	while (pg < epg) {
		*sg++ = sg_proto;
		*pg++ = pg_proto;
		sg_proto += PAGE_SIZE;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * Invalidate the remainder of the tables.
	 */
	epg = &sysptmap[sysptsize * NPTEPG];
	while (pg < epg) {
		*sg++ = SG_NV;
		*pg++ = PG_NV;
	}
}

#if defined(M68040) || defined(M68060)
void
mmu040_setup(sysseg, kstsize, pt, ptsize, sysptmap, sysptsize, kbase)
	st_entry_t	*sysseg;	/* System segment table		*/
	u_int		kstsize;	/* size of 'sysseg' in pages	*/
	pt_entry_t	*pt;		/* Kernel page table		*/
	u_int		ptsize;		/* size of 'pt' in bytes	*/
	pt_entry_t	*sysptmap;	/* System page table		*/
	u_int		sysptsize;	/* size of 'sysptmap' in pages	*/
	u_int		kbase;
{
	int		i;
	st_entry_t	sg_proto, *sg, *esg;
	pt_entry_t	pg_proto;

	/*
	 * First invalidate the entire "segment table" pages
	 * (levels 1 and 2 have the same "invalid" values).
	 */
	sg = sysseg;
	esg = &sg[kstsize * NPTEPG];
	while (sg < esg)
		*sg++ = SG_NV;

	/*
	 * Initialize level 2 descriptors (which immediately
	 * follow the level 1 table).  These should map 'pt' + 'sysptmap'.
	 * We need:
	 *	NPTEPG / SG4_LEV3SIZE
	 * level 2 descriptors to map each of the nptpages + 1
	 * pages of PTEs.  Note that we set the "used" bit
	 * now to save the HW the expense of doing it.
	 */
	i = ((ptsize >> PGSHIFT) + sysptsize) * (NPTEPG / SG4_LEV3SIZE);
	sg = &sysseg[SG4_LEV1SIZE];
	esg = &sg[i];
	sg_proto = ((u_int)pt + kbase) | SG_U | SG_RW | SG_V;
	while (sg < esg) {
		*sg++ = sg_proto;
		sg_proto += (SG4_LEV3SIZE * sizeof(st_entry_t));
	}

	/*
	 * Initialize level 1 descriptors.
	 * We need:
	 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
	 * level 1 descriptors to map the 'num' level 2's.
	 */
	i = roundup(i, SG4_LEV2SIZE) / SG4_LEV2SIZE;
	protostfree = (-1 << (i + 1)) /* & ~(-1 << MAXKL2SIZE) */;
	sg = sysseg;
	esg = &sg[i];
	sg_proto = ((u_int)&sg[SG4_LEV1SIZE] + kbase) | SG_U | SG_RW | SG_V;
	while (sg < esg) {
		*sg++ = sg_proto;
		sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t));
	}

	/*
	 * Initialize Sysptmap.
	 */
	sg = sysptmap;
	esg = &sg[(ptsize >> PGSHIFT) + sysptsize];
	pg_proto = ((u_int)pt + kbase) | PG_RW | PG_CI | PG_V;
	while (sg < esg) {
		*sg++ = pg_proto;
		pg_proto += PAGE_SIZE;
	}
	/*
	 * Invalidate the rest of the Sysptmap page.
	 */
	esg = &sysptmap[sysptsize * NPTEPG];
	while (sg < esg)
		*sg++ = SG_NV;
}
#endif /* defined(M68040) || defined(M68060) */

#if defined(M68060)
int m68060_pcr_init = 0x21;	/* make this patchable */
#endif

static void
initcpu()
{
	typedef void trapfun __P((void));

	switch (cputype) {

#if defined(M68060)
	case CPU_68060:
	    {
		extern trapfun	*vectab[256];
		extern trapfun	buserr60, addrerr4060, fpfault;
#if defined(M060SP)
		extern u_int8_t	FP_CALL_TOP[], I_CALL_TOP[];
#else
		extern trapfun	illinst;
#endif

		asm volatile ("movl %0,%%d0; .word 0x4e7b,0x0808" : :
			      "d"(m68060_pcr_init):"d0" );

		/* bus/addrerr vectors */
		vectab[2] = buserr60;
		vectab[3] = addrerr4060;

#if defined(M060SP)
		/* integer support */
		vectab[61] = (trapfun *)&I_CALL_TOP[128 + 0x00];

		/* floating point support */
		/*
		 * XXX maybe we really should run-time check for the
		 * stack frame format here:
		 */
		vectab[11] = (trapfun *)&FP_CALL_TOP[128 + 0x30];

		vectab[55] = (trapfun *)&FP_CALL_TOP[128 + 0x38];
		vectab[60] = (trapfun *)&FP_CALL_TOP[128 + 0x40];

		vectab[54] = (trapfun *)&FP_CALL_TOP[128 + 0x00];
		vectab[52] = (trapfun *)&FP_CALL_TOP[128 + 0x08];
		vectab[53] = (trapfun *)&FP_CALL_TOP[128 + 0x10];
		vectab[51] = (trapfun *)&FP_CALL_TOP[128 + 0x18];
		vectab[50] = (trapfun *)&FP_CALL_TOP[128 + 0x20];
		vectab[49] = (trapfun *)&FP_CALL_TOP[128 + 0x28];
#else
		vectab[61] = illinst;
#endif
		vectab[48] = fpfault;
	    }
	    break;
#endif /* defined(M68060) */
#if defined(M68040)
	case CPU_68040:
	    {
		extern trapfun	*vectab[256];
		extern trapfun	buserr40, addrerr4060;

		/* bus/addrerr vectors */
		vectab[2] = buserr40;
		vectab[3] = addrerr4060;
	    }
	    break;
#endif /* defined(M68040) */
#if defined(M68030) || defined(M68020)
	case CPU_68030:
	case CPU_68020:
	    {
		extern trapfun	*vectab[256];
		extern trapfun	buserr2030, addrerr2030;

		/* bus/addrerr vectors */
		vectab[2] = buserr2030;
		vectab[3] = addrerr2030;
	    }
	    break;
#endif /* defined(M68030) || defined(M68020) */
	}

	DCIS();
}

#ifdef DEBUG
void	dump_segtable __P((u_int *));
void	dump_pagetable __P((u_int *, u_int, u_int));
u_int	vmtophys __P((u_int *, u_int));

void
dump_segtable(stp)
	u_int	*stp;
{
	u_int	*s, *es;
	int	shift, i;

	s = stp;
	{
		es = s + (ATARI_STSIZE >> 2);
		shift = SG_ISHIFT;
	}

	/*
	 * XXX need changes for 68040
	 */
	for (i = 0; s < es; s++, i++)
		if (*s & SG_V)
			printf("$%08x: $%08x\t", i << shift, *s & SG_FRAME);
	printf("\n");
}

void
dump_pagetable(ptp, i, n)
	u_int	*ptp, i, n;
{
	u_int	*p, *ep;

	p = ptp + i;
	ep = p + n;
	for (; p < ep; p++, i++)
		if (*p & PG_V)
			printf("$%08x -> $%08x\t", i, *p & PG_FRAME);
	printf("\n");
}

/*
 * Hand-walk the (68030-style) two-level tables to translate a virtual
 * address into a physical address.
 */
u_int
vmtophys(ste, vm)
	u_int	*ste, vm;
{
	ste = (u_int *)(*(ste + (vm >> SEGSHIFT)) & SG_FRAME);
	ste += (vm & SG_PMASK) >> PGSHIFT;
	return ((*ste & -PAGE_SIZE) | (vm & (PAGE_SIZE - 1)));
}

#endif