/*	$NetBSD: atari_init.c,v 1.51 2000/06/29 08:28:23 mrg Exp $	*/

/*
 * Copyright (c) 1995 Leo Weppelman
 * Copyright (c) 1994 Michael L. Hitch
 * Copyright (c) 1993 Markus Wild
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Markus Wild.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/tty.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/msgbuf.h>
#include <sys/mbuf.h>
#include <sys/extent.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/dkbad.h>
#include <sys/reboot.h>
#include <sys/exec.h>
#include <sys/core.h>
#include <sys/kcore.h>

#include <uvm/uvm_extern.h>

#include <machine/vmparam.h>
#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/iomap.h>
#include <machine/mfp.h>
#include <machine/scu.h>
#include <machine/acia.h>
#include <machine/kcore.h>

#include <m68k/cpu.h>
#include <m68k/cacheops.h>

#include <atari/atari/intr.h>
#include <atari/atari/stalloc.h>
#include <atari/dev/ym2149reg.h>

#include "pci.h"

void start_c __P((int, u_int, u_int, u_int, char *));
static void atari_hwinit __P((void));
static void cpu_init_kcorehdr __P((u_long));
static void initcpu __P((void));
static void mmu030_setup __P((st_entry_t *, u_int, pt_entry_t *, u_int,
			pt_entry_t *, u_int, u_int));
static void map_io_areas __P((pt_entry_t *, u_int, u_int));
static void set_machtype __P((void));

#if defined(M68040) || defined(M68060)
static void mmu040_setup __P((st_entry_t *, u_int, pt_entry_t *, u_int,
			pt_entry_t *, u_int, u_int));
#endif

/*
 * Extent maps to manage all memory space, including I/O ranges.  Allocate
 * storage for 8 regions in each, initially.  Later, iomem_malloc_safe
 * will indicate that it's safe to use malloc() to dynamically allocate
 * region descriptors.
 * This means that the fixed static storage is only used for registering
 * the found memory regions and the bus-mapping of the console.
 *
 * The extent maps are not static!  They are used for bus address space
 * allocation.
 */
static long iomem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
struct extent *iomem_ex;
int iomem_malloc_safe;

/*
 * All info needed to generate a panic dump. All fields are set up by
 * start_c().
 * XXX: Should check usage of phys_segs. There is some unwanted overlap
 *      here.... Also, the name is badly chosen. Phys_segs contains the
 *      segment descriptions _after_ reservations are made.
 * XXX: 'lowram' is obsoleted by the new panicdump format
 */
static cpu_kcore_hdr_t cpu_kcore_hdr;

extern u_int		lowram;
extern u_int		Sysptsize, Sysseg_pa, proc0paddr;
extern pt_entry_t	*Sysptmap;
extern st_entry_t	*Sysseg;
u_int			*Sysmap;
int			machineid, mmutype, cputype, astpending;
char			*vmmap;
pv_entry_t		pv_table;
#if defined(M68040) || defined(M68060)
extern int		protostfree;
#endif

extern char		*esym;
extern struct pcb	*curpcb;

/*
 * This is the virtual address of physical page 0. Used by 'do_boot()'.
 */
vaddr_t	page_zero;

/*
 * Crude support for allocation in ST-ram. Currently only used to allocate
 * video ram.
 * The physical address is also returned because the video init needs it to
 * set up the controller at a time when the vm-system is not yet operational,
 * so 'kvtop()' cannot be used.
 */
#ifndef ST_POOL_SIZE
#define	ST_POOL_SIZE	40			/* XXX: enough? */
#endif

u_long	st_pool_size = ST_POOL_SIZE * NBPG;	/* Patchable	*/
u_long	st_pool_virt, st_pool_phys;

/*
 * Are we relocating the kernel to TT-Ram if possible? It is faster, but
 * it is also reported not to work on all TT's. So the default is NO.
 */
#ifndef RELOC_KERNEL
#define	RELOC_KERNEL	0
#endif
int	reloc_kernel = RELOC_KERNEL;		/* Patchable	*/

/*
 * This is the C-level entry function; it is called from locore.s.
 * Preconditions:
 *	Interrupts are disabled.
 *	PA == VA, so we don't have to relocate addresses before enabling
 *		the MMU.
 *	Exec is no longer available (because we're loaded all over
 *		low memory, no ExecBase is available anymore).
 *
 * Its purpose is:
 *	Do the things that are done in locore.s in the hp300 version,
 *	this includes allocation of kernel maps and enabling the MMU.
 *
 * Some of the code in here is `stolen' from Amiga MACH, and was
 * written by Bryan Ford and Niklas Hallqvist.
 *
 * Very crude 68040 support by Michael L. Hitch.
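 *
 * A rough sketch of the physical layout built by the code below, directly
 * behind the loaded kernel image (illustrative only; the real sizes are
 * computed at run time):
 *
 *	end/esym	end of the loaded kernel image
 *	Sysseg		kernel segment table	  (kstsize pages)
 *	pt		initial kernel page table (ptsize bytes)
 *	Sysptmap	kernel page table map	  (1 page)
 *	proc0 u-area	(USPACE bytes)
 *
 * Allocations after that (st_pool, page_zero) only advance the virtual
 * address; the physical space they map has already been reserved.  The
 * I/O areas are mapped by map_io_areas() at the top of the range covered
 * by 'pt'.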
 */
int kernel_copyback = 1;

void
start_c(id, ttphystart, ttphysize, stphysize, esym_addr)
	int	id;			/* Machine id			*/
	u_int	ttphystart, ttphysize;	/* Start address and size of TT-ram */
	u_int	stphysize;		/* Size of ST-ram		*/
	char	*esym_addr;		/* Address of kernel '_esym' symbol */
{
	extern char	end[];
	extern void	etext __P((void));
	extern u_long	protorp[2];
	u_int		pstart;		/* Next available physical address */
	u_int		vstart;		/* Next available virtual address */
	u_int		avail;
	pt_entry_t	*pt;
	u_int		ptsize, ptextra;
	u_int		tc, i;
	u_int		*pg;
	u_int		pg_proto;
	u_int		end_loaded;
	u_long		kbase;
	u_int		kstsize;

	boot_segs[0].start = 0;
	boot_segs[0].end   = stphysize;
	boot_segs[1].start = ttphystart;
	boot_segs[1].end   = ttphystart + ttphysize;
	boot_segs[2].start = boot_segs[2].end = 0;	/* End of segments! */

	/*
	 * The following is a hack. We do not know how much ST memory we
	 * really need until after configuration has finished, and I have
	 * no idea yet how to grab ST memory at that time.
	 * The round_page() call is meant to correct errors made by
	 * binpatching!
	 */
	st_pool_size = m68k_round_page(st_pool_size);
	st_pool_phys = stphysize - st_pool_size;
	stphysize    = st_pool_phys;

	machineid = id;
	esym      = esym_addr;

	/*
	 * The kernel ends at end() or esym.
	 */
	if (esym == NULL)
		end_loaded = (u_int)end;
	else
		end_loaded = (u_int)esym;

	/*
	 * If we have enough fast-memory to put the kernel in and the
	 * RELOC_KERNEL option is set, do it!
	 */
	if ((reloc_kernel != 0) && (ttphysize >= end_loaded))
		kbase = ttphystart;
	else
		kbase = 0;

	/*
	 * Update these as soon as possible!
	 */
	PAGE_SIZE  = NBPG;
	PAGE_MASK  = NBPG - 1;
	PAGE_SHIFT = PG_SHIFT;

	/*
	 * Determine the type of machine we are running on. This needs
	 * to be done early (and before initcpu())!
	 */
	set_machtype();

	/*
	 * Initialize cpu specific stuff
	 */
	initcpu();

	/*
	 * We run the kernel from ST memory at the moment.
	 * The kernel segment table is put just behind the loaded image.
	 * pstart: start of usable ST memory
	 * avail : size of ST memory available.
	 */
	pstart = (u_int)end_loaded;
	pstart = m68k_round_page(pstart);
	avail  = stphysize - pstart;

	/*
	 * Calculate the number of pages needed for Sysseg.
	 * For the 68030, we need 256 descriptors (segment-table-entries).
	 * This easily fits into one page.
	 * For the 68040, both the level-1 and level-2 descriptors are
	 * stored into Sysseg. We currently handle a maximum sum of MAXKL2SIZE
	 * level-1 & level-2 tables.
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
#endif
		kstsize = 1;

	/*
	 * Allocate the kernel segment table
	 */
	Sysseg    = (st_entry_t *)pstart;
	Sysseg_pa = (u_int)Sysseg + kbase;
	pstart   += kstsize * NBPG;
	avail    -= kstsize * NBPG;

	/*
	 * Determine the number of PTEs we need for extras like the
	 * ST I/O maps.
	 */
	ptextra = btoc(STIO_SIZE);

	/*
	 * If present, add the PCI areas
	 */
	if (machineid & ATARI_HADES)
		ptextra += btoc(PCI_CONF_SIZE + PCI_IO_SIZE + PCI_VGA_SIZE);
	ptextra += btoc(BOOTM_VA_POOL);

	/*
	 * The 'pt' (the initial kernel pagetable) has to map the kernel and
	 * the I/O areas.
	 * The various I/O areas are mapped (virtually) at the top of the
	 * address space mapped by 'pt' (i.e. just below Sysmap).
	 */
	pt      = (pt_entry_t *)pstart;
	ptsize  = (Sysptsize + howmany(ptextra, NPTEPG)) << PGSHIFT;
	pstart += ptsize;
	avail  -= ptsize;

	/*
	 * Allocate the kernel page table map
	 */
	Sysptmap = (pt_entry_t *)pstart;
	pstart  += NBPG;
	avail   -= NBPG;

	/*
	 * Set Sysmap; mapped after page table pages. Because I too (LWP)
	 * didn't understand the reason for this, I borrowed the following
	 * (slightly modified) comment from mac68k/locore.s:
	 * LAK:  There seems to be some confusion here about the next line,
	 * so I'll explain.  The kernel needs some way of dynamically modifying
	 * the page tables for its own virtual memory.  What it does is that it
	 * has a page table map.  This page table map is mapped right after the
	 * kernel itself (in our implementation; in HP's it was after the I/O
	 * space). Therefore, the first three (or so) entries in the segment
	 * table point to the first three pages of the page tables (which
	 * point to the kernel) and the next entry in the segment table points
	 * to the page table map (this is done later).  Therefore, the value
	 * of the pointer "Sysmap" will be something like 16M*3 = 48M.  When
	 * the kernel addresses this pointer (e.g., Sysmap[0]), it will get
	 * the first longword of the first page map (== pt[0]).  Since the
	 * page map mirrors the segment table, addressing any index of Sysmap
	 * will give you a PTE of the page maps which map the kernel.
	 */
	Sysmap = (u_int *)(ptsize << (SEGSHIFT - PGSHIFT));

	/*
	 * Initialize segment tables
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		mmu040_setup(Sysseg, kstsize, pt, ptsize, Sysptmap, 1, kbase);
	else
#endif /* defined(M68040) || defined(M68060) */
		mmu030_setup(Sysseg, kstsize, pt, ptsize, Sysptmap, 1, kbase);

	/*
	 * Initialize kernel page table page(s).
	 * Assume load at VA 0.
	 *	- Text pages are RO
	 *	- Page zero is invalid
	 */
	pg_proto = (0 + kbase) | PG_RO | PG_V;
	pg       = pt;
	*pg++ = PG_NV;
	pg_proto += NBPG;
	for (i = NBPG; i < (u_int)etext; i += NBPG, pg_proto += NBPG)
		*pg++ = pg_proto;

	/*
	 * Data, bss and dynamic tables are read/write
	 */
	pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V;

#if defined(M68040) || defined(M68060)
	/*
	 * Map the kernel segment table cache-inhibited on these machines
	 * (for the 68040 not strictly necessary, but recommended by
	 * Motorola; for the 68060 mandatory).
	 */
	if (mmutype == MMU_68040) {

		if (kernel_copyback)
			pg_proto |= PG_CCB;

		for (; i < (u_int)Sysseg; i += NBPG, pg_proto += NBPG)
			*pg++ = pg_proto;

		pg_proto = (pg_proto & ~PG_CCB) | PG_CI;
		for (; i < pstart; i += NBPG, pg_proto += NBPG)
			*pg++ = pg_proto;
		pg_proto = (pg_proto & ~PG_CI);
		if (kernel_copyback)
			pg_proto |= PG_CCB;
	}
#endif /* defined(M68040) || defined(M68060) */

	/*
	 * Go up to the end of the data allocated so far, plus the proc0
	 * u-area (to be allocated).
	 */
	for (; i < pstart + USPACE; i += NBPG, pg_proto += NBPG)
		*pg++ = pg_proto;

	/*
	 * Invalidate the remainder of the kernel PT
	 */
	while (pg < &pt[ptsize/sizeof(pt_entry_t)])
		*pg++ = PG_NV;

	/*
	 * Map various I/O areas
	 */
	map_io_areas(pt, ptsize, ptextra);

	/*
	 * Save the KVA of the proc0 user-area and allocate it
	 */
	proc0paddr = pstart;
	pstart    += USPACE;
	avail     -= USPACE;

	/*
	 * At this point, virtual and physical allocation start to diverge.
	 */
	vstart = pstart;

	/*
	 * Map the allocated space in ST-ram now. In the contig-case, there
	 * is no need to make a distinction between virtual and physical
	 * addresses. But I make it anyway to be prepared.
	 * Physical space is already reserved!
	 */
	st_pool_virt = vstart;
	pg           = &pt[vstart / NBPG];
	pg_proto     = st_pool_phys | PG_RW | PG_CI | PG_V;
	vstart      += st_pool_size;
	while (pg_proto < (st_pool_phys + st_pool_size)) {
		*pg++     = pg_proto;
		pg_proto += NBPG;
	}

	/*
	 * Map physical page_zero and page-zero+1 (the first ST-ram pages).
	 * We need to reference them in the reboot code. Two pages are
	 * mapped because we must make sure 'doboot()' is contained in them
	 * (see the tricky copying there....).
	 */
	page_zero  = vstart;
	pg         = &pt[vstart / NBPG];
	*pg++      = PG_RW | PG_CI | PG_V;
	vstart    += NBPG;
	*pg        = PG_RW | PG_CI | PG_V | NBPG;
	vstart    += NBPG;

	lowram = 0 >> PGSHIFT;	/* XXX */

	/*
	 * Fill in the usable segments. The page indexes will be initialized
	 * later, when all reservations are made.
	 */
	usable_segs[0].start = 0;
	usable_segs[0].end   = stphysize;
	usable_segs[1].start = ttphystart;
	usable_segs[1].end   = ttphystart + ttphysize;
	usable_segs[2].start = usable_segs[2].end = 0;	/* End of segments! */

	if (kbase) {
		/*
		 * The first page of ST-ram is unusable; reserve the space
		 * for the kernel in the TT-ram segment.
		 * Note: Because physical page-zero is partially mapped to ROM
		 *       by hardware, it is unusable.
		 */
		usable_segs[0].start  = NBPG;
		usable_segs[1].start += pstart;
	} else
		usable_segs[0].start += pstart;

	/*
	 * As all segment sizes are now valid, calculate the page indexes
	 * and available physical memory.
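	 *
	 * Illustrative example (hypothetical sizes, assuming 8KB pages):
	 * with usable_segs[0] covering [8K, 4MB) of ST-ram and
	 * usable_segs[1] starting at the TT-ram base, segment 1 gets
	 * first_page = (4MB - 8K) / NBPG = 511, and physmem becomes the
	 * sum of both segment sizes expressed in pages.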
	 */
	usable_segs[0].first_page = 0;
	for (i = 1; usable_segs[i].start; i++) {
		usable_segs[i].first_page  = usable_segs[i-1].first_page;
		usable_segs[i].first_page +=
		    (usable_segs[i-1].end - usable_segs[i-1].start) / NBPG;
	}
	for (i = 0, physmem = 0; usable_segs[i].start; i++)
		physmem += usable_segs[i].end - usable_segs[i].start;
	physmem >>= PGSHIFT;

	/*
	 * Get the pmap module in sync with reality.
	 */
	pmap_bootstrap(vstart, stio_addr, ptextra);

	/*
	 * Prepare to enable the MMU.
	 * Set up and load the SRP: no limit, shared globally, 4-byte PTEs.
	 */
	protorp[0] = 0x80000202;
	protorp[1] = (u_int)Sysseg + kbase;	/* + segtable address */
	Sysseg_pa  = (u_int)Sysseg + kbase;

	cpu_init_kcorehdr(kbase);

	/*
	 * Copy over the kernel (and all now initialized variables)
	 * to fastram.  DON'T use bcopy(); this beast is much larger
	 * than 128k!
	 */
	if (kbase) {
		register u_long	*lp, *le, *fp;

		lp = (u_long *)0;
		le = (u_long *)pstart;
		fp = (u_long *)kbase;
		while (lp < le)
			*fp++ = *lp++;
	}
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040) {
		/*
		 * movel Sysseg_pa,a0;
		 * movec a0,SRP;
		 * pflusha;
		 * movel #$0xc000,d0;
		 * movec d0,TC
		 */
		if (cputype == CPU_68060) {
			/* XXX: Does the branch cache need to be cleared? */
			asm volatile (".word 0x4e7a,0x0002;"
				      "orl #0x400000,d0;"
				      ".word 0x4e7b,0x0002" : : : "d0");
		}
		asm volatile ("movel %0,a0;"
			      ".word 0x4e7b,0x8807" : : "a" (Sysseg_pa) : "a0");
		asm volatile (".word 0xf518" : : );
		asm volatile ("movel #0xc000,d0;"
			      ".word 0x4e7b,0x0003" : : : "d0" );
	} else
#endif
	{
		asm volatile ("pmove %0@,srp" : : "a" (&protorp[0]));
		/*
		 * Set up and load the TC register:
		 * translation and SRP enabled, page size = 8k,
		 * A = 8 bits, B = 11 bits.
		 */
		tc = 0x82d08b00;
		asm volatile ("pmove %0@,tc" : : "a" (&tc));
	}

	/* Is this to fool the optimizer?? */
	i = *(int *)proc0paddr;
	*(volatile int *)proc0paddr = i;

	/*
	 * Initialize the "u-area" pages.
	 * Must initialize p_addr before autoconfig or the
	 * fault handler will get a NULL reference.
	 */
	bzero((u_char *)proc0paddr, USPACE);
	proc0.p_addr = (struct user *)proc0paddr;
	curproc      = &proc0;
	curpcb       = &((struct user *)proc0paddr)->u_pcb;

	/*
	 * Get the hardware into a defined state
	 */
	atari_hwinit();

	/*
	 * Initialize the stmem allocator
	 */
	init_stmem();

	/*
	 * Initialize the I/O mem extent map.
	 * Note: we don't have to check the return value since
	 * creation of a fixed extent map will never fail (since
	 * descriptor storage has already been allocated).
	 *
	 * N.B. The iomem extent manages _all_ physical addresses
	 * on the machine.  When the amount of RAM is found, all
	 * extents of RAM are allocated from the map.
	 */
	iomem_ex = extent_create("iomem", 0x0, 0xffffffff, M_DEVBUF,
	    (caddr_t)iomem_ex_storage, sizeof(iomem_ex_storage),
	    EX_NOCOALESCE|EX_NOWAIT);

	/*
	 * Allocate the physical RAM from the extent map
	 */
	for (i = 0; boot_segs[i].end != 0; i++) {
		if (extent_alloc_region(iomem_ex, boot_segs[i].start,
		    boot_segs[i].end - boot_segs[i].start, EX_NOWAIT)) {
			/* XXX: Ahum, should not happen ;-) */
			printf("Warning: Cannot allocate boot memory from"
			    " extent map!?\n");
		}
	}

	/*
	 * Initialize interrupt mapping.
	 */
	intr_init();
}

/*
 * Try to figure out what type of machine we are running on.
 * Note: This code runs *before* the io-mapping is set up!
 */
static void
set_machtype()
{
	stio_addr = 0xff8000;	/* XXX: For TT & Falcon only */
	if (badbaddr((caddr_t)&MFP2->mf_gpip, sizeof(char))) {
		/*
		 * Watch out! We can also have a Hades with < 16Mb
		 * RAM here...
		 */
		if (!badbaddr((caddr_t)&MFP->mf_gpip, sizeof(char))) {
			machineid |= ATARI_FALCON;
			return;
		}
	}
	if (!badbaddr((caddr_t)(PCI_CONFB_PHYS + PCI_CONFM_PHYS), sizeof(char)))
		machineid |= ATARI_HADES;
	else
		machineid |= ATARI_TT;
}

static void
atari_hwinit()
{
	/*
	 * Initialize the sound chip
	 */
	ym2149_init();

	/*
	 * Make sure that the MIDI ACIA will not generate an interrupt
	 * unless something attaches to it. We cannot do this for the
	 * keyboard ACIA because this breaks the '-d' option of the
	 * booter...
	 */
	MDI->ac_cs = 0;

	/*
	 * Initialize both MFP chips (if both present!) to generate
	 * auto-vectored interrupts with EOI. The active-edge registers are
	 * set up. The interrupt enable registers are set to disable all
	 * interrupts.
	 */
	MFP->mf_iera = MFP->mf_ierb = 0;
	MFP->mf_imra = MFP->mf_imrb = 0;
	MFP->mf_aer  = MFP->mf_ddr  = 0;
	MFP->mf_vr   = 0x40;
	if (machineid & (ATARI_TT|ATARI_HADES)) {
		MFP2->mf_iera = MFP2->mf_ierb = 0;
		MFP2->mf_imra = MFP2->mf_imrb = 0;
		MFP2->mf_aer  = 0x80;
		MFP2->mf_vr   = 0x50;
	}
	if (machineid & ATARI_TT) {
		/*
		 * Initialize the SCU, to enable interrupts on the SCC (ipl5),
		 * MFP (ipl6) and softints (ipl1).
		 */
		SCU->sys_mask = SCU_SYS_SOFT;
		SCU->vme_mask = SCU_MFP | SCU_SCC;
#ifdef DDB
		/*
		 * This allows people with the correct hardware modification
		 * to drop into the debugger from an NMI.
		 */
		SCU->sys_mask |= SCU_IRQ7;
#endif
	}

#if NPCI > 0
	if (machineid & ATARI_HADES) {
		/*
		 * Configure the PCI bus
		 */
		init_pci_bus();
	}
#endif

}

/*
 * Do the dull work of mapping the various I/O areas. They MUST be mapped
 * cache-inhibited!
 * All I/O areas are virtually mapped at the end of the pt-table.
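 *
 * The resulting virtual layout (in increasing address order, as set up
 * below) is: the ST-IO area, then - on the Hades only - the PCI
 * configuration, I/O and VGA windows, and finally the bootm VA pool.
 * Together these occupy the last 'ptextra' pages mapped by 'pt'.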
 */
static void
map_io_areas(pt, ptsize, ptextra)
	pt_entry_t	*pt;
	u_int		ptsize;		/* Size of 'pt' in bytes	*/
	u_int		ptextra;	/* Number of additional I/O PTEs */
{
	extern void	bootm_init __P((vaddr_t, pt_entry_t *, u_long));
	vaddr_t		ioaddr;
	pt_entry_t	*pg, *epg;
	pt_entry_t	pg_proto;
	u_long		mask;

	ioaddr = ((ptsize / sizeof(pt_entry_t)) - ptextra) * NBPG;

	/*
	 * Map the ST-IO area
	 */
	stio_addr = ioaddr;
	ioaddr   += STIO_SIZE;
	pg        = &pt[stio_addr / NBPG];
	epg       = &pg[btoc(STIO_SIZE)];
	pg_proto  = STIO_PHYS | PG_RW | PG_CI | PG_V;
	while (pg < epg) {
		*pg++     = pg_proto;
		pg_proto += NBPG;
	}

	/*
	 * Map the PCI areas
	 */
	if (machineid & ATARI_HADES) {

		pci_conf_addr = ioaddr;
		ioaddr       += PCI_CONF_SIZE;
		pg            = &pt[pci_conf_addr / NBPG];
		epg           = &pg[btoc(PCI_CONF_SIZE)];
		mask          = PCI_CONFM_PHYS;
		pg_proto      = PCI_CONFB_PHYS | PG_RW | PG_CI | PG_V;
		for (; pg < epg; mask <<= 1)
			*pg++ = pg_proto | mask;

		pci_io_addr   = ioaddr;
		ioaddr       += PCI_IO_SIZE;
		epg           = &pg[btoc(PCI_IO_SIZE)];
		pg_proto      = PCI_IO_PHYS | PG_RW | PG_CI | PG_V;
		while (pg < epg) {
			*pg++     = pg_proto;
			pg_proto += NBPG;
		}

		pci_mem_addr  = ioaddr;
		ioaddr       += PCI_VGA_SIZE;
		epg           = &pg[btoc(PCI_VGA_SIZE)];
		pg_proto      = PCI_VGA_PHYS | PG_RW | PG_CI | PG_V;
		while (pg < epg) {
			*pg++     = pg_proto;
			pg_proto += NBPG;
		}
	}

	bootm_init(ioaddr, pg, BOOTM_VA_POOL);
	/*
	 * ioaddr += BOOTM_VA_POOL;
	 * pg = &pg[btoc(BOOTM_VA_POOL)];
	 */
}

/*
 * Used by dumpconf() to get the size of the machine-dependent panic-dump
 * header in disk blocks.
 */
int
cpu_dumpsize()
{
	int	size;

	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
	return (btodb(roundup(size, dbtob(1))));
}

/*
 * Called by dumpsys() to dump the machine-dependent header.
 * XXX: Assumes that it will all fit in one disk block.
 */
int
cpu_dump(dump, p_blkno)
	int	(*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	daddr_t	*p_blkno;
{
	int		buf[dbtob(1)/sizeof(int)];
	int		error;
	kcore_seg_t	*kseg_p;
	cpu_kcore_hdr_t	*chdr_p;

	kseg_p = (kcore_seg_t *)buf;
	chdr_p = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*kseg_p)) / sizeof(int)];

	/*
	 * Generate a segment header
	 */
	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg_p->c_size = dbtob(1) - ALIGN(sizeof(*kseg_p));

	/*
	 * Add the md header
	 */
	*chdr_p = cpu_kcore_hdr;
	error = dump(dumpdev, *p_blkno, (caddr_t)buf, dbtob(1));
	*p_blkno += 1;
	return (error);
}

#if (M68K_NPHYS_RAM_SEGS < NMEM_SEGS)
#error "Configuration error: M68K_NPHYS_RAM_SEGS < NMEM_SEGS"
#endif
/*
 * Initialize the cpu_kcore_header.
 */
static void
cpu_init_kcorehdr(kbase)
	u_long	kbase;
{
	cpu_kcore_hdr_t		*h = &cpu_kcore_hdr;
	struct m68k_kcore_hdr	*m = &h->un._m68k;
	extern char		end[];
	int			i;

	bzero(&cpu_kcore_hdr, sizeof(cpu_kcore_hdr));

	/*
	 * Initialize the `dispatcher' portion of the header.
	 */
	strcpy(h->name, machine);
	h->page_size = NBPG;
	h->kernbase  = KERNBASE;

	/*
	 * Fill in information about our MMU configuration.
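	 * (These values allow post-mortem tools such as libkvm to translate
	 * virtual addresses found in a crash dump without compiled-in
	 * knowledge of this port's MMU layout.)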
	 */
	m->mmutype = mmutype;
	m->sg_v = SG_V;
	m->sg_frame = SG_FRAME;
	m->sg_ishift = SG_ISHIFT;
	m->sg_pmask = SG_PMASK;
	m->sg40_shift1 = SG4_SHIFT1;
	m->sg40_mask2 = SG4_MASK2;
	m->sg40_shift2 = SG4_SHIFT2;
	m->sg40_mask3 = SG4_MASK3;
	m->sg40_shift3 = SG4_SHIFT3;
	m->sg40_addr1 = SG4_ADDR1;
	m->sg40_addr2 = SG4_ADDR2;
	m->pg_v = PG_V;
	m->pg_frame = PG_FRAME;

	/*
	 * Initialize pointer to kernel segment table.
	 */
	m->sysseg_pa = (u_int)Sysseg + kbase;

	/*
	 * Initialize relocation value such that:
	 *
	 *	pa = (va - KERNBASE) + reloc
	 */
	m->reloc = kbase;

	/*
	 * Define the end of the relocatable range.
	 */
	m->relocend = (u_int32_t)end;

	for (i = 0; i < NMEM_SEGS; i++) {
		m->ram_segs[i].start = boot_segs[i].start;
		m->ram_segs[i].size  = boot_segs[i].end -
		    boot_segs[i].start;
	}
}

void
mmu030_setup(sysseg, kstsize, pt, ptsize, sysptmap, sysptsize, kbase)
	st_entry_t	*sysseg;	/* System segment table		*/
	u_int		kstsize;	/* size of 'sysseg' in pages	*/
	pt_entry_t	*pt;		/* Kernel page table		*/
	u_int		ptsize;		/* size of 'pt' in bytes	*/
	pt_entry_t	*sysptmap;	/* System page table		*/
	u_int		sysptsize;	/* size of 'sysptmap' in pages	*/
	u_int		kbase;
{
	st_entry_t	sg_proto, *sg;
	pt_entry_t	pg_proto, *pg, *epg;

	sg_proto = ((u_int)pt + kbase) | SG_RW | SG_V;
	pg_proto = ((u_int)pt + kbase) | PG_RW | PG_CI | PG_V;

	/*
	 * Map the page table pages in both the HW segment table
	 * and the software Sysptmap.  Note that Sysptmap is also
	 * considered a PT page, hence the +sysptsize.
	 */
	sg  = sysseg;
	pg  = sysptmap;
	epg = &pg[(ptsize >> PGSHIFT) + sysptsize];
	while (pg < epg) {
		*sg++ = sg_proto;
		*pg++ = pg_proto;
		sg_proto += NBPG;
		pg_proto += NBPG;
	}

	/*
	 * invalidate the remainder of the tables
	 */
	epg = &sysptmap[sysptsize * NPTEPG];
	while (pg < epg) {
		*sg++ = SG_NV;
		*pg++ = PG_NV;
	}
}

#if defined(M68040) || defined(M68060)
void
mmu040_setup(sysseg, kstsize, pt, ptsize, sysptmap, sysptsize, kbase)
	st_entry_t	*sysseg;	/* System segment table		*/
	u_int		kstsize;	/* size of 'sysseg' in pages	*/
	pt_entry_t	*pt;		/* Kernel page table		*/
	u_int		ptsize;		/* size of 'pt' in bytes	*/
	pt_entry_t	*sysptmap;	/* System page table		*/
	u_int		sysptsize;	/* size of 'sysptmap' in pages	*/
	u_int		kbase;
{
	int		i;
	st_entry_t	sg_proto, *sg, *esg;
	pt_entry_t	pg_proto;

	/*
	 * First invalidate the entire "segment table" pages
	 * (levels 1 and 2 have the same "invalid" values).
	 */
	sg  = sysseg;
	esg = &sg[kstsize * NPTEPG];
	while (sg < esg)
		*sg++ = SG_NV;

	/*
	 * Initialize level 2 descriptors (which immediately
	 * follow the level 1 table).  These should map 'pt' + 'sysptmap'.
	 * We need:
	 *	NPTEPG / SG4_LEV3SIZE
	 * level 2 descriptors to map each of the nptpages + 1
	 * pages of PTEs.  Note that we set the "used" bit
	 * now to save the HW the expense of doing it.
	 */
	i   = ((ptsize >> PGSHIFT) + sysptsize) * (NPTEPG / SG4_LEV3SIZE);
	sg  = &sysseg[SG4_LEV1SIZE];
	esg = &sg[i];
	sg_proto = ((u_int)pt + kbase) | SG_U | SG_RW | SG_V;
	while (sg < esg) {
		*sg++     = sg_proto;
		sg_proto += (SG4_LEV3SIZE * sizeof(st_entry_t));
	}

	/*
	 * Initialize level 1 descriptors.
	 * We need:
	 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
	 * level 1 descriptors to map the 'num' level 2's.
	 */
	i = roundup(i, SG4_LEV2SIZE) / SG4_LEV2SIZE;
	protostfree = (-1 << (i + 1)) /* & ~(-1 << MAXKL2SIZE) */;
	sg  = sysseg;
	esg = &sg[i];
	sg_proto = ((u_int)&sg[SG4_LEV1SIZE] + kbase) | SG_U | SG_RW | SG_V;
	while (sg < esg) {
		*sg++     = sg_proto;
		sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t));
	}

	/*
	 * Initialize sysptmap
	 */
	sg  = sysptmap;
	esg = &sg[(ptsize >> PGSHIFT) + sysptsize];
	pg_proto = ((u_int)pt + kbase) | PG_RW | PG_CI | PG_V;
	while (sg < esg) {
		*sg++     = pg_proto;
		pg_proto += NBPG;
	}

	/*
	 * Invalidate the rest of the Sysptmap page
	 */
	esg = &sysptmap[sysptsize * NPTEPG];
	while (sg < esg)
		*sg++ = SG_NV;
}
#endif /* defined(M68040) || defined(M68060) */

#if defined(M68060)
int m68060_pcr_init = 0x21;	/* make this patchable */
#endif

static void
initcpu()
{
	typedef void trapfun __P((void));

	switch (cputype) {

#if defined(M68060)
	case CPU_68060:
		{
			extern trapfun	*vectab[256];
			extern trapfun	buserr60, addrerr4060, fpfault;
#if defined(M060SP)
			extern u_int8_t	FP_CALL_TOP[], I_CALL_TOP[];
#else
			extern trapfun	illinst;
#endif

			asm volatile ("movl %0,d0; .word 0x4e7b,0x0808" : :
			    "d"(m68060_pcr_init):"d0" );

			/* bus/addrerr vectors */
			vectab[2] = buserr60;
			vectab[3] = addrerr4060;

#if defined(M060SP)
			/* integer support */
			vectab[61] = (trapfun *)&I_CALL_TOP[128 + 0x00];

			/* floating point support */
			/*
			 * XXX maybe we really should run-time check for the
			 * stack frame format here:
			 */
			vectab[11] = (trapfun *)&FP_CALL_TOP[128 + 0x30];

			vectab[55] = (trapfun *)&FP_CALL_TOP[128 + 0x38];
			vectab[60] = (trapfun *)&FP_CALL_TOP[128 + 0x40];

			vectab[54] = (trapfun *)&FP_CALL_TOP[128 + 0x00];
			vectab[52] = (trapfun *)&FP_CALL_TOP[128 + 0x08];
			vectab[53] = (trapfun *)&FP_CALL_TOP[128 + 0x10];
			vectab[51] = (trapfun *)&FP_CALL_TOP[128 + 0x18];
			vectab[50] = (trapfun *)&FP_CALL_TOP[128 + 0x20];
			vectab[49] = (trapfun *)&FP_CALL_TOP[128 + 0x28];
#else
			vectab[61] = illinst;
#endif
			vectab[48] = fpfault;
		}
		break;
#endif /* defined(M68060) */
#if defined(M68040)
	case CPU_68040:
		{
			extern trapfun	*vectab[256];
			extern trapfun	buserr40, addrerr4060;

			/* bus/addrerr vectors */
			vectab[2] = buserr40;
			vectab[3] = addrerr4060;
		}
		break;
#endif /* defined(M68040) */
#if defined(M68030) || defined(M68020)
	case CPU_68030:
	case CPU_68020:
		{
			extern trapfun	*vectab[256];
			extern trapfun	buserr2030, addrerr2030;

			/* bus/addrerr vectors */
			vectab[2] = buserr2030;
			vectab[3] = addrerr2030;
		}
		break;
#endif /* defined(M68030) || defined(M68020) */
	}

	DCIS();
}

#ifdef DEBUG
void dump_segtable __P((u_int *));
void dump_pagetable __P((u_int *, u_int, u_int));
u_int vmtophys __P((u_int *, u_int));

void
dump_segtable(stp)
	u_int *stp;
{
	u_int *s, *es;
	int shift, i;

	s = stp;
	{
		es = s + (ATARI_STSIZE >> 2);
		shift = SG_ISHIFT;
	}

	/*
	 * XXX need changes for 68040
	 */
	for (i = 0; s < es; s++, i++)
		if (*s & SG_V)
			printf("$%08x: $%08x\t", i << shift, *s & SG_FRAME);
	printf("\n");
}

void
dump_pagetable(ptp, i, n)
	u_int *ptp, i, n;
{
	u_int *p, *ep;

	p = ptp + i;
	ep = p + n;
	for (; p < ep; p++, i++)
		if (*p & PG_V)
			printf("$%08x -> $%08x\t", i, *p & PG_FRAME);
	printf("\n");
}

u_int
vmtophys(ste, vm)
	u_int *ste, vm;
{
	ste = (u_int *)(*(ste + (vm >> SEGSHIFT)) & SG_FRAME);
	ste += (vm & SG_PMASK) >> PGSHIFT;
	return ((*ste & -NBPG) | (vm & (NBPG - 1)));
}

#endif