xref: /netbsd-src/sys/arch/atari/atari/atari_init.c (revision dc306354b0b29af51801a7632f1e95265a68cd81)
1 /*	$NetBSD: atari_init.c,v 1.42 1998/12/20 14:32:34 thomas Exp $	*/
2 
3 /*
4  * Copyright (c) 1995 Leo Weppelman
5  * Copyright (c) 1994 Michael L. Hitch
6  * Copyright (c) 1993 Markus Wild
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed by Markus Wild.
20  * 4. The name of the author may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include "opt_ddb.h"
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/proc.h>
40 #include <vm/vm.h>
41 #include <sys/user.h>
42 #include <sys/ioctl.h>
43 #include <sys/select.h>
44 #include <sys/tty.h>
45 #include <sys/proc.h>
46 #include <sys/buf.h>
47 #include <sys/msgbuf.h>
48 #include <sys/mbuf.h>
49 #include <sys/protosw.h>
50 #include <sys/domain.h>
51 #include <sys/dkbad.h>
52 #include <sys/reboot.h>
53 #include <sys/exec.h>
54 #include <sys/core.h>
55 #include <sys/kcore.h>
56 #include <vm/pmap.h>
57 
58 #include <machine/vmparam.h>
59 #include <machine/pte.h>
60 #include <machine/cpu.h>
61 #include <machine/iomap.h>
62 #include <machine/mfp.h>
63 #include <machine/scu.h>
64 #include <machine/acia.h>
65 #include <machine/kcore.h>
66 
67 #include <m68k/cpu.h>
68 #include <m68k/cacheops.h>
69 
70 #include <atari/atari/intr.h>
71 #include <atari/atari/stalloc.h>
72 #include <atari/dev/ym2149reg.h>
73 
74 #include "pci.h"
75 
76 void start_c __P((int, u_int, u_int, u_int, char *));
77 static void atari_hwinit __P((void));
78 static void cpu_init_kcorehdr __P((u_long));
79 static void initcpu __P((void));
80 static void mmu030_setup __P((st_entry_t *, u_int, pt_entry_t *, u_int,
81 			      pt_entry_t *, u_int, u_int));
82 static void map_io_areas __P((pt_entry_t *, u_int, u_int));
83 static void set_machtype __P((void));
84 
85 #if defined(M68040) || defined(M68060)
86 static void mmu040_setup __P((st_entry_t *, u_int, pt_entry_t *, u_int,
87 			      pt_entry_t *, u_int, u_int));
88 #endif
89 
90 /*
91  * All info needed to generate a panic dump. All fields are setup by
92  * start_c().
93  * XXX: Should check usage of phys_segs. There is some unwanted overlap
94  *      here.... Also, the name is badly chosen. Phys_segs contains the
95  *      segment descriptions _after_ reservations are made.
96  * XXX: 'lowram' is obsoleted by the new panicdump format
97  */
98 static cpu_kcore_hdr_t cpu_kcore_hdr;
99 
100 extern u_int 	lowram;
101 extern u_int	Sysptsize, Sysseg_pa, proc0paddr;
102 extern pt_entry_t *Sysptmap;
103 extern st_entry_t *Sysseg;
104 u_int		*Sysmap;
105 int		machineid, mmutype, cputype, astpending;
106 char		*vmmap;
107 pv_entry_t	pv_table;
108 #if defined(M68040) || defined(M68060)
109 extern int	protostfree;
110 #endif
111 
112 extern char		*esym;
113 extern struct pcb	*curpcb;
114 
115 /*
116  * This is the virtual address of physical page 0. Used by 'do_boot()'.
117  */
118 vaddr_t	page_zero;
119 
120 /*
121  * Crude support for allocation in ST-ram. Currently only used to allocate
122  * video ram.
123  * The physical address is also returned because the video init needs it to
124  * set up the controller at a time when the vm-system is not yet operational,
125  * so 'kvtop()' cannot be used.
126  */
127 #ifndef ST_POOL_SIZE
128 #define	ST_POOL_SIZE	40			/* XXX: enough? */
129 #endif
130 
131 u_long	st_pool_size = ST_POOL_SIZE * NBPG;	/* Patchable	*/
132 u_long	st_pool_virt, st_pool_phys;
133 
134 /*
135  * Should we relocate the kernel to TT-Ram if possible? TT-Ram is faster, but
136  * relocation is also reported not to work on all TT's, so the default is NO.
137  */
138 #ifndef	RELOC_KERNEL
139 #define	RELOC_KERNEL	0
140 #endif
141 int	reloc_kernel = RELOC_KERNEL;		/* Patchable	*/
142 
143 /*
144  * This is the C-level entry function; it is called from locore.s.
145  * Preconditions:
146  *	Interrupts are disabled
147  *	PA == VA, we don't have to relocate addresses before enabling
148  *		the MMU
149  * 	Exec is no longer available (because we're loaded all over
150  *		low memory, no ExecBase is available anymore)
151  *
152  * Its purpose is:
153  *	Do the things that are done in locore.s in the hp300 version,
154  *		this includes allocation of kernel maps and enabling the MMU.
155  *
156  * Some of the code in here is `stolen' from Amiga MACH, and was
157  * written by Bryan Ford and Niklas Hallqvist.
158  *
159  * Very crude 68040 support by Michael L. Hitch.
160  */
161 
162 void
163 start_c(id, ttphystart, ttphysize, stphysize, esym_addr)
164 int	id;			/* Machine id				*/
165 u_int	ttphystart, ttphysize;	/* Start address and size of TT-ram	*/
166 u_int	stphysize;		/* Size of ST-ram	 		*/
167 char	*esym_addr;		/* Address of kernel '_esym' symbol	*/
168 {
169 	extern char	end[];
170 	extern void	etext __P((void));
171 	extern u_long	protorp[2];
172 	u_int		pstart;		/* Next available physical address*/
173 	u_int		vstart;		/* Next available virtual address */
174 	u_int		avail;
175 	pt_entry_t	*pt;
176 	u_int		ptsize, ptextra;
177 	u_int		tc, i;
178 	u_int		*pg;
179 	u_int		pg_proto;
180 	u_int		end_loaded;
181 	u_long		kbase;
182 	u_int		kstsize;
183 
184 	boot_segs[0].start       = 0;
185 	boot_segs[0].end         = stphysize;
186 	boot_segs[1].start       = ttphystart;
187 	boot_segs[1].end         = ttphystart + ttphysize;
188 	boot_segs[2].start = boot_segs[2].end = 0; /* End of segments! */
189 
190 	/*
191 	 * The following is a hack. We do not know how much ST memory we
192 	 * really need until after configuration has finished, and I currently
193 	 * have no idea how to grab ST memory at that point.
194 	 * The round_page() call is meant to correct errors made by
195 	 * binpatching!
196 	 */
197 	st_pool_size   = m68k_round_page(st_pool_size);
198 	st_pool_phys   = stphysize - st_pool_size;
199 	stphysize      = st_pool_phys;
200 
201 	machineid      = id;
202 	esym           = esym_addr;
203 
204 	/*
205 	 * the kernel ends at end() or esym.
206 	 */
207 	if(esym == NULL)
208 		end_loaded = (u_int)end;
209 	else end_loaded = (u_int)esym;
210 
211 	/*
212 	 * If we have enough fast-memory to put the kernel in and the
213 	 * RELOC_KERNEL option is set, do it!
214 	 */
215 	if((reloc_kernel != 0) && (ttphysize >= end_loaded))
216 		kbase = ttphystart;
217 	else kbase = 0;
218 
219 	/*
220 	 * update these as soon as possible!
221 	 */
222 	PAGE_SIZE  = NBPG;
223 	PAGE_MASK  = NBPG-1;
224 	PAGE_SHIFT = PG_SHIFT;
225 
226 	/*
227 	 * Determine the type of machine we are running on. This needs
228 	 * to be done early (and before initcpu())!
229 	 */
230 	set_machtype();
231 
232 	/*
233 	 * Initialize cpu specific stuff
234 	 */
235 	initcpu();
236 
237 	/*
238 	 * We run the kernel from ST memory at the moment.
239 	 * The kernel segment table is put just behind the loaded image.
240 	 * pstart: start of usable ST memory
241 	 * avail : size of ST memory available.
242 	 */
243 	pstart = (u_int)end_loaded;
244 	pstart = m68k_round_page(pstart);
245 	avail  = stphysize - pstart;
246 
247 	/*
248 	 * Calculate the number of pages needed for Sysseg.
249 	 * For the 68030, we need 256 descriptors (segment-table-entries).
250 	 * This easily fits into one page.
251 	 * For the 68040, both the level-1 and level-2 descriptors are
252 	 * stored into Sysseg. We currently handle a maximum sum of MAXKL2SIZE
253 	 * level-1 & level-2 tables.
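	 * Since one page holds (NPTEPG / SG4_LEV2SIZE) such tables, this
	 * amounts to MAXKL2SIZE / (NPTEPG / SG4_LEV2SIZE) pages.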
254 	 */
255 #if defined(M68040) || defined(M68060)
256 	if (mmutype == MMU_68040)
257 		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
258 	else
259 #endif
260 		kstsize = 1;
261 	/*
262 	 * allocate the kernel segment table
263 	 */
264 	Sysseg     = (st_entry_t *)pstart;
265 	Sysseg_pa  = (u_int)Sysseg + kbase;
266 	pstart    += kstsize * NBPG;
267 	avail     -= kstsize * NBPG;
268 
269 	/*
270 	 * Determine the number of pte's we need for extras like
271 	 * ST I/O maps.
272 	 */
273 	ptextra = btoc(STIO_SIZE);
274 
275 	/*
276 	 * If present, add pci areas
277 	 */
278 	if (machineid & ATARI_HADES)
279 		ptextra += btoc(PCI_CONF_SIZE + PCI_IO_SIZE + PCI_VGA_SIZE);
280 
281 	/*
282 	 * The 'pt' (the initial kernel pagetable) has to map the kernel and
283 	 * the I/O areas. The various I/O areas are mapped (virtually) at
284 	 * the top of the address space mapped by 'pt' (ie. just below Sysmap).
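	 *
	 * Note that 'ptsize' is computed in bytes: Sysptsize pages plus
	 * enough extra pages to hold the 'ptextra' I/O pte's.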
285 	 */
286 	pt      = (pt_entry_t *)pstart;
287 	ptsize  = (Sysptsize + howmany(ptextra, NPTEPG)) << PGSHIFT;
288 	pstart += ptsize;
289 	avail  -= ptsize;
290 
291 	/*
292 	 * allocate kernel page table map
293 	 */
294 	Sysptmap = (pt_entry_t *)pstart;
295 	pstart  += NBPG;
296 	avail   -= NBPG;
297 
298 	/*
299 	 * Set Sysmap; mapped after page table pages. Because I too (LWP)
300 	 * didn't understand the reason for this, I borrowed the following
301 	 * (slightly modified) comment from mac68k/locore.s:
302 	 * LAK:  There seems to be some confusion here about the next line,
303 	 * so I'll explain.  The kernel needs some way of dynamically modifying
304 	 * the page tables for its own virtual memory.  What it does is that it
305 	 * has a page table map.  This page table map is mapped right after the
306 	 * kernel itself (in our implementation; in HP's it was after the I/O
307 	 * space). Therefore, the first three (or so) entries in the segment
308 	 * table point to the first three pages of the page tables (which
309 	 * point to the kernel) and the next entry in the segment table points
310 	 * to the page table map (this is done later).  Therefore, the value
311 	 * of the pointer "Sysmap" will be something like 16M*3 = 48M.  When
312 	 * the kernel addresses this pointer (e.g., Sysmap[0]), it will get
313 	 * the first longword of the first page map (== pt[0]).  Since the
314 	 * page map mirrors the segment table, addressing any index of Sysmap
315 	 * will give you a PTE of the page maps which map the kernel.
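	 * In other words: 'ptsize' bytes of pte's map ptsize <<
	 * (SEGSHIFT - PGSHIFT) bytes of virtual space (assuming 4-byte
	 * pte's), so Sysmap lies at the first virtual address beyond the
	 * range mapped by 'pt'.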
316 	 */
317 	Sysmap = (u_int *)(ptsize << (SEGSHIFT - PGSHIFT));
318 
319 	/*
320 	 * Initialize segment tables
321 	 */
322 #if defined(M68040) || defined(M68060)
323 	if (mmutype == MMU_68040)
324 		mmu040_setup(Sysseg, kstsize, pt, ptsize, Sysptmap, 1, kbase);
325 	else
326 #endif /* defined(M68040) || defined(M68060) */
327 		mmu030_setup(Sysseg, kstsize, pt, ptsize, Sysptmap, 1, kbase);
328 
329 	/*
330 	 * initialize kernel page table page(s).
331 	 * Assume load at VA 0.
332 	 * - Text pages are RO
333 	 * - Page zero is invalid
334 	 */
335 	pg_proto = (0 + kbase) | PG_RO | PG_V;
336 	pg       = pt;
337 	*pg++ = PG_NV; pg_proto += NBPG;
338 	for(i = NBPG; i < (u_int)etext; i += NBPG, pg_proto += NBPG)
339 		*pg++ = pg_proto;
340 
341 	/*
342 	 * data, bss and dynamic tables are read/write
343 	 */
344 	pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V;
345 
346 #if defined(M68040) || defined(M68060)
347 	/*
348 	 * Map the kernel segment table cache invalidated for
349 	 * these machines (for the 68040 not strictly necessary, but
350 	 * recommended by Motorola; for the 68060 mandatory)
351 	 */
352 	if (mmutype == MMU_68040) {
353 	    for (; i < (u_int)Sysseg; i += NBPG, pg_proto += NBPG)
354 		*pg++ = pg_proto;
355 	    pg_proto = (pg_proto & ~PG_CCB) | PG_CI;
356 	    for (; i < (u_int)&Sysseg[kstsize * NPTEPG]; i += NBPG,
357 							 pg_proto += NBPG)
358 		*pg++ = pg_proto;
359 	    pg_proto = (pg_proto & ~PG_CI) | PG_CCB;
360 	}
361 #endif /* defined(M68040) || defined(M68060) */
362 
363 	/*
364 	 * go till end of data allocated so far
365 	 * plus proc0 u-area (to be allocated)
366 	 */
367 	for(; i < pstart + USPACE; i += NBPG, pg_proto += NBPG)
368 		*pg++ = pg_proto;
369 
370 	/*
371 	 * invalidate remainder of kernel PT
372 	 */
373 	while(pg < &pt[ptsize/sizeof(pt_entry_t)])
374 		*pg++ = PG_NV;
375 
376 	/*
377 	 * Map various I/O areas
378 	 */
379 	map_io_areas(pt, ptsize, ptextra);
380 
381 	/*
382 	 * Save KVA of proc0 user-area and allocate it
383 	 */
384 	proc0paddr = pstart;
385 	pstart    += USPACE;
386 	avail     -= USPACE;
387 
388 	/*
389 	 * At this point, virtual and physical allocation start to diverge.
390 	 */
391 	vstart     = pstart;
392 
393 	/*
394 	 * Map the allocated space in ST-ram now. In the contiguous case, there
395 	 * is no need to make a distinction between virtual and physical
396 	 * addresses. But I make it anyway to be prepared.
397 	 * Physical space is already reserved!
398 	 */
399 	st_pool_virt = vstart;
400 	pg           = &pt[vstart / NBPG];
401 	pg_proto     = st_pool_phys | PG_RW | PG_CI | PG_V;
402 	vstart      += st_pool_size;
403 	while(pg_proto < (st_pool_phys + st_pool_size)) {
404 		*pg++     = pg_proto;
405 		pg_proto += NBPG;
406 	}
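	/*
	 * Note: the comparison in the loop above works because the pte
	 * flag bits in 'pg_proto' all lie below the page frame (< NBPG).
	 */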
407 
408 	/*
409 	 * Map physical page_zero and page-zero+1 (First ST-ram page). We need
410 	 * to reference it in the reboot code. Two pages are mapped, because
411 	 * we must make sure 'doboot()' is contained in them (see the tricky
412 	 * copying there....).
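	 * The second pte below gets a frame address of NBPG, i.e. physical
	 * page 1.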
413 	 */
414 	page_zero  = vstart;
415 	pg         = &pt[vstart / NBPG];
416 	*pg++      = PG_RW | PG_CI | PG_V;
417 	vstart    += NBPG;
418 	*pg        = PG_RW | PG_CI | PG_V | NBPG;
419 	vstart    += NBPG;
420 
421 	lowram  = 0 >> PGSHIFT; /* XXX */
422 
423 	/*
424 	 * Fill in usable segments. The page indexes will be initialized
425 	 * later when all reservations are made.
426 	 */
427 	usable_segs[0].start = 0;
428 	usable_segs[0].end   = stphysize;
429 	usable_segs[1].start = ttphystart;
430 	usable_segs[1].end   = ttphystart + ttphysize;
431 	usable_segs[2].start = usable_segs[2].end = 0; /* End of segments! */
432 
433 	if(kbase) {
434 		/*
435 		 * First page of ST-ram is unusable, reserve the space
436 		 * for the kernel in the TT-ram segment.
437 		 * Note: Because physical page-zero is partially mapped to ROM
438 		 *       by hardware, it is unusable.
439 		 */
440 		usable_segs[0].start  = NBPG;
441 		usable_segs[1].start += pstart;
442 	}
443 	else usable_segs[0].start += pstart;
444 
445 	/*
446 	 * As all segment sizes are now valid, calculate page indexes and
447 	 * available physical memory.
448 	 */
449 	usable_segs[0].first_page = 0;
450 	for (i = 1; usable_segs[i].start; i++) {
451 		usable_segs[i].first_page  = usable_segs[i-1].first_page;
452 		usable_segs[i].first_page +=
453 			(usable_segs[i-1].end - usable_segs[i-1].start) / NBPG;
454 	}
455 	for (i = 0, physmem = 0; usable_segs[i].start; i++)
456 		physmem += usable_segs[i].end - usable_segs[i].start;
457 	physmem >>= PGSHIFT;
458 
459 	/*
460 	 * get the pmap module in sync with reality.
461 	 */
462 	pmap_bootstrap(vstart, stio_addr, ptextra);
463 
464 	/*
465 	 * Prepare to enable the MMU.
466 	 * Setup and load SRP nolimit, share global, 4 byte PTE's
467 	 */
468 	protorp[0] = 0x80000202;
469 	protorp[1] = (u_int)Sysseg + kbase;	/* + segtable address */
470 	Sysseg_pa  = (u_int)Sysseg + kbase;
471 
472 	cpu_init_kcorehdr(kbase);
473 
474 	/*
475 	 * copy over the kernel (and all now initialized variables)
476 	 * to fastram.  DON'T use bcopy(); this beast is much larger
477 	 * than 128k !
478 	 */
479 	if(kbase) {
480 		register u_long	*lp, *le, *fp;
481 
482 		lp = (u_long *)0;
483 		le = (u_long *)pstart;
484 		fp = (u_long *)kbase;
485 		while(lp < le)
486 			*fp++ = *lp++;
487 	}
488 #if defined(M68040) || defined(M68060)
489 	if (mmutype == MMU_68040) {
490 		/*
491 		 * movel Sysseg_pa,a0;
492 		 * movec a0,SRP;
493 		 * pflusha;
494 		 * movel #$0xc000,d0;
495 		 * movec d0,TC
496 		 */
497 		if (cputype == CPU_68060) {
498 			/* XXX: Need the branch cache be cleared? */
499 			asm volatile (".word 0x4e7a,0x0002;"
500 				      "orl #0x400000,d0;"
501 				      ".word 0x4e7b,0x0002" : : : "d0");
502 		}
503 		asm volatile ("movel %0,a0;"
504 			      ".word 0x4e7b,0x8807" : : "a" (Sysseg_pa) : "a0");
505 		asm volatile (".word 0xf518" : : );
506 		asm volatile ("movel #0xc000,d0;"
507 			      ".word 0x4e7b,0x0003" : : : "d0" );
508 	} else
509 #endif
510 	{
511 		asm volatile ("pmove %0@,srp" : : "a" (&protorp[0]));
512 		/*
513 		 * setup and load TC register.
514 		 * enable_cpr, enable_srp, pagesize=8k,
515 		 * A = 8 bits, B = 11 bits
516 		 */
517 		tc = 0x82d08b00;
518 		asm volatile ("pmove %0@,tc" : : "a" (&tc));
519 	}
520 
521 	/* Is this to fool the optimizer?? */
522 	i = *(int *)proc0paddr;
523 	*(volatile int *)proc0paddr = i;
524 
525 	/*
526 	 * Initialize the "u-area" pages.
527 	 * Must initialize p_addr before autoconfig or the
528 	 * fault handler will get a NULL reference.
529 	 */
530 	bzero((u_char *)proc0paddr, USPACE);
531 	proc0.p_addr = (struct user *)proc0paddr;
532 	curproc = &proc0;
533 	curpcb  = &((struct user *)proc0paddr)->u_pcb;
534 
535 	/*
536 	 * Get the hardware into a defined state
537 	 */
538 	atari_hwinit();
539 
540 	/*
541 	 * Initialize stmem allocator
542 	 */
543 	init_stmem();
544 
545 	/*
546 	 * Initialize interrupt mapping.
547 	 */
548 	intr_init();
549 }
550 
551 /*
552  * Try to figure out what type of machine we are running on.
553  * Note: This module runs *before* the io-mapping is set up!
554  */
555 static void
556 set_machtype()
557 {
558 	stio_addr = 0xff8000;	/* XXX: For TT & Falcon only */
559 	if(badbaddr((caddr_t)&MFP2->mf_gpip, sizeof(char))) {
560 		/*
561 		 * Watch out! We can also have a Hades with < 16Mb
562 		 * RAM here...
563 		 */
564 		if(!badbaddr((caddr_t)&MFP->mf_gpip, sizeof(char))) {
565 			machineid |= ATARI_FALCON;
566 			return;
567 		}
568 	}
569 	if(!badbaddr((caddr_t)(PCI_CONFB_PHYS + PCI_CONFM_PHYS), sizeof(char)))
570 		machineid |= ATARI_HADES;
571 	else machineid |= ATARI_TT;
572 }
573 
574 static void
575 atari_hwinit()
576 {
577 	/*
578 	 * Initialize the sound chip
579 	 */
580 	ym2149_init();
581 
582 	/*
583 	 * Make sure that the midi acia will not generate an interrupt
584 	 * unless something attaches to it. We cannot do this for the
585 	 * keyboard acia because this breaks the '-d' option of the
586 	 * booter...
587 	 */
588 	MDI->ac_cs = 0;
589 
590 	/*
591 	 * Initialize both MFP chips (if both present!) to generate
592 	 * auto-vectored interrupts with EOI. The active-edge registers are
593 	 * set up. The interrupt enable registers are set to disable all
594 	 * interrupts.
595 	 */
596 	MFP->mf_iera  = MFP->mf_ierb = 0;
597 	MFP->mf_imra  = MFP->mf_imrb = 0;
598 	MFP->mf_aer   = MFP->mf_ddr  = 0;
599 	MFP->mf_vr    = 0x40;
600 	if(machineid & (ATARI_TT|ATARI_HADES)) {
601 		MFP2->mf_iera = MFP2->mf_ierb = 0;
602 		MFP2->mf_imra = MFP2->mf_imrb = 0;
603 		MFP2->mf_aer  = 0x80;
604 		MFP2->mf_vr   = 0x50;
605 	}
606 	if(machineid & ATARI_TT) {
607 		/*
608 		 * Initialize the SCU, to enable interrupts on the SCC (ipl5),
609 		 * MFP (ipl6) and softints (ipl1).
610 		 */
611 		SCU->sys_mask = SCU_SYS_SOFT;
612 		SCU->vme_mask = SCU_MFP | SCU_SCC;
613 #ifdef DDB
614 		/*
615 		 * This allows people with the correct hardware modification
616 		 * to drop into the debugger from an NMI.
617 		 */
618 		SCU->sys_mask |= SCU_IRQ7;
619 #endif
620 	}
621 
622 #if NPCI > 0
623 	if(machineid & ATARI_HADES) {
624 		/*
625 		 * Configure PCI-bus
626 		 */
627 		init_pci_bus();
628 	}
629 #endif
630 
631 }
632 
633 /*
634  * Do the dull work of mapping the various I/O areas. They MUST be cache
635  * inhibited!
636  * All I/O areas are virtually mapped at the end of the pt-table.
637  */
638 static void
639 map_io_areas(pt, ptsize, ptextra)
640 pt_entry_t	*pt;
641 u_int		ptsize;		/* Size of 'pt' in bytes	*/
642 u_int		ptextra;	/* #of additional I/O pte's	*/
643 {
644 	vaddr_t		ioaddr;
645 	pt_entry_t	*pg, *epg;
646 	pt_entry_t	pg_proto;
647 	u_long		mask;
648 
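	/*
	 * Start the I/O mappings in the last 'ptextra' pages mapped by
	 * 'pt', i.e. at the top of the kernel page table range (just below
	 * Sysmap).
	 */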
649 	ioaddr = ((ptsize / sizeof(pt_entry_t)) - ptextra) * NBPG;
650 
651 	/*
652 	 * Map ST-IO area
653 	 */
654 	stio_addr = ioaddr;
655 	ioaddr   += STIO_SIZE;
656 	pg        = &pt[stio_addr / NBPG];
657 	epg       = &pg[btoc(STIO_SIZE)];
658 	pg_proto  = STIO_PHYS | PG_RW | PG_CI | PG_V;
659 	while(pg < epg) {
660 		*pg++     = pg_proto;
661 		pg_proto += NBPG;
662 	}
663 
664 	/*
665 	 * Map PCI areas
666 	 */
667 	if (machineid & ATARI_HADES) {
668 
669 		pci_conf_addr = ioaddr;
670 		ioaddr       += PCI_CONF_SIZE;
671 		pg            = &pt[pci_conf_addr / NBPG];
672 		epg           = &pg[btoc(PCI_CONF_SIZE)];
673 		mask          = PCI_CONFM_PHYS;
674 		pg_proto      = PCI_CONFB_PHYS | PG_RW | PG_CI | PG_V;
675 		for(; pg < epg; mask >>= 1)
676 			*pg++ = pg_proto | mask;
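		/*
		 * Each successive config page ORs in the next lower bit of
		 * PCI_CONFM_PHYS; presumably each of these address bits
		 * selects a different device (IDSEL) in configuration space.
		 */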
677 
678 		pci_io_addr   = ioaddr;
679 		ioaddr       += PCI_IO_SIZE;
680 		epg           = &pg[btoc(PCI_IO_SIZE)];
681 		pg_proto      = PCI_IO_PHYS | PG_RW | PG_CI | PG_V;
682 		while(pg < epg) {
683 			*pg++     = pg_proto;
684 			pg_proto += NBPG;
685 		}
686 
687 		pci_mem_addr  = ioaddr;
688 		ioaddr       += PCI_VGA_SIZE;
689 		epg           = &pg[btoc(PCI_VGA_SIZE)];
690 		pg_proto      = PCI_VGA_PHYS | PG_RW | PG_CI | PG_V;
691 		while(pg < epg) {
692 			*pg++     = pg_proto;
693 			pg_proto += NBPG;
694 		}
695 	}
696 }
697 
698 /*
699  * Used by dumpconf() to get the size of the machine-dependent panic-dump
700  * header in disk blocks.
701  */
702 int
703 cpu_dumpsize()
704 {
705 	int	size;
706 
707 	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
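	/* Round up to a whole disk block and convert to a block count. */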
708 	return (btodb(roundup(size, dbtob(1))));
709 }
710 
711 /*
712  * Called by dumpsys() to dump the machine-dependent header.
713  * XXX: Assumes that it will all fit in one diskblock.
714  */
715 int
716 cpu_dump(dump, p_blkno)
717 int	(*dump) __P((dev_t, daddr_t, caddr_t, size_t));
718 daddr_t	*p_blkno;
719 {
720 	int		buf[dbtob(1)/sizeof(int)];
721 	int		error;
722 	kcore_seg_t	*kseg_p;
723 	cpu_kcore_hdr_t	*chdr_p;
724 
725 	kseg_p = (kcore_seg_t *)buf;
726 	chdr_p = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*kseg_p)) / sizeof(int)];
727 
728 	/*
729 	 * Generate a segment header
730 	 */
731 	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
732 	kseg_p->c_size = dbtob(1) - ALIGN(sizeof(*kseg_p));
733 
734 	/*
735 	 * Add the md header
736 	 */
737 	*chdr_p = cpu_kcore_hdr;
738 	error = dump(dumpdev, *p_blkno, (caddr_t)buf, dbtob(1));
739 	*p_blkno += 1;
740 	return (error);
741 }
742 
743 #if (M68K_NPHYS_RAM_SEGS < NMEM_SEGS)
744 #error "Configuration error: M68K_NPHYS_RAM_SEGS < NMEM_SEGS"
745 #endif
746 /*
747  * Initialize the cpu_kcore_hdr.
748  */
749 static void
750 cpu_init_kcorehdr(kbase)
751 u_long	kbase;
752 {
753 	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
754 	struct m68k_kcore_hdr *m = &h->un._m68k;
755 	extern char end[];
756 	extern char machine[];
757 	int	i;
758 
759 	bzero(&cpu_kcore_hdr, sizeof(cpu_kcore_hdr));
760 
761 	/*
762 	 * Initialize the `dispatcher' portion of the header.
763 	 */
764 	strcpy(h->name, machine);
765 	h->page_size = NBPG;
766 	h->kernbase = KERNBASE;
767 
768 	/*
769 	 * Fill in information about our MMU configuration.
770 	 */
771 	m->mmutype	= mmutype;
772 	m->sg_v		= SG_V;
773 	m->sg_frame	= SG_FRAME;
774 	m->sg_ishift	= SG_ISHIFT;
775 	m->sg_pmask	= SG_PMASK;
776 	m->sg40_shift1	= SG4_SHIFT1;
777 	m->sg40_mask2	= SG4_MASK2;
778 	m->sg40_shift2	= SG4_SHIFT2;
779 	m->sg40_mask3	= SG4_MASK3;
780 	m->sg40_shift3	= SG4_SHIFT3;
781 	m->sg40_addr1	= SG4_ADDR1;
782 	m->sg40_addr2	= SG4_ADDR2;
783 	m->pg_v		= PG_V;
784 	m->pg_frame	= PG_FRAME;
785 
786 	/*
787 	 * Initialize pointer to kernel segment table.
788 	 */
789 	m->sysseg_pa = (u_int)Sysseg + kbase;
790 
791 	/*
792 	 * Initialize relocation value such that:
793 	 *
794 	 *	pa = (va - KERNBASE) + reloc
795 	 */
796 	m->reloc = kbase;
797 
798 	/*
799 	 * Define the end of the relocatable range.
800 	 */
801 	m->relocend = (u_int32_t)end;
802 
803 	for (i = 0; i < NMEM_SEGS; i++) {
804 		m->ram_segs[i].start = boot_segs[i].start;
805 		m->ram_segs[i].size  = boot_segs[i].end -
806 		    boot_segs[i].start;
807 	}
808 }
809 
810 void
811 mmu030_setup(sysseg, kstsize, pt, ptsize, sysptmap, sysptsize, kbase)
812 	st_entry_t	*sysseg;	/* System segment table		*/
813 	u_int		kstsize;	/* size of 'sysseg' in pages	*/
814 	pt_entry_t	*pt;		/* Kernel page table		*/
815 	u_int		ptsize;		/* size	of 'pt' in bytes	*/
816 	pt_entry_t	*sysptmap;	/* System page table		*/
817 	u_int		sysptsize;	/* size of 'sysptmap' in pages	*/
818 	u_int		kbase;
819 {
820 	st_entry_t	sg_proto, *sg;
821 	pt_entry_t	pg_proto, *pg, *epg;
822 
823 	sg_proto = ((u_int)pt + kbase) | SG_RW | SG_V;
824 	pg_proto = ((u_int)pt + kbase) | PG_RW | PG_CI | PG_V;
825 
826 	/*
827 	 * Map the page table pages in both the HW segment table
828 	 * and the software Sysptmap.  Note that Sysptmap is also
829 	 * considered a PT page, hence the +sysptsize.
830 	 */
831 	sg  = sysseg;
832 	pg  = sysptmap;
833 	epg = &pg[(ptsize >> PGSHIFT) + sysptsize];
834 	while(pg < epg) {
835 		*sg++ = sg_proto;
836 		*pg++ = pg_proto;
837 		sg_proto += NBPG;
838 		pg_proto += NBPG;
839 	}
840 
841 	/*
842 	 * invalidate the remainder of the tables
843 	 */
844 	epg = &sysptmap[sysptsize * NPTEPG];
845 	while(pg < epg) {
846 		*sg++ = SG_NV;
847 		*pg++ = PG_NV;
848 	}
849 }
850 
851 #if defined(M68040) || defined(M68060)
852 void
853 mmu040_setup(sysseg, kstsize, pt, ptsize, sysptmap, sysptsize, kbase)
854 	st_entry_t	*sysseg;	/* System segment table		*/
855 	u_int		kstsize;	/* size of 'sysseg' in pages	*/
856 	pt_entry_t	*pt;		/* Kernel page table		*/
857 	u_int		ptsize;		/* size	of 'pt' in bytes	*/
858 	pt_entry_t	*sysptmap;	/* System page table		*/
859 	u_int		sysptsize;	/* size of 'sysptmap' in pages	*/
860 	u_int		kbase;
861 {
862 	int		i;
863 	st_entry_t	sg_proto, *sg, *esg;
864 	pt_entry_t	pg_proto;
865 
866 	/*
867 	 * First invalidate the entire "segment table" pages
868 	 * (levels 1 and 2 have the same "invalid" values).
869 	 */
870 	sg  = sysseg;
871 	esg = &sg[kstsize * NPTEPG];
872 	while (sg < esg)
873 		*sg++ = SG_NV;
874 
875 	/*
876 	 * Initialize level 2 descriptors (which immediately
877 	 * follow the level 1 table). These should map 'pt' + 'sysptmap'.
878 	 * We need:
879 	 *	NPTEPG / SG4_LEV3SIZE
880 	 * level 2 descriptors to map each of the nptpages + 1
881 	 * pages of PTEs.  Note that we set the "used" bit
882 	 * now to save the HW the expense of doing it.
883 	 */
884 	i   = ((ptsize >> PGSHIFT) + sysptsize) * (NPTEPG / SG4_LEV3SIZE);
885 	sg  = &sysseg[SG4_LEV1SIZE];
886 	esg = &sg[i];
887 	sg_proto = ((u_int)pt + kbase) | SG_U | SG_RW | SG_V;
888 	while (sg < esg) {
889 		*sg++     = sg_proto;
890 		sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t));
891 	}
892 
893 	/*
894 	 * Initialize level 1 descriptors.  We need:
895 	 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
896 	 * level 1 descriptors to map the 'num' level 2's.
897 	 */
898 	i = roundup(i, SG4_LEV2SIZE) / SG4_LEV2SIZE;
899 	protostfree = (-1 << (i + 1)) /* & ~(-1 << MAXKL2SIZE) */;
900 	sg  = sysseg;
901 	esg = &sg[i];
902 	sg_proto = ((u_int)&sg[SG4_LEV1SIZE] + kbase) | SG_U | SG_RW |SG_V;
903 	while (sg < esg) {
904 		*sg++     = sg_proto;
905 		sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t));
906 	}
907 
908 	/*
909 	 * Initialize sysptmap
910 	 */
911 	sg  = sysptmap;
912 	esg = &sg[(ptsize >> PGSHIFT) + sysptsize];
913 	pg_proto = ((u_int)pt + kbase) | PG_RW | PG_CI | PG_V;
914 	while (sg < esg) {
915 		*sg++     = pg_proto;
916 		pg_proto += NBPG;
917 	}
918 	/*
919 	 * Invalidate rest of Sysptmap page
920 	 */
921 	esg = &sysptmap[sysptsize * NPTEPG];
922 	while (sg < esg)
923 		*sg++ = SG_NV;
924 }
925 #endif /* defined(M68040) || defined(M68060) */
926 
927 #if defined(M68060)
928 int m68060_pcr_init = 0x21;	/* make this patchable */
929 #endif
930 
931 static void
932 initcpu()
933 {
934 	typedef void trapfun __P((void));
935 
936 	switch (cputype) {
937 
938 #if defined(M68060)
939 	case CPU_68060:
940 		{
941 			extern trapfun	*vectab[256];
942 			extern trapfun	buserr60, addrerr4060, fpfault;
943 #if defined(M060SP)
944 			extern u_int8_t FP_CALL_TOP[], I_CALL_TOP[];
945 #else
946 			extern trapfun illinst;
947 #endif
948 
949 			asm volatile ("movl %0,d0; .word 0x4e7b,0x0808" : :
950 					"d"(m68060_pcr_init):"d0" );
951 
952 			/* bus/addrerr vectors */
953 			vectab[2] = buserr60;
954 			vectab[3] = addrerr4060;
955 
956 #if defined(M060SP)
957 			/* integer support */
958 			vectab[61] = (trapfun *)&I_CALL_TOP[128 + 0x00];
959 
960 			/* floating point support */
961 			/*
962 			 * XXX maybe we really should run-time check for the
963 			 * stack frame format here:
964 			 */
965 			vectab[11] = (trapfun *)&FP_CALL_TOP[128 + 0x30];
966 
967 			vectab[55] = (trapfun *)&FP_CALL_TOP[128 + 0x38];
968 			vectab[60] = (trapfun *)&FP_CALL_TOP[128 + 0x40];
969 
970 			vectab[54] = (trapfun *)&FP_CALL_TOP[128 + 0x00];
971 			vectab[52] = (trapfun *)&FP_CALL_TOP[128 + 0x08];
972 			vectab[53] = (trapfun *)&FP_CALL_TOP[128 + 0x10];
973 			vectab[51] = (trapfun *)&FP_CALL_TOP[128 + 0x18];
974 			vectab[50] = (trapfun *)&FP_CALL_TOP[128 + 0x20];
975 			vectab[49] = (trapfun *)&FP_CALL_TOP[128 + 0x28];
976 #else
977 			vectab[61] = illinst;
978 #endif
979 			vectab[48] = fpfault;
980 		}
981 		break;
982 #endif /* defined(M68060) */
983 #if defined(M68040)
984 	case CPU_68040:
985 		{
986 			extern trapfun	*vectab[256];
987 			extern trapfun	buserr40, addrerr4060;
988 
989 			/* bus/addrerr vectors */
990 			vectab[2] = buserr40;
991 			vectab[3] = addrerr4060;
992 		}
993 		break;
994 #endif /* defined(M68040) */
995 #if defined(M68030) || defined(M68020)
996 	case CPU_68030:
997 	case CPU_68020:
998 		{
999 			extern trapfun	*vectab[256];
1000 			extern trapfun	buserr2030, addrerr2030;
1001 
1002 			/* bus/addrerr vectors */
1003 			vectab[2] = buserr2030;
1004 			vectab[3] = addrerr2030;
1005 		}
1006 		break;
1007 #endif /* defined(M68030) || defined(M68020) */
1008 	}
1009 
1010 	DCIS();
1011 }
1012 
1013 #ifdef DEBUG
1014 void dump_segtable __P((u_int *));
1015 void dump_pagetable __P((u_int *, u_int, u_int));
1016 u_int vmtophys __P((u_int *, u_int));
1017 
1018 void
1019 dump_segtable(stp)
1020 	u_int *stp;
1021 {
1022 	u_int *s, *es;
1023 	int shift, i;
1024 
1025 	s = stp;
1026 	{
1027 		es = s + (ATARI_STSIZE >> 2);
1028 		shift = SG_ISHIFT;
1029 	}
1030 
1031 	/*
1032 	 * XXX need changes for 68040
1033 	 */
1034 	for (i = 0; s < es; s++, i++)
1035 		if (*s & SG_V)
1036 			printf("$%08x: $%08x\t", i << shift, *s & SG_FRAME);
1037 	printf("\n");
1038 }
1039 
1040 void
1041 dump_pagetable(ptp, i, n)
1042 	u_int *ptp, i, n;
1043 {
1044 	u_int *p, *ep;
1045 
1046 	p = ptp + i;
1047 	ep = p + n;
1048 	for (; p < ep; p++, i++)
1049 		if (*p & PG_V)
1050 			printf("$%08x -> $%08x\t", i, *p & PG_FRAME);
1051 	printf("\n");
1052 }
1053 
1054 u_int
1055 vmtophys(ste, vm)
1056 	u_int *ste, vm;
1057 {
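	/*
	 * Two-level (68851/68030-style) walk: fetch the segment table entry
	 * for 'vm', then index into the page table it points to and combine
	 * the page frame with the offset within the page.
	 */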
1058 	ste = (u_int *) (*(ste + (vm >> SEGSHIFT)) & SG_FRAME);
1059 	ste += (vm & SG_PMASK) >> PGSHIFT;
1060 	return((*ste & -NBPG) | (vm & (NBPG - 1)));
1061 }
1062 
1063 #endif
1064