/*	$NetBSD: atari_init.c,v 1.34 1997/10/23 11:26:19 leo Exp $	*/

/*
 * Copyright (c) 1995 Leo Weppelman
 * Copyright (c) 1994 Michael L. Hitch
 * Copyright (c) 1993 Markus Wild
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Markus Wild.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <vm/vm.h>
#include <sys/user.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/tty.h>
#include <sys/buf.h>
#include <sys/msgbuf.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/dkbad.h>
#include <sys/reboot.h>
#include <sys/exec.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <vm/pmap.h>

#include <machine/vmparam.h>
#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/iomap.h>
#include <machine/mfp.h>
#include <machine/scu.h>
#include <machine/acia.h>
#include <machine/kcore.h>

#include <m68k/cpu.h>
#include <m68k/cacheops.h>

#include <atari/atari/intr.h>
#include <atari/atari/stalloc.h>
#include <atari/dev/ym2149reg.h>

void start_c __P((int, u_int, u_int, u_int, char *));
static void atari_hwinit __P((void));
static void cpu_init_kcorehdr __P((u_long));
static void initcpu __P((void));
static void mmu030_setup __P((st_entry_t *, u_int, pt_entry_t *, u_int,
			      pt_entry_t *, u_int, u_int));
static void map_io_areas __P((pt_entry_t *, u_int, u_int));
static void set_machtype __P((void));

#if defined(M68040) || defined(M68060)
static void mmu040_setup __P((st_entry_t *, u_int, pt_entry_t *, u_int,
			      pt_entry_t *, u_int, u_int));
#endif

/*
 * All info needed to generate a panic dump. All fields are set up by
 * start_c().
 * XXX: Should check usage of phys_segs. There is some unwanted overlap
 *      here.... Also, the name is badly chosen. Phys_segs contains the
 *      segment descriptions _after_ reservations are made.
 * XXX: 'lowram' is obsoleted by the new panicdump format
 */
static cpu_kcore_hdr_t cpu_kcore_hdr;

extern u_int	lowram;
extern u_int	Sysptsize, Sysseg_pa, proc0paddr;
extern pt_entry_t *Sysptmap;
extern st_entry_t *Sysseg;
u_int		*Sysmap;
int		machineid, mmutype, cputype, astpending;
char		*vmmap;
pv_entry_t	pv_table;
#if defined(M68040) || defined(M68060)
extern int	protostfree;
#endif

extern char		*esym;
extern struct pcb	*curpcb;

/*
 * This is the virtual address of physical page 0. Used by 'do_boot()'.
 */
vm_offset_t	page_zero;

/*
 * Crude support for allocation in ST-ram. Currently only used to allocate
 * video ram.
 * The physical address is also returned because the video init needs it to
 * set up the controller at a time when the vm-system is not yet operational,
 * so 'kvtop()' cannot be used.
 */
#ifndef ST_POOL_SIZE
#define	ST_POOL_SIZE	40			/* XXX: enough? */
#endif

u_long	st_pool_size = ST_POOL_SIZE * NBPG;	/* Patchable	*/
u_long	st_pool_virt, st_pool_phys;
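/*
 * Note: assuming the 8 KB page size this port uses (NBPG = 8192), the
 * default pool of ST_POOL_SIZE = 40 pages works out to 320 KB, carved
 * from the top of ST-ram in start_c() below.
 */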

/*
 * Should we relocate the kernel to TT-ram if possible? It is faster, but
 * it is also reported not to work on all TT's. So the default is NO.
 */
#ifndef	RELOC_KERNEL
#define	RELOC_KERNEL	0
#endif
int	reloc_kernel = RELOC_KERNEL;		/* Patchable	*/

/*
 * This is the C-level entry function; it is called from locore.s.
 * Preconditions:
 *	Interrupts are disabled
 *	PA == VA, we don't have to relocate addresses before enabling
 *		the MMU
 * 	Exec is no longer available (because we're loaded all over
 *		low memory, no ExecBase is available anymore)
 *
 * Its purpose is:
 *	Do the things that are done in locore.s in the hp300 version,
 *		this includes allocation of kernel maps and enabling the MMU.
 *
 * Some of the code in here is `stolen' from Amiga MACH, and was
 * written by Bryan Ford and Niklas Hallqvist.
 *
 * Very crude 68040 support by Michael L. Hitch.
 */

void
start_c(id, ttphystart, ttphysize, stphysize, esym_addr)
int	id;			/* Machine id				*/
u_int	ttphystart, ttphysize;	/* Start address and size of TT-ram	*/
u_int	stphysize;		/* Size of ST-ram	 		*/
char	*esym_addr;		/* Address of kernel '_esym' symbol	*/
{
	extern char	end[];
	extern void	etext __P((void));
	extern u_long	protorp[2];
	u_int		pstart;		/* Next available physical address*/
	u_int		vstart;		/* Next available virtual address */
	u_int		avail;
	pt_entry_t	*pt;
	u_int		ptsize, ptextra;
	u_int		tc, i;
	u_int		*pg;
	u_int		pg_proto;
	u_int		end_loaded;
	u_long		kbase;
	u_int		kstsize;

	boot_segs[0].start       = 0;
	boot_segs[0].end         = stphysize;
	boot_segs[1].start       = ttphystart;
	boot_segs[1].end         = ttphystart + ttphysize;
	boot_segs[2].start = boot_segs[2].end = 0; /* End of segments! */

	/*
	 * The following is a hack. We do not know how much ST memory we
	 * really need until after configuration has finished, and I have
	 * no idea how to grab ST memory at that point.
	 * The round_page() call is meant to correct errors made by
	 * binpatching!
	 */
	st_pool_size   = m68k_round_page(st_pool_size);
	st_pool_phys   = stphysize - st_pool_size;
	stphysize      = st_pool_phys;

	machineid      = id;
	esym           = esym_addr;

	/*
	 * the kernel ends at end() or esym.
	 */
	if(esym == NULL)
		end_loaded = (u_int)end;
	else end_loaded = (u_int)esym;

	/*
	 * If we have enough fast-memory to put the kernel in and the
	 * RELOC_KERNEL option is set, do it!
	 */
	if((reloc_kernel != 0) && (ttphysize >= end_loaded))
		kbase = ttphystart;
	else kbase = 0;

	/*
	 * update these as soon as possible!
	 */
	PAGE_SIZE  = NBPG;
	PAGE_MASK  = NBPG-1;
	PAGE_SHIFT = PG_SHIFT;

	/*
	 * Determine the type of machine we are running on. This needs
	 * to be done early (and before initcpu())!
	 */
	set_machtype();

	/*
	 * Initialize cpu specific stuff
	 */
	initcpu();

	/*
	 * We run the kernel from ST memory at the moment.
	 * The kernel segment table is put just behind the loaded image.
	 * pstart: start of usable ST memory
	 * avail : size of ST memory available.
	 */
	pstart = (u_int)end_loaded;
	pstart = m68k_round_page(pstart);
	avail  = stphysize - pstart;

	/*
	 * Calculate the number of pages needed for Sysseg.
	 * For the 68030, we need 256 descriptors (segment-table-entries).
	 * This easily fits into one page.
	 * For the 68040, both the level-1 and level-2 descriptors are
	 * stored into Sysseg. We currently handle a maximum sum of MAXKL2SIZE
	 * level-1 & level-2 tables.
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
#endif
		kstsize = 1;
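	/*
	 * For example, with 8 KB pages a segment-table page holds
	 * NPTEPG/SG4_LEV2SIZE = 2048/128 = 16 level-2 tables (68040
	 * pointer tables always have 128 entries), so kstsize is the
	 * number of pages MAXKL2SIZE such tables occupy; the '030 only
	 * needs its single 256-entry table, hence one page.
	 */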
	/*
	 * allocate the kernel segment table
	 */
	Sysseg     = (st_entry_t *)pstart;
	Sysseg_pa  = (u_int)Sysseg + kbase;
	pstart    += kstsize * NBPG;
	avail     -= kstsize * NBPG;

	/*
	 * Determine the number of PTEs we need for extras like the
	 * ST I/O maps.
	 */
	ptextra = btoc(STIO_SIZE);

	/*
	 * If present, add pci areas
	 */
	if (machineid & ATARI_HADES)
		ptextra += btoc(PCI_CONF_SIZE + PCI_IO_SIZE + PCI_MEM_SIZE);

	/*
	 * The 'pt' (the initial kernel pagetable) has to map the kernel and
	 * the I/O areas. The various I/O areas are mapped (virtually) at
	 * the top of the address space mapped by 'pt' (ie. just below Sysmap).
	 */
	pt      = (pt_entry_t *)pstart;
	ptsize  = (Sysptsize + howmany(ptextra, NPTEPG)) << PGSHIFT;
	pstart += ptsize;
	avail  -= ptsize;
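	/*
	 * Sizing note: 'ptsize' is in bytes. Assuming 8 KB pages, each
	 * page-table page holds NPTEPG = 2048 PTEs and therefore maps
	 * 16 MB, so the 'ptextra' I/O PTEs add howmany(ptextra, 2048)
	 * pages on top of the Sysptsize pages covering the kernel itself.
	 */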

	/*
	 * allocate kernel page table map
	 */
	Sysptmap = (pt_entry_t *)pstart;
	pstart  += NBPG;
	avail   -= NBPG;

	/*
	 * Set Sysmap; mapped after page table pages. Because I too (LWP)
	 * didn't understand the reason for this, I borrowed the following
	 * (slightly modified) comment from mac68k/locore.s:
	 * LAK:  There seems to be some confusion here about the next line,
	 * so I'll explain.  The kernel needs some way of dynamically modifying
	 * the page tables for its own virtual memory.  What it does is that it
	 * has a page table map.  This page table map is mapped right after the
	 * kernel itself (in our implementation; in HP's it was after the I/O
	 * space). Therefore, the first three (or so) entries in the segment
	 * table point to the first three pages of the page tables (which
	 * point to the kernel) and the next entry in the segment table points
	 * to the page table map (this is done later).  Therefore, the value
	 * of the pointer "Sysmap" will be something like 16M*3 = 48M.  When
	 * the kernel addresses this pointer (e.g., Sysmap[0]), it will get
	 * the first longword of the first page map (== pt[0]).  Since the
	 * page map mirrors the segment table, addressing any index of Sysmap
	 * will give you a PTE of the page maps which map the kernel.
	 */
	Sysmap = (u_int *)(ptsize << (SEGSHIFT - PGSHIFT));
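	/*
	 * Worked example (assuming 8 KB pages, PGSHIFT = 13, SEGSHIFT = 24):
	 * ptsize >> PGSHIFT is the number of PT pages and each PT page maps
	 * one 16 MB segment, so ptsize << (SEGSHIFT - PGSHIFT) equals
	 * (ptsize >> PGSHIFT) << SEGSHIFT, i.e. 16 MB times the number of
	 * PT pages; this is the "16M*3 = 48M" case mentioned above for a
	 * three-page kernel PT.
	 */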

	/*
	 * Initialize segment tables
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		mmu040_setup(Sysseg, kstsize, pt, ptsize, Sysptmap, 1, kbase);
	else
#endif /* defined(M68040) || defined(M68060) */
		mmu030_setup(Sysseg, kstsize, pt, ptsize, Sysptmap, 1, kbase);

	/*
	 * initialize kernel page table page(s).
	 * Assume load at VA 0.
	 * - Text pages are RO
	 * - Page zero is invalid
	 */
	pg_proto = (0 + kbase) | PG_RO | PG_V;
	pg       = pt;
	*pg++ = PG_NV; pg_proto += NBPG;
	for(i = NBPG; i < (u_int)etext; i += NBPG, pg_proto += NBPG)
		*pg++ = pg_proto;

	/*
	 * data, bss and dynamic tables are read/write
	 */
	pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V;

#if defined(M68040) || defined(M68060)
	/*
	 * Map the kernel segment table cache-inhibited on these machines
	 * (for the 68040 not strictly necessary, but recommended by
	 * Motorola; for the 68060 mandatory)
	 */
	if (mmutype == MMU_68040) {
	    for (; i < (u_int)Sysseg; i += NBPG, pg_proto += NBPG)
		*pg++ = pg_proto;
	    pg_proto = (pg_proto & ~PG_CCB) | PG_CI;
	    for (; i < (u_int)&Sysseg[kstsize * NPTEPG]; i += NBPG,
							 pg_proto += NBPG)
		*pg++ = pg_proto;
	    pg_proto = (pg_proto & ~PG_CI) | PG_CCB;
	}
#endif /* defined(M68040) || defined(M68060) */

	/*
	 * go till end of data allocated so far
	 * plus proc0 u-area (to be allocated)
	 */
	for(; i < pstart + USPACE; i += NBPG, pg_proto += NBPG)
		*pg++ = pg_proto;

	/*
	 * invalidate remainder of kernel PT
	 */
	while(pg < &pt[ptsize/sizeof(pt_entry_t)])
		*pg++ = PG_NV;

	/*
	 * Map various I/O areas
	 */
	map_io_areas(pt, ptsize, ptextra);

	/*
	 * Save KVA of proc0 user-area and allocate it
	 */
	proc0paddr = pstart;
	pstart    += USPACE;
	avail     -= USPACE;

	/*
	 * At this point, virtual and physical allocation starts to diverge.
	 */
	vstart     = pstart;

	/*
	 * Map the allocated space in ST-ram now. In the contig-case, there
	 * is no need to make a distinction between virtual and physical
	 * addresses. But I make it anyway to be prepared.
	 * Physical space is already reserved!
	 */
	st_pool_virt = vstart;
	pg           = &pt[vstart / NBPG];
	pg_proto     = st_pool_phys | PG_RW | PG_CI | PG_V;
	vstart      += st_pool_size;
	while(pg_proto < (st_pool_phys + st_pool_size)) {
		*pg++     = pg_proto;
		pg_proto += NBPG;
	}

	/*
	 * Map physical page_zero and page-zero+1 (First ST-ram page). We need
	 * to reference it in the reboot code. Two pages are mapped, because
	 * we must make sure 'doboot()' is contained in it (see the tricky
	 * copying there....).
	 */
	page_zero  = vstart;
	pg         = &pt[vstart / NBPG];
	*pg++      = PG_RW | PG_CI | PG_V;
	vstart    += NBPG;
	*pg        = PG_RW | PG_CI | PG_V | NBPG;
	vstart    += NBPG;
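	/*
	 * Note on the second PTE above: physical addresses are page
	 * aligned, so OR-ing in NBPG simply selects physical page 1,
	 * the page immediately after physical page 0.
	 */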

	lowram  = 0 >> PGSHIFT; /* XXX */

	/*
	 * Fill in usable segments. The page indexes will be initialized
	 * later when all reservations are made.
	 */
	usable_segs[0].start = 0;
	usable_segs[0].end   = stphysize;
	usable_segs[1].start = ttphystart;
	usable_segs[1].end   = ttphystart + ttphysize;
	usable_segs[2].start = usable_segs[2].end = 0; /* End of segments! */

	if(kbase) {
		/*
		 * The first page of ST-ram is unusable; reserve the space
		 * for the kernel in the TT-ram segment.
		 * Note: Because physical page-zero is partially mapped to ROM
		 *       by hardware, it is unusable.
		 */
		usable_segs[0].start  = NBPG;
		usable_segs[1].start += pstart;
	}
	else usable_segs[0].start += pstart;

	/*
	 * As all segment sizes are now valid, calculate page indexes and
	 * available physical memory.
	 */
	usable_segs[0].first_page = 0;
	for (i = 1; usable_segs[i].start; i++) {
		usable_segs[i].first_page  = usable_segs[i-1].first_page;
		usable_segs[i].first_page +=
			(usable_segs[i-1].end - usable_segs[i-1].start) / NBPG;
	}
	for (i = 0, physmem = 0; usable_segs[i].start; i++)
		physmem += usable_segs[i].end - usable_segs[i].start;
	physmem >>= PGSHIFT;
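	/*
	 * Example of the arithmetic: usable segments totalling 20 MB
	 * (say 4 MB of ST-ram plus 16 MB of TT-ram) leave physmem at
	 * 2560 pages once shifted right by PGSHIFT, assuming 8 KB pages.
	 */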

	/*
	 * get the pmap module in sync with reality.
	 */
	pmap_bootstrap(vstart, stio_addr, ptextra);

	/*
	 * Prepare to enable the MMU.
	 * Set up and load the SRP: no limit, share global, 4-byte PTEs.
	 */
	protorp[0] = 0x80000202;
	protorp[1] = (u_int)Sysseg + kbase;	/* + segtable address */
	Sysseg_pa  = (u_int)Sysseg + kbase;

	cpu_init_kcorehdr(kbase);

	/*
	 * copy over the kernel (and all now initialized variables)
	 * to fastram.  DON'T use bcopy(), this beast is much larger
	 * than 128k!
	 */
	if(kbase) {
		register u_long	*lp, *le, *fp;

		lp = (u_long *)0;
		le = (u_long *)pstart;
		fp = (u_long *)kbase;
		while(lp < le)
			*fp++ = *lp++;
	}
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040) {
		/*
		 * movel Sysseg_pa,a0;
		 * movec a0,SRP;
		 * pflusha;
		 * movel #$0xc000,d0;
		 * movec d0,TC
		 */
		if (cputype == CPU_68060) {
			/* XXX: Does the branch cache need to be cleared? */
			asm volatile (".word 0x4e7a,0x0002;"
				      "orl #0x400000,d0;"
				      ".word 0x4e7b,0x0002" : : : "d0");
		}
		asm volatile ("movel %0,a0;"
			      ".word 0x4e7b,0x8807" : : "a" (Sysseg_pa) : "a0");
		asm volatile (".word 0xf518" : : );
		asm volatile ("movel #0xc000,d0;"
			      ".word 0x4e7b,0x0003" : : : "d0" );
	} else
#endif
	{
		asm volatile ("pmove %0@,srp" : : "a" (&protorp[0]));
		/*
		 * set up and load TC register.
		 * enable_cpr, enable_srp, pagesize=8k,
		 * A = 8 bits, B = 11 bits
		 */
		tc = 0x82d08b00;
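		/*
		 * Field breakdown of 0x82d08b00 (68030 TC layout): bit 31
		 * enables translation, bit 25 enables the supervisor root
		 * pointer, PS = 0xd selects 2^13 = 8 KB pages, and
		 * TIA = 8 / TIB = 11 are the A and B index widths named
		 * in the comment above (8 + 11 + 13 = 32 address bits).
		 */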
		asm volatile ("pmove %0@,tc" : : "a" (&tc));
	}

	/* Is this to fool the optimizer?? */
	i = *(int *)proc0paddr;
	*(volatile int *)proc0paddr = i;

	/*
	 * Initialize the "u-area" pages.
	 * Must initialize p_addr before autoconfig or the
	 * fault handler will get a NULL reference.
	 */
	bzero((u_char *)proc0paddr, USPACE);
	proc0.p_addr = (struct user *)proc0paddr;
	curproc = &proc0;
	curpcb  = &((struct user *)proc0paddr)->u_pcb;

	/*
	 * Get the hardware into a defined state
	 */
	atari_hwinit();

	/*
	 * Initialize stmem allocator
	 */
	init_stmem();

	/*
	 * Initialize interrupt mapping.
	 */
	intr_init();
}

/*
 * Try to figure out on what type of machine we are running
 * Note: This module runs *before* the io-mapping is set up!
 */
static void
set_machtype()
{
	stio_addr = 0xff8000;	/* XXX: For TT & Falcon only */
	if(badbaddr((caddr_t)&MFP2->mf_gpip, sizeof(char))) {
		/*
		 * Watch out! We can also have a Hades with < 16Mb
		 * RAM here...
		 */
		if(!badbaddr((caddr_t)&MFP->mf_gpip, sizeof(char))) {
			machineid |= ATARI_FALCON;
			return;
		}
	}
	if(!badbaddr((caddr_t)(PCI_CONFB_PHYS + PCI_CONFM_PHYS), sizeof(char)))
		machineid |= ATARI_HADES;
	else machineid |= ATARI_TT;
}

static void
atari_hwinit()
{
	/*
	 * Initialize the sound chip
	 */
	ym2149_init();

	/*
	 * Make sure that the midi acia will not generate an interrupt
	 * unless something attaches to it. We cannot do this for the
	 * keyboard acia because this breaks the '-d' option of the
	 * booter...
	 */
	MDI->ac_cs = 0;

	/*
	 * Initialize both MFP chips (if both present!) to generate
	 * auto-vectored interrupts with EOI. The active-edge registers are
	 * set up. The interrupt enable registers are set to disable all
	 * interrupts.
	 */
	MFP->mf_iera  = MFP->mf_ierb = 0;
	MFP->mf_imra  = MFP->mf_imrb = 0;
	MFP->mf_aer   = MFP->mf_ddr  = 0;
	MFP->mf_vr    = 0x40;
	if(machineid & (ATARI_TT|ATARI_HADES)) {
		MFP2->mf_iera = MFP2->mf_ierb = 0;
		MFP2->mf_imra = MFP2->mf_imrb = 0;
		MFP2->mf_aer  = 0x80;
		MFP2->mf_vr   = 0x50;
	}
	if(machineid & ATARI_TT) {
		/*
		 * Initialize the SCU, to enable interrupts on the SCC (ipl5),
		 * MFP (ipl6) and softints (ipl1).
		 */
		SCU->sys_mask = SCU_SYS_SOFT;
		SCU->vme_mask = SCU_MFP | SCU_SCC;
#ifdef DDB
		/*
		 * This allows people with the correct hardware modification
		 * to drop into the debugger from an NMI.
		 */
		SCU->sys_mask |= SCU_IRQ7;
#endif
	}
}

/*
 * Do the dull work of mapping the various I/O areas. They MUST be
 * cache-inhibited!
 * All I/O areas are virtually mapped at the end of the pt-table.
 */
static void
map_io_areas(pt, ptsize, ptextra)
pt_entry_t	*pt;
u_int		ptsize;		/* Size of 'pt' in bytes	*/
u_int		ptextra;	/* # of additional I/O PTEs	*/
{
	vm_offset_t	ioaddr;
	pt_entry_t	*pg, *epg;
	pt_entry_t	pg_proto;
	u_long		mask;

	ioaddr = ((ptsize / sizeof(pt_entry_t)) - ptextra) * NBPG;

	/*
	 * Map ST-IO area
	 */
	stio_addr = ioaddr;
	ioaddr   += STIO_SIZE;
	pg        = &pt[stio_addr / NBPG];
	epg       = &pg[btoc(STIO_SIZE)];
	pg_proto  = STIO_PHYS | PG_RW | PG_CI | PG_V;
	while(pg < epg) {
		*pg++     = pg_proto;
		pg_proto += NBPG;
	}

	/*
	 * Map PCI areas
	 */
	if (machineid & ATARI_HADES) {

		pci_conf_addr = ioaddr;
		ioaddr       += PCI_CONF_SIZE;
		pg            = &pt[pci_conf_addr / NBPG];
		epg           = &pg[btoc(PCI_CONF_SIZE)];
		mask          = PCI_CONFM_PHYS;
		pg_proto      = PCI_CONFB_PHYS | PG_RW | PG_CI | PG_V;
		for(; pg < epg; mask >>= 1)
			*pg++ = pg_proto | mask;
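		/*
		 * Each successive configuration page maps PCI_CONFB_PHYS
		 * with PCI_CONFM_PHYS shifted right by one more bit OR-ed
		 * in, so consecutive pages decode to different physical
		 * configuration addresses, presumably one per IDSEL line
		 * on the Hades PCI bus.
		 */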

		pci_io_addr   = ioaddr;
		ioaddr       += PCI_IO_SIZE;
		epg           = &pg[btoc(PCI_IO_SIZE)];
		pg_proto      = PCI_IO_PHYS | PG_RW | PG_CI | PG_V;
		while(pg < epg) {
			*pg++     = pg_proto;
			pg_proto += NBPG;
		}

		pci_mem_addr  = ioaddr;
		ioaddr       += PCI_MEM_SIZE;
		epg           = &pg[btoc(PCI_MEM_SIZE)];
		pg_proto      = PCI_MEM_PHYS | PG_RW | PG_CI | PG_V;
		while(pg < epg) {
			*pg++     = pg_proto;
			pg_proto += NBPG;
		}
	}
}

/*
 * Used by dumpconf() to get the size of the machine-dependent panic-dump
 * header in disk blocks.
 */
int
cpu_dumpsize()
{
	int	size;

	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
	return (btodb(roundup(size, dbtob(1))));
}
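
/*
 * Note: with the usual DEV_BSIZE of 512, both ALIGN'ed structures fit in
 * a single disk block, so cpu_dumpsize() normally returns 1; cpu_dump()
 * below relies on the same one-block assumption.
 */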

/*
 * Called by dumpsys() to dump the machine-dependent header.
 * XXX: Assumes that it will all fit in one disk block.
 */
int
cpu_dump(dump, p_blkno)
int	(*dump) __P((dev_t, daddr_t, caddr_t, size_t));
daddr_t	*p_blkno;
{
	int		buf[dbtob(1)/sizeof(int)];
	int		error;
	kcore_seg_t	*kseg_p;
	cpu_kcore_hdr_t	*chdr_p;

	kseg_p = (kcore_seg_t *)buf;
	chdr_p = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*kseg_p)) / sizeof(int)];

	/*
	 * Generate a segment header
	 */
	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg_p->c_size = dbtob(1) - ALIGN(sizeof(*kseg_p));

	/*
	 * Add the md header
	 */
	*chdr_p = cpu_kcore_hdr;
	error = dump(dumpdev, *p_blkno, (caddr_t)buf, dbtob(1));
	*p_blkno += 1;
	return (error);
}

#if (M68K_NPHYS_RAM_SEGS < NMEM_SEGS)
#error "Configuration error: M68K_NPHYS_RAM_SEGS < NMEM_SEGS"
#endif
/*
 * Initialize the cpu_kcore_header.
 */
static void
cpu_init_kcorehdr(kbase)
u_long	kbase;
{
	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	extern char end[];
	extern char machine[];
	int	i;

	bzero(&cpu_kcore_hdr, sizeof(cpu_kcore_hdr));

	/*
	 * Initialize the `dispatcher' portion of the header.
	 */
	strcpy(h->name, machine);
	h->page_size = NBPG;
	h->kernbase = KERNBASE;

	/*
	 * Fill in information about our MMU configuration.
	 */
	m->mmutype	= mmutype;
	m->sg_v		= SG_V;
	m->sg_frame	= SG_FRAME;
	m->sg_ishift	= SG_ISHIFT;
	m->sg_pmask	= SG_PMASK;
	m->sg40_shift1	= SG4_SHIFT1;
	m->sg40_mask2	= SG4_MASK2;
	m->sg40_shift2	= SG4_SHIFT2;
	m->sg40_mask3	= SG4_MASK3;
	m->sg40_shift3	= SG4_SHIFT3;
	m->sg40_addr1	= SG4_ADDR1;
	m->sg40_addr2	= SG4_ADDR2;
	m->pg_v		= PG_V;
	m->pg_frame	= PG_FRAME;

	/*
	 * Initialize pointer to kernel segment table.
	 */
	m->sysseg_pa = (u_int)Sysseg + kbase;

	/*
	 * Initialize relocation value such that:
	 *
	 *	pa = (va - KERNBASE) + reloc
	 */
	m->reloc = kbase;

	/*
	 * Define the end of the relocatable range.
	 */
	m->relocend = (u_int32_t)end;

	for (i = 0; i < NMEM_SEGS; i++) {
		m->ram_segs[i].start = boot_segs[i].start;
		m->ram_segs[i].size  = boot_segs[i].end -
		    boot_segs[i].start;
	}
}

void
mmu030_setup(sysseg, kstsize, pt, ptsize, sysptmap, sysptsize, kbase)
	st_entry_t	*sysseg;	/* System segment table		*/
	u_int		kstsize;	/* size of 'sysseg' in pages	*/
	pt_entry_t	*pt;		/* Kernel page table		*/
	u_int		ptsize;		/* size	of 'pt' in bytes	*/
	pt_entry_t	*sysptmap;	/* System page table		*/
	u_int		sysptsize;	/* size of 'sysptmap' in pages	*/
	u_int		kbase;
{
	st_entry_t	sg_proto, *sg;
	pt_entry_t	pg_proto, *pg, *epg;

	sg_proto = ((u_int)pt + kbase) | SG_RW | SG_V;
	pg_proto = ((u_int)pt + kbase) | PG_RW | PG_CI | PG_V;

	/*
	 * Map the page table pages in both the HW segment table
	 * and the software Sysptmap.  Note that Sysptmap is also
	 * considered a PT page, hence the +sysptsize.
	 */
	sg  = sysseg;
	pg  = sysptmap;
	epg = &pg[(ptsize >> PGSHIFT) + sysptsize];
	while(pg < epg) {
		*sg++ = sg_proto;
		*pg++ = pg_proto;
		sg_proto += NBPG;
		pg_proto += NBPG;
	}

	/*
	 * invalidate the remainder of the tables
	 */
	epg = &sysptmap[sysptsize * NPTEPG];
	while(pg < epg) {
		*sg++ = SG_NV;
		*pg++ = PG_NV;
	}
}

#if defined(M68040) || defined(M68060)
void
mmu040_setup(sysseg, kstsize, pt, ptsize, sysptmap, sysptsize, kbase)
	st_entry_t	*sysseg;	/* System segment table		*/
	u_int		kstsize;	/* size of 'sysseg' in pages	*/
	pt_entry_t	*pt;		/* Kernel page table		*/
	u_int		ptsize;		/* size	of 'pt' in bytes	*/
	pt_entry_t	*sysptmap;	/* System page table		*/
	u_int		sysptsize;	/* size of 'sysptmap' in pages	*/
	u_int		kbase;
{
	int		i;
	st_entry_t	sg_proto, *sg, *esg;
	pt_entry_t	pg_proto;

	/*
	 * First invalidate the entire "segment table" pages
	 * (levels 1 and 2 have the same "invalid" values).
	 */
	sg  = sysseg;
	esg = &sg[kstsize * NPTEPG];
	while (sg < esg)
		*sg++ = SG_NV;

	/*
	 * Initialize level 2 descriptors (which immediately
	 * follow the level 1 table). These should map 'pt' + 'sysptmap'.
	 * We need:
	 *	NPTEPG / SG4_LEV3SIZE
	 * level 2 descriptors to map each of the nptpages + 1
	 * pages of PTEs.  Note that we set the "used" bit
	 * now to save the HW the expense of doing it.
	 */
	i   = ((ptsize >> PGSHIFT) + sysptsize) * (NPTEPG / SG4_LEV3SIZE);
	sg  = &sysseg[SG4_LEV1SIZE];
	esg = &sg[i];
	sg_proto = ((u_int)pt + kbase) | SG_U | SG_RW | SG_V;
	while (sg < esg) {
		*sg++     = sg_proto;
		sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t));
	}
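	/*
	 * By way of example, assuming 8 KB pages: NPTEPG = 2048 and the
	 * 68040's 32-entry page tables give SG4_LEV3SIZE = 32, so every
	 * PT page needs 2048/32 = 64 level-2 descriptors, each advancing
	 * 'sg_proto' by one level-3 table (32 PTEs) into 'pt'.
	 */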

	/*
	 * Initialize level 1 descriptors.  We need:
	 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
	 * level 1 descriptors to map the 'num' level 2's.
	 */
	i = roundup(i, SG4_LEV2SIZE) / SG4_LEV2SIZE;
	protostfree = (-1 << (i + 1)) /* & ~(-1 << MAXKL2SIZE) */;
	sg  = sysseg;
	esg = &sg[i];
	sg_proto = ((u_int)&sg[SG4_LEV1SIZE] + kbase) | SG_U | SG_RW | SG_V;
	while (sg < esg) {
		*sg++     = sg_proto;
		sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t));
	}

	/*
	 * Initialize sysptmap
	 */
	sg  = sysptmap;
	esg = &sg[(ptsize >> PGSHIFT) + sysptsize];
	pg_proto = ((u_int)pt + kbase) | PG_RW | PG_CI | PG_V;
	while (sg < esg) {
		*sg++     = pg_proto;
		pg_proto += NBPG;
	}
	/*
	 * Invalidate rest of Sysptmap page
	 */
	esg = &sysptmap[sysptsize * NPTEPG];
	while (sg < esg)
		*sg++ = SG_NV;
}
#endif /* defined(M68040) || defined(M68060) */

#if defined(M68060)
int m68060_pcr_init = 0x21;	/* make this patchable */
#endif

static void
initcpu()
{
	typedef void trapfun __P((void));

	switch (cputype) {

#if defined(M68060)
	case CPU_68060:
		{
			extern trapfun	*vectab[256];
			extern trapfun	buserr60, addrerr4060, fpfault;
#if defined(M060SP)
			extern u_int8_t FP_CALL_TOP[], I_CALL_TOP[];
#else
			extern trapfun illinst;
#endif

			asm volatile ("movl %0,d0; .word 0x4e7b,0x0808" : :
					"d"(m68060_pcr_init):"d0" );

			/* bus/addrerr vectors */
			vectab[2] = buserr60;
			vectab[3] = addrerr4060;

#if defined(M060SP)
			/* integer support */
			vectab[61] = (trapfun *)&I_CALL_TOP[128 + 0x00];

			/* floating point support */
			/*
			 * XXX maybe we really should run-time check for the
			 * stack frame format here:
			 */
			vectab[11] = (trapfun *)&FP_CALL_TOP[128 + 0x30];

			vectab[55] = (trapfun *)&FP_CALL_TOP[128 + 0x38];
			vectab[60] = (trapfun *)&FP_CALL_TOP[128 + 0x40];

			vectab[54] = (trapfun *)&FP_CALL_TOP[128 + 0x00];
			vectab[52] = (trapfun *)&FP_CALL_TOP[128 + 0x08];
			vectab[53] = (trapfun *)&FP_CALL_TOP[128 + 0x10];
			vectab[51] = (trapfun *)&FP_CALL_TOP[128 + 0x18];
			vectab[50] = (trapfun *)&FP_CALL_TOP[128 + 0x20];
			vectab[49] = (trapfun *)&FP_CALL_TOP[128 + 0x28];
#else
			vectab[61] = illinst;
#endif
			vectab[48] = fpfault;
		}
		break;
#endif /* defined(M68060) */
#if defined(M68040)
	case CPU_68040:
		{
			extern trapfun	*vectab[256];
			extern trapfun	buserr40, addrerr4060;

			/* bus/addrerr vectors */
			vectab[2] = buserr40;
			vectab[3] = addrerr4060;
		}
		break;
#endif /* defined(M68040) */
#if defined(M68030) || defined(M68020)
	case CPU_68030:
	case CPU_68020:
		{
			extern trapfun	*vectab[256];
			extern trapfun	buserr2030, addrerr2030;

			/* bus/addrerr vectors */
			vectab[2] = buserr2030;
			vectab[3] = addrerr2030;
		}
		break;
#endif /* defined(M68030) || defined(M68020) */
	}

	DCIS();
}

#ifdef DEBUG
void
dump_segtable(stp)
	u_int *stp;
{
	u_int *s, *es;
	int shift, i;

	s = stp;
	{
		es = s + (ATARI_STSIZE >> 2);
		shift = SG_ISHIFT;
	}

	/*
	 * XXX need changes for 68040
	 */
	for (i = 0; s < es; s++, i++)
		if (*s & SG_V)
			printf("$%08lx: $%08lx\t", i << shift, *s & SG_FRAME);
	printf("\n");
}

void
dump_pagetable(ptp, i, n)
	u_int *ptp, i, n;
{
	u_int *p, *ep;

	p = ptp + i;
	ep = p + n;
	for (; p < ep; p++, i++)
		if (*p & PG_V)
			printf("$%08lx -> $%08lx\t", i, *p & PG_FRAME);
	printf("\n");
}

u_int
vmtophys(ste, vm)
	u_int *ste, vm;
{
	ste = (u_int *) (*(ste + (vm >> SEGSHIFT)) & SG_FRAME);
	ste += (vm & SG_PMASK) >> PGSHIFT;
	return((*ste & -NBPG) | (vm & (NBPG - 1)));
}

#endif