xref: /netbsd-src/sys/arch/atari/atari/atari_init.c (revision 76dfffe33547c37f8bdd446e3e4ab0f3c16cea4b)
1 /*	$NetBSD: atari_init.c,v 1.21 1996/10/15 20:51:59 leo Exp $	*/
2 
3 /*
4  * Copyright (c) 1995 Leo Weppelman
5  * Copyright (c) 1994 Michael L. Hitch
6  * Copyright (c) 1993 Markus Wild
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed by Markus Wild.
20  * 4. The name of the author may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/proc.h>
38 #include <vm/vm.h>
39 #include <sys/user.h>
40 #include <sys/ioctl.h>
41 #include <sys/select.h>
42 #include <sys/tty.h>
43 #include <sys/proc.h>
44 #include <sys/buf.h>
45 #include <sys/msgbuf.h>
46 #include <sys/mbuf.h>
47 #include <sys/protosw.h>
48 #include <sys/domain.h>
49 #include <sys/dkbad.h>
50 #include <sys/reboot.h>
51 #include <sys/exec.h>
52 #include <sys/core.h>
53 #include <sys/kcore.h>
54 #include <vm/pmap.h>
55 #include <machine/vmparam.h>
56 #include <machine/pte.h>
57 #include <machine/cpu.h>
58 #include <machine/iomap.h>
59 #include <machine/mfp.h>
60 #include <machine/scu.h>
61 #include <machine/kcore.h>
62 #include <atari/atari/stalloc.h>
63 #include <atari/dev/ym2149reg.h>
64 
65 void start_c __P((int, u_int, u_int, u_int, char *));
66 static void cpu_init_kcorehdr __P((u_long));
67 static void mmu030_setup __P((st_entry_t *, u_int, pt_entry_t *, u_int,
68 			      pt_entry_t *, u_int, u_int));
69 static void map_io_areas __P((pt_entry_t *, u_int, u_int));
70 static void set_machtype __P((void));
71 
72 #if defined(M68040) || defined(M68060)
73 static void mmu040_setup __P((st_entry_t *, u_int, pt_entry_t *, u_int,
74 			      pt_entry_t *, u_int, u_int));
75 #endif
76 
77 /*
78  * All info needed to generate a panic dump. All fields are setup by
79  * start_c().
 * XXX: Should check usage of phys_segs. There is some unwanted overlap
 *      here.... Also, the name is badly chosen. Phys_segs contains the
82  *      segment descriptions _after_ reservations are made.
83  * XXX: 'lowram' is obsoleted by the new panicdump format
84  */
85 static cpu_kcore_hdr_t cpu_kcore_hdr;
86 
87 extern u_int 	lowram;
88 extern u_int	Sysptsize, Sysseg_pa, proc0paddr;
89 extern pt_entry_t *Sysptmap;
90 extern st_entry_t *Sysseg;
91 u_int		*Sysmap;
92 int		machineid, mmutype, cpu040, astpending;
93 char		*vmmap;
94 pv_entry_t	pv_table;
95 #if defined(M68040) || defined(M68060)
96 extern int	protostfree;
97 #endif
98 
99 extern char		*esym;
100 extern struct pcb	*curpcb;
101 
102 /*
103  * This is the virtual address of physical page 0. Used by 'do_boot()'.
104  */
105 vm_offset_t	page_zero;
106 
107 /*
108  * Crude support for allocation in ST-ram. Currently only used to allocate
109  * video ram.
110  * The physical address is also returned because the video init needs it to
111  * setup the controller at the time the vm-system is not yet operational so
112  * 'kvtop()' cannot be used.
113  */
114 #ifndef ST_POOL_SIZE
115 #define	ST_POOL_SIZE	40			/* XXX: enough? */
116 #endif
117 
118 u_long	st_pool_size = ST_POOL_SIZE * NBPG;	/* Patchable	*/
119 u_long	st_pool_virt, st_pool_phys;
120 
121 /*
122  * this is the C-level entry function, it's called from locore.s.
123  * Preconditions:
124  *	Interrupts are disabled
125  *	PA == VA, we don't have to relocate addresses before enabling
126  *		the MMU
127  * 	Exec is no longer available (because we're loaded all over
128  *		low memory, no ExecBase is available anymore)
129  *
 * Its purpose is:
131  *	Do the things that are done in locore.s in the hp300 version,
132  *		this includes allocation of kernel maps and enabling the MMU.
133  *
134  * Some of the code in here is `stolen' from Amiga MACH, and was
135  * written by Bryan Ford and Niklas Hallqvist.
136  *
137  * Very crude 68040 support by Michael L. Hitch.
138  */
139 
void
start_c(id, ttphystart, ttphysize, stphysize, esym_addr)
int	id;			/* Machine id				*/
u_int	ttphystart, ttphysize;	/* Start address and size of TT-ram	*/
u_int	stphysize;		/* Size of ST-ram	 		*/
char	*esym_addr;		/* Address of kernel '_esym' symbol	*/
{
	extern char	end[];
	extern void	etext __P((void));
	extern u_long	protorp[2];
	u_int		pstart;		/* Next available physical address*/
	u_int		vstart;		/* Next available virtual address */
	u_int		avail;
	pt_entry_t	*pt;
	u_int		ptsize, ptextra;
	u_int		tc, i;
	u_int		*pg;
	u_int		pg_proto;
	u_int		end_loaded;
	u_long		kbase;
	u_int		kstsize;

	/*
	 * Record the boot-time memory layout: segment 0 is ST-ram (always
	 * starting at physical address 0), segment 1 is TT-ram. A zeroed
	 * entry terminates the list.
	 */
	boot_segs[0].start       = 0;
	boot_segs[0].end         = stphysize;
	boot_segs[1].start       = ttphystart;
	boot_segs[1].end         = ttphystart + ttphysize;
	boot_segs[2].start = boot_segs[2].end = 0; /* End of segments! */

	/*
	 * The following is a hack. We do not know how much ST memory we
	 * really need until after configuration has finished. At this
	 * time I have no idea how to grab ST memory at that time.
	 * The round_page() call is meant to correct errors made by
	 * binpatching!
	 */
	st_pool_size   = atari_round_page(st_pool_size);
	st_pool_phys   = stphysize - st_pool_size;
	stphysize      = st_pool_phys;	/* ST-pool is carved off the top */

	machineid      = id;
	esym           = esym_addr;

	/*
	 * the kernel ends at end() or esym.
	 */
	if(esym == NULL)
		end_loaded = (u_int)end;
	else end_loaded = (u_int)esym;

	/*
	 * If we have enough fast-memory to put the kernel in, do it!
	 * (kbase is the physical offset the kernel image will finally
	 * live at; 0 means it stays in ST-ram.)
	 */
	if(ttphysize >= end_loaded)
		kbase = ttphystart;
	else kbase = 0;

	/*
	 * update these as soon as possible!
	 */
	PAGE_SIZE  = NBPG;
	PAGE_MASK  = NBPG-1;
	PAGE_SHIFT = PG_SHIFT;

	/*
	 * Determine the type of machine we are running on. This needs
	 * to be done early!
	 */
	set_machtype();

	/*
	 * We run the kernel from ST memory at the moment.
	 * The kernel segment table is put just behind the loaded image.
	 * pstart: start of usable ST memory
	 * avail : size of ST memory available.
	 */
	pstart = (u_int)end_loaded;
	pstart = atari_round_page(pstart);
	avail  = stphysize - pstart;

	/*
	 * Calculate the number of pages needed for Sysseg.
	 * For the 68030, we need 256 descriptors (segment-table-entries).
	 * This easily fits into one page.
	 * For the 68040, both the level-1 and level-2 descriptors are
	 * stored into Sysseg. We currently handle a maximum sum of MAXKL2SIZE
	 * level-1 & level-2 tables.
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
#endif
		kstsize = 1;
	/*
	 * allocate the kernel segment table
	 */
	Sysseg     = (st_entry_t *)pstart;
	Sysseg_pa  = (u_int)Sysseg + kbase;
	pstart    += kstsize * NBPG;
	avail     -= kstsize * NBPG;

	/*
	 * Determine the number of pte's we need for extra's like
	 * ST I/O map's.
	 */
	ptextra = btoc(STIO_SIZE);

	/*
	 * If present, add pci areas
	 */
	if (machineid & ATARI_HADES)
		ptextra += btoc(PCI_CONF_SIZE + PCI_IO_SIZE + PCI_MEM_SIZE);

	/*
	 * The 'pt' (the initial kernel pagetable) has to map the kernel and
	 * the I/O areas. The various I/O areas are mapped (virtually) at
	 * the top of the address space mapped by 'pt' (ie. just below Sysmap).
	 */
	pt      = (pt_entry_t *)pstart;
	ptsize  = (Sysptsize + howmany(ptextra, NPTEPG)) << PGSHIFT;
	pstart += ptsize;
	avail  -= ptsize;

	/*
	 * allocate kernel page table map
	 */
	Sysptmap = (pt_entry_t *)pstart;
	pstart  += NBPG;
	avail   -= NBPG;

	/*
	 * Set Sysmap; mapped after page table pages. Because I too (LWP)
	 * didn't understand the reason for this, I borrowed the following
	 * (slightly modified) comment from mac68k/locore.s:
	 * LAK:  There seems to be some confusion here about the next line,
	 * so I'll explain.  The kernel needs some way of dynamically modifying
	 * the page tables for its own virtual memory.  What it does is that it
	 * has a page table map.  This page table map is mapped right after the
	 * kernel itself (in our implementation; in HP's it was after the I/O
	 * space). Therefore, the first three (or so) entries in the segment
	 * table point to the first three pages of the page tables (which
	 * point to the kernel) and the next entry in the segment table points
	 * to the page table map (this is done later).  Therefore, the value
	 * of the pointer "Sysmap" will be something like 16M*3 = 48M.  When
	 * the kernel addresses this pointer (e.g., Sysmap[0]), it will get
	 * the first longword of the first page map (== pt[0]).  Since the
	 * page map mirrors the segment table, addressing any index of Sysmap
	 * will give you a PTE of the page maps which map the kernel.
	 */
	Sysmap = (u_int *)(ptsize << (SEGSHIFT - PGSHIFT));

	/*
	 * Initialize segment tables
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		mmu040_setup(Sysseg, kstsize, pt, ptsize, Sysptmap, 1, kbase);
	else
#endif /* defined(M68040) || defined(M68060) */
		mmu030_setup(Sysseg, kstsize, pt, ptsize, Sysptmap, 1, kbase);

	/*
	 * initialize kernel page table page(s).
	 * Assume load at VA 0.
	 * - Text pages are RO
	 * - Page zero is invalid
	 */
	pg_proto = (0 + kbase) | PG_RO | PG_V;
	pg       = pt;
	*pg++ = PG_NV; pg_proto += NBPG;
	for(i = NBPG; i < (u_int)etext; i += NBPG, pg_proto += NBPG)
		*pg++ = pg_proto;

	/*
	 * data, bss and dynamic tables are read/write
	 */
	pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V;

#if defined(M68040) || defined(M68060)
	/*
	 * Map the kernel segment table cache invalidated for
	 * these machines (for the 68040 not strictly necessary, but
	 * recommended by Motorola; for the 68060 mandatory)
	 */
	if (mmutype == MMU_68040) {
	    for (; i < (u_int)Sysseg; i += NBPG, pg_proto += NBPG)
		*pg++ = pg_proto;
	    pg_proto = (pg_proto & ~PG_CCB) | PG_CI;
	    for (; i < (u_int)&Sysseg[kstsize * NPTEPG]; i += NBPG,
							 pg_proto += NBPG)
		*pg++ = pg_proto;
	    pg_proto = (pg_proto & ~PG_CI) | PG_CCB;
	}
#endif /* defined(M68040) || defined(M68060) */

	/*
	 * go till end of data allocated so far
	 * plus proc0 u-area (to be allocated)
	 */
	for(; i < pstart + USPACE; i += NBPG, pg_proto += NBPG)
		*pg++ = pg_proto;

	/*
	 * invalidate remainder of kernel PT
	 */
	while(pg < &pt[ptsize/sizeof(pt_entry_t)])
		*pg++ = PG_NV;

	/*
	 * Map various I/O areas
	 */
	map_io_areas(pt, ptsize, ptextra);

	/*
	 * Save KVA of proc0 user-area and allocate it
	 */
	proc0paddr = pstart;
	pstart    += USPACE;
	avail     -= USPACE;

	/*
	 * At this point, virtual and physical allocation starts to divert.
	 */
	vstart     = pstart;

	/*
	 * Map the allocated space in ST-ram now. In the contig-case, there
	 * is no need to make a distinction between virtual and physical
	 * addresses. But I make it anyway to be prepared.
	 * Physical space is already reserved!
	 *
	 * Note: 'pg_proto' carries the PG_* flag bits in its low bits while
	 * being compared against a pure physical address; the loop still
	 * terminates correctly because the flag bits are smaller than NBPG
	 * and the addresses are page aligned.
	 */
	st_pool_virt = vstart;
	pg           = &pt[vstart / NBPG];
	pg_proto     = st_pool_phys | PG_RW | PG_CI | PG_V;
	vstart      += st_pool_size;
	while(pg_proto < (st_pool_phys + st_pool_size)) {
		*pg++     = pg_proto;
		pg_proto += NBPG;
	}

	/*
	 * Map physical page_zero and page-zero+1 (First ST-ram page). We need
	 * to reference it in the reboot code. Two pages are mapped, because
	 * we must make sure 'doboot()' is contained in it (see the tricky
	 * copying there....).
	 */
	page_zero  = vstart;
	pg         = &pt[vstart / NBPG];
	*pg++      = PG_RW | PG_CI | PG_V;		/* phys page 0	*/
	vstart    += NBPG;
	*pg        = PG_RW | PG_CI | PG_V | NBPG;	/* phys page 1	*/
	vstart    += NBPG;

	lowram  = 0 >> PGSHIFT; /* XXX */

	/*
	 * Fill in usable segments. The page indexes will be initialized
	 * later when all reservations are made.
	 */
	usable_segs[0].start = 0;
	usable_segs[0].end   = stphysize;
	usable_segs[1].start = ttphystart;
	usable_segs[1].end   = ttphystart + ttphysize;
	usable_segs[2].start = usable_segs[2].end = 0; /* End of segments! */

	if(kbase) {
		/*
		 * First page of ST-ram is unusable, reserve the space
		 * for the kernel in the TT-ram segment.
		 * Note: Because physical page-zero is partially mapped to ROM
		 *       by hardware, it is unusable.
		 */
		usable_segs[0].start  = NBPG;
		usable_segs[1].start += pstart;
	}
	else usable_segs[0].start += pstart;

	/*
	 * As all segment sizes are now valid, calculate page indexes and
	 * available physical memory.
	 */
	usable_segs[0].first_page = 0;
	for (i = 1; usable_segs[i].start; i++) {
		usable_segs[i].first_page  = usable_segs[i-1].first_page;
		usable_segs[i].first_page +=
			(usable_segs[i-1].end - usable_segs[i-1].start) / NBPG;
	}
	for (i = 0, physmem = 0; usable_segs[i].start; i++)
		physmem += usable_segs[i].end - usable_segs[i].start;
	physmem >>= PGSHIFT;

	/*
	 * get the pmap module in sync with reality.
	 */
	pmap_bootstrap(vstart, stio_addr, ptextra);

	/*
	 * Prepare to enable the MMU.
	 * Setup and load SRP nolimit, share global, 4 byte PTE's
	 */
	protorp[0] = 0x80000202;
	protorp[1] = (u_int)Sysseg + kbase;	/* + segtable address */
	Sysseg_pa  = (u_int)Sysseg + kbase;

	cpu_init_kcorehdr(kbase);

	/*
	 * copy over the kernel (and all now initialized variables)
	 * to fastram.  DONT use bcopy(), this beast is much larger
	 * than 128k !
	 */
	if(kbase) {
		register u_long	*lp, *le, *fp;

		lp = (u_long *)0;
		le = (u_long *)pstart;
		fp = (u_long *)kbase;
		while(lp < le)
			*fp++ = *lp++;
	}
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040) {
		/*
		 * movel Sysseg_pa,a0;
		 * movec a0,SRP;
		 * pflusha;
		 * movel #$0xc000,d0;
		 * movec d0,TC
		 */
		asm volatile ("movel %0,a0;.word 0x4e7b,0x8807" : : "a" (Sysseg_pa) : "a0");
		asm volatile (".word 0xf518" : : );
		asm volatile ("movel #0xc000,d0; .word 0x4e7b,0x0003" : : :"d0" );
	} else
#endif
	{
		asm volatile ("pmove %0@,srp" : : "a" (&protorp[0]));
		/*
		 * setup and load TC register.
		 * enable_cpr, enable_srp, pagesize=8k,
		 * A = 8 bits, B = 11 bits
		 */
		tc = 0x82d08b00;
		asm volatile ("pmove %0@,tc" : : "a" (&tc));
	}

	/* Is this to fool the optimizer?? */
	i = *(int *)proc0paddr;
	*(volatile int *)proc0paddr = i;

	/*
	 * Initialize the "u-area" pages.
	 * Must initialize p_addr before autoconfig or the
	 * fault handler will get a NULL reference.
	 */
	bzero((u_char *)proc0paddr, USPACE);
	proc0.p_addr = (struct user *)proc0paddr;
	curproc = &proc0;
	curpcb  = &((struct user *)proc0paddr)->u_pcb;

	ym2149_init();

	/*
	 * Initialize both MFP chips (if both present!) to generate
	 * auto-vectored interrupts with EOI. The active-edge registers are
	 * set up. The interrupt enable registers are set to disable all
	 * interrupts.
	 * A test on presence on the second MFP determines if this is a
	 * TT030 or a Falcon. This is added to 'machineid'.
	 */
	MFP->mf_iera  = MFP->mf_ierb = 0;
	MFP->mf_imra  = MFP->mf_imrb = 0;
	MFP->mf_aer   = MFP->mf_ddr  = 0;
	MFP->mf_vr    = 0x40;
	if(machineid & (ATARI_TT|ATARI_HADES)) {
		MFP2->mf_iera = MFP2->mf_ierb = 0;
		MFP2->mf_imra = MFP2->mf_imrb = 0;
		MFP2->mf_aer  = 0x80;
		MFP2->mf_vr   = 0x50;
	}
	if(machineid & ATARI_TT) {
		/*
		 * Initialize the SCU, to enable interrupts on the SCC (ipl5),
		 * MFP (ipl6) and softints (ipl1).
		 */
		SCU->sys_mask = SCU_MFP | SCU_SCC | SCU_SYS_SOFT;
#ifdef DDB
		/*
		 * This allows people with the correct hardware modification
		 * to drop into the debugger from an NMI.
		 */
		SCU->sys_mask |= SCU_IRQ7;
#endif

	}

	/*
	 * Initialize stmem allocator
	 */
	init_stmem();
}
540 
541 /*
542  * Try to figure out on what type of machine we are running
 * Note: This module runs *before* the MMU is set up -- the probes below
 *       reference physical addresses directly (via badbaddr()).
544  */
545 static void
546 set_machtype()
547 {
548 	if(!badbaddr((caddr_t)(PCI_CONFB_PHYS + PCI_CONFM_PHYS)))
549 		machineid |= ATARI_HADES;
550 	else {
551 		if(!badbaddr((caddr_t)&MFP2->mf_gpip))
552 			machineid |= ATARI_TT;
553 		else machineid |= ATARI_FALCON;
554 	}
555 }
556 
557 /*
558  * Do the dull work of mapping the various I/O areas. They MUST be Cache
559  * inhibited!
560  * All I/O areas are virtually mapped at the end of the pt-table.
561  */
static void
map_io_areas(pt, ptsize, ptextra)
pt_entry_t	*pt;
u_int		ptsize;		/* Size of 'pt' in bytes	*/
u_int		ptextra;	/* #of additional I/O pte's	*/
{
	vm_offset_t	ioaddr;
	pt_entry_t	*pg, *epg;
	pt_entry_t	pg_proto;
	u_long		mask;

	/*
	 * The I/O areas occupy the last 'ptextra' pte's of 'pt'; start
	 * handing out virtual addresses from there.
	 */
	ioaddr = ((ptsize / sizeof(pt_entry_t)) - ptextra) * NBPG;

	/*
	 * Map ST-IO area
	 */
	stio_addr = ioaddr;
	ioaddr   += STIO_SIZE;
	pg        = &pt[stio_addr / NBPG];
	epg       = &pg[btoc(STIO_SIZE)];
	pg_proto  = STIO_PHYS | PG_RW | PG_CI | PG_V;
	while(pg < epg) {
		*pg++     = pg_proto;
		pg_proto += NBPG;
	}

	/*
	 * Map PCI areas.
	 * Note: after the first loop below, 'pg' is left pointing just past
	 * the config pte's; the subsequent areas rely on this and compute
	 * their 'epg' relative to the advanced 'pg'.
	 */
	if (machineid & ATARI_HADES) {

		/*
		 * PCI configuration space: each page selects one device by
		 * OR-ing in a single (right-shifting) bit of PCI_CONFM_PHYS.
		 */
		pci_conf_addr = ioaddr;
		ioaddr       += PCI_CONF_SIZE;
		pg            = &pt[pci_conf_addr / NBPG];
		epg           = &pg[btoc(PCI_CONF_SIZE)];
		mask          = PCI_CONFM_PHYS;
		pg_proto      = PCI_CONFB_PHYS | PG_RW | PG_CI | PG_V;
		for(; pg < epg; mask >>= 1)
			*pg++ = pg_proto | mask;

		/*
		 * PCI I/O space.
		 */
		pci_io_addr   = ioaddr;
		ioaddr       += PCI_IO_SIZE;
		epg           = &pg[btoc(PCI_IO_SIZE)];
		pg_proto      = PCI_IO_PHYS | PG_RW | PG_CI | PG_V;
		while(pg < epg) {
			*pg++     = pg_proto;
			pg_proto += NBPG;
		}

		/*
		 * PCI memory space.
		 */
		pci_mem_addr  = ioaddr;
		ioaddr       += PCI_MEM_SIZE;
		epg           = &pg[btoc(PCI_MEM_SIZE)];
		pg_proto      = PCI_MEM_PHYS | PG_RW | PG_CI | PG_V;
		while(pg < epg) {
			*pg++     = pg_proto;
			pg_proto += NBPG;
		}
	}
}
621 
622 /*
623  * Used by dumpconf() to get the size of the machine-dependent panic-dump
624  * header in disk blocks.
625  */
626 int
627 cpu_dumpsize()
628 {
629 	int	size;
630 
631 	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
632 	return (btodb(roundup(size, dbtob(1))));
633 }
634 
635 /*
636  * Called by dumpsys() to dump the machine-dependent header.
637  * XXX: Assumes that it will all fit in one diskblock.
638  */
639 int
640 cpu_dump(dump, p_blkno)
641 int	(*dump) __P((dev_t, daddr_t, caddr_t, size_t));
642 daddr_t	*p_blkno;
643 {
644 	int		buf[dbtob(1)/sizeof(int)];
645 	int		error;
646 	kcore_seg_t	*kseg_p;
647 	cpu_kcore_hdr_t	*chdr_p;
648 
649 	kseg_p = (kcore_seg_t *)buf;
650 	chdr_p = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*kseg_p)) / sizeof(int)];
651 
652 	/*
653 	 * Generate a segment header
654 	 */
655 	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
656 	kseg_p->c_size = dbtob(1) - ALIGN(sizeof(*kseg_p));
657 
658 	/*
659 	 * Add the md header
660 	 */
661 	*chdr_p = cpu_kcore_hdr;
662 	error = dump(dumpdev, *p_blkno, (caddr_t)buf, dbtob(1));
663 	*p_blkno += 1;
664 	return (error);
665 }
666 
667 #if (NPHYS_RAM_SEGS < NMEM_SEGS)
668 #error "Configuration error: NPHYS_RAM_SEGS < NMEM_SEGS"
669 #endif
670 /*
671  * Initialize the cpu_kcore_header.
672  */
673 static void
674 cpu_init_kcorehdr(kbase)
675 u_long	kbase;
676 {
677 	int	i;
678 
679 	for (i = 0; i < NMEM_SEGS; i++) {
680 		cpu_kcore_hdr.ram_segs[i].start = boot_segs[i].start;
681 		cpu_kcore_hdr.ram_segs[i].size  = boot_segs[i].end
682 							- boot_segs[i].start;
683 	}
684 	cpu_kcore_hdr.mmutype   = mmutype;
685 	cpu_kcore_hdr.kernel_pa = kbase;
686 	cpu_kcore_hdr.sysseg_pa = (st_entry_t *)((u_int)Sysseg + kbase);
687 }
688 
689 void
690 mmu030_setup(sysseg, kstsize, pt, ptsize, sysptmap, sysptsize, kbase)
691 	st_entry_t	*sysseg;	/* System segment table		*/
692 	u_int		kstsize;	/* size of 'sysseg' in pages	*/
693 	pt_entry_t	*pt;		/* Kernel page table		*/
694 	u_int		ptsize;		/* size	of 'pt' in bytes	*/
695 	pt_entry_t	*sysptmap;	/* System page table		*/
696 	u_int		sysptsize;	/* size of 'sysptmap' in pages	*/
697 	u_int		kbase;
698 {
699 	st_entry_t	sg_proto, *sg;
700 	pt_entry_t	pg_proto, *pg, *epg;
701 
702 	sg_proto = ((u_int)pt + kbase) | SG_RW | SG_V;
703 	pg_proto = ((u_int)pt + kbase) | PG_RW | PG_CI | PG_V;
704 
705 	/*
706 	 * Map the page table pages in both the HW segment table
707 	 * and the software Sysptmap.  Note that Sysptmap is also
708 	 * considered a PT page, hence the +sysptsize.
709 	 */
710 	sg  = sysseg;
711 	pg  = sysptmap;
712 	epg = &pg[(ptsize >> PGSHIFT) + sysptsize];
713 	while(pg < epg) {
714 		*sg++ = sg_proto;
715 		*pg++ = pg_proto;
716 		sg_proto += NBPG;
717 		pg_proto += NBPG;
718 	}
719 
720 	/*
721 	 * invalidate the remainder of the tables
722 	 */
723 	epg = &sysptmap[sysptsize * NPTEPG];
724 	while(pg < epg) {
725 		*sg++ = SG_NV;
726 		*pg++ = PG_NV;
727 	}
728 }
729 
#if defined(M68040) || defined(M68060)
/*
 * Initialize the 68040/68060 MMU tables. Level-1 and level-2 descriptors
 * both live in 'sysseg' (level-2 immediately follows level-1); 'sysptmap'
 * gets one pte per kernel page table page.
 *
 * Declared 'static' at the top of this file; the definition now matches
 * that declaration.
 */
static void
mmu040_setup(sysseg, kstsize, pt, ptsize, sysptmap, sysptsize, kbase)
	st_entry_t	*sysseg;	/* System segment table		*/
	u_int		kstsize;	/* size of 'sysseg' in pages	*/
	pt_entry_t	*pt;		/* Kernel page table		*/
	u_int		ptsize;		/* size	of 'pt' in bytes	*/
	pt_entry_t	*sysptmap;	/* System page table		*/
	u_int		sysptsize;	/* size of 'sysptmap' in pages	*/
	u_int		kbase;		/* VA -> PA offset of the kernel */
{
	int		i;
	st_entry_t	sg_proto, *sg, *esg;
	pt_entry_t	pg_proto;

	/*
	 * First invalidate the entire "segment table" pages
	 * (levels 1 and 2 have the same "invalid" values).
	 */
	sg  = sysseg;
	esg = &sg[kstsize * NPTEPG];
	while (sg < esg)
		*sg++ = SG_NV;

	/*
	 * Initialize level 2 descriptors (which immediately
	 * follow the level 1 table). These should map 'pt' + 'sysptmap'.
	 * We need:
	 *	NPTEPG / SG4_LEV3SIZE
	 * level 2 descriptors to map each of the nptpages + 1
	 * pages of PTEs.  Note that we set the "used" bit
	 * now to save the HW the expense of doing it.
	 */
	i   = ((ptsize >> PGSHIFT) + sysptsize) * (NPTEPG / SG4_LEV3SIZE);
	sg  = &sysseg[SG4_LEV1SIZE];
	esg = &sg[i];
	sg_proto = ((u_int)pt + kbase) | SG_U | SG_RW | SG_V;
	while (sg < esg) {
		*sg++     = sg_proto;
		sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t));
	}

	/*
	 * Initialize level 1 descriptors.  We need:
	 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
	 * level 1 descriptors to map the 'num' level 2's.
	 */
	i = roundup(i, SG4_LEV2SIZE) / SG4_LEV2SIZE;
	/*
	 * NOTE(review): left-shifting the negative constant -1 is
	 * undefined behavior in standard C; this relies on the
	 * compiler's two's-complement shift semantics -- confirm
	 * before building with a different/newer compiler.
	 */
	protostfree = (-1 << (i + 1)) /* & ~(-1 << MAXKL2SIZE) */;
	sg  = sysseg;
	esg = &sg[i];
	sg_proto = ((u_int)&sg[SG4_LEV1SIZE] + kbase) | SG_U | SG_RW |SG_V;
	while (sg < esg) {
		*sg++     = sg_proto;
		sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t));
	}

	/*
	 * Initialize sysptmap
	 */
	sg  = sysptmap;
	esg = &sg[(ptsize >> PGSHIFT) + sysptsize];
	pg_proto = ((u_int)pt + kbase) | PG_RW | PG_CI | PG_V;
	while (sg < esg) {
		*sg++     = pg_proto;
		pg_proto += NBPG;
	}
	/*
	 * Invalidate rest of Sysptmap page
	 */
	esg = &sysptmap[sysptsize * NPTEPG];
	while (sg < esg)
		*sg++ = SG_NV;
}
#endif /* defined(M68040) || defined(M68060) */
805 
806 #ifdef DEBUG
807 void
808 dump_segtable(stp)
809 	u_int *stp;
810 {
811 	u_int *s, *es;
812 	int shift, i;
813 
814 	s = stp;
815 	{
816 		es = s + (ATARI_STSIZE >> 2);
817 		shift = SG_ISHIFT;
818 	}
819 
820 	/*
821 	 * XXX need changes for 68040
822 	 */
823 	for (i = 0; s < es; s++, i++)
824 		if (*s & SG_V)
825 			printf("$%08lx: $%08lx\t", i << shift, *s & SG_FRAME);
826 	printf("\n");
827 }
828 
829 void
830 dump_pagetable(ptp, i, n)
831 	u_int *ptp, i, n;
832 {
833 	u_int *p, *ep;
834 
835 	p = ptp + i;
836 	ep = p + n;
837 	for (; p < ep; p++, i++)
838 		if (*p & PG_V)
839 			printf("$%08lx -> $%08lx\t", i, *p & PG_FRAME);
840 	printf("\n");
841 }
842 
843 u_int
844 vmtophys(ste, vm)
845 	u_int *ste, vm;
846 {
847 	ste = (u_int *) (*(ste + (vm >> SEGSHIFT)) & SG_FRAME);
848 		ste += (vm & SG_PMASK) >> PGSHIFT;
849 	return((*ste & -NBPG) | (vm & (NBPG - 1)));
850 }
851 
852 #endif
853