/*	$NetBSD: hpc_machdep.c,v 1.18 2001/08/02 14:42:08 toshii Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * machdep.c
 *
 * Machine dependent functions for kernel setup
 *
 * This file needs a lot of work.
 *
 * Created      : 17/09/94
 */
/*
 * hpc_machdep.c
 */

#include "opt_cputypes.h"
#include "opt_ddb.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/reboot.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/exec.h>

#include <dev/cons.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#ifndef DB_ELFSIZE
#error Must define DB_ELFSIZE!
#endif
#define ELFSIZE		DB_ELFSIZE
#include <sys/exec_elf.h>
#endif

#include <uvm/uvm.h>

#include <machine/signal.h>
#include <machine/frame.h>
#include <machine/bootconfig.h>
#include <machine/cpu.h>
#include <machine/io.h>
#include <machine/irqhandler.h>
#include <machine/katelib.h>
#include <machine/pte.h>
#include <machine/bootinfo.h>
#include <machine/undefined.h>
#include <machine/rtc.h>
#include <hpc/hpc/platid.h>
#include <hpcarm/sa11x0/sa11x0_reg.h>

#include <dev/hpc/bicons.h>

#include "opt_ipkdb.h"

/* XXX for consinit related hacks */
#include <sys/conf.h>

/*
 * Address to call from cpu_reset() to reset the machine.
 * This is machine architecture dependent as it varies depending
 * on where the ROM appears when you turn the MMU off.
 */

u_int cpu_reset_address = 0;

/* Define various stack sizes in pages */
#define IRQ_STACK_SIZE	1
#define ABT_STACK_SIZE	1
#ifdef IPKDB
#define UND_STACK_SIZE	2
#else
#define UND_STACK_SIZE	1
#endif
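/*
 * These sizes are in pages.  initarm() allocates the corresponding
 * stacks with valloc_pages() and later points the banked r13 of each
 * exception mode at the top of its stack with set_stackptr().
 */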

BootConfig bootconfig;		/* Boot config storage */
struct bootinfo *bootinfo, bootinfo_storage;
static char booted_kernel_storage[80];
char *booted_kernel = booted_kernel_storage;

paddr_t physical_start;
paddr_t physical_freestart;
paddr_t physical_freeend;
paddr_t physical_end;
u_int free_pages;
int physmem = 0;

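/*
 * cons_decl()/cons_init() come from <dev/cons.h>; they declare the
 * biconscn*() entry points and build a consdev initializer from them.
 * The boot console supplies no polling routine of its own, so
 * biconscnpollc is aliased to the generic nullcnpollc no-op.
 */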
#define biconscnpollc      nullcnpollc
cons_decl(bicons);
static struct consdev bicons = cons_init(bicons);

#ifndef PMAP_STATIC_L1S
int max_processes = 64;			/* Default number */
#endif	/* !PMAP_STATIC_L1S */


/* Physical and virtual addresses for some global pages */
pv_addr_t systempage;
pv_addr_t irqstack;
pv_addr_t undstack;
pv_addr_t abtstack;
pv_addr_t kernelstack;

char *boot_args = NULL;
char *boot_file = NULL;

vaddr_t msgbufphys;

extern u_int data_abort_handler_address;
extern u_int prefetch_abort_handler_address;
extern u_int undefined_handler_address;
extern int end;

#ifdef PMAP_DEBUG
extern int pmap_debug_level;
#endif	/* PMAP_DEBUG */

#define	KERNEL_PT_VMEM		0	/* Page table for mapping video memory */
#define	KERNEL_PT_SYS		1	/* Page table for mapping proc0 zero page */
#define	KERNEL_PT_KERNEL	2	/* Page table for mapping kernel */
#define	KERNEL_PT_IO		3	/* Page table for mapping IO */
#define	KERNEL_PT_VMDATA	4	/* Page tables for mapping kernel VM */
#define	KERNEL_PT_VMDATA_NUM	(KERNEL_VM_SIZE >> (PDSHIFT + 2))
#define	NUM_KERNEL_PTS		(KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
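/*
 * Each KERNEL_PT_VMDATA table covers 4MB of kernel VM, hence the
 * PDSHIFT + 2 shift above (assuming the usual arm32 PDSHIFT of 20),
 * so the array holds one table per 4MB of KERNEL_VM_SIZE on top of
 * the four special-purpose tables.
 */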

pt_entry_t kernel_pt_table[NUM_KERNEL_PTS];

struct user *proc0paddr;

#ifdef CPU_SA110
#define CPU_SA110_CACHE_CLEAN_SIZE (0x4000 * 2)
extern unsigned int sa110_cache_clean_addr;
extern unsigned int sa110_cache_clean_size;
static vaddr_t sa110_cc_base;
#endif	/* CPU_SA110 */
/* Non-buffered non-cacheable memory needed to enter idle mode */
vaddr_t sa11x0_idle_mem;

/* Prototypes */

void physcon_display_base	__P((u_int addr));
extern void consinit		__P((void));

void map_section	__P((vaddr_t pt, vaddr_t va, vaddr_t pa,
			     int cacheable));
void map_pagetable	__P((vaddr_t pt, vaddr_t va, vaddr_t pa));
void map_entry		__P((vaddr_t pt, vaddr_t va, vaddr_t pa));
void map_entry_nc	__P((vaddr_t pt, vaddr_t va, vaddr_t pa));
void map_entry_ro	__P((vaddr_t pt, vaddr_t va, vaddr_t pa));
vm_size_t map_chunk	__P((vaddr_t pd, vaddr_t pt, vaddr_t va,
			     vaddr_t pa, vm_size_t size, u_int acc,
			     u_int flg));

void data_abort_handler		__P((trapframe_t *frame));
void prefetch_abort_handler	__P((trapframe_t *frame));
void undefinedinstruction_bounce	__P((trapframe_t *frame));
void zero_page_readonly		__P((void));
void zero_page_readwrite	__P((void));

u_int cpu_get_control		__P((void));

void rpc_sa110_cc_setup(void);

#ifdef DEBUG_BEFOREMMU
static void fakecninit();
#endif

#ifdef BOOT_DUMP
void dumppages(char *, int);
#endif

extern int db_trapper();

extern void dump_spl_masks	__P((void));
extern pt_entry_t *pmap_pte	__P((pmap_t pmap, vaddr_t va));
extern void db_machine_init	__P((void));

extern void dumpsys	__P((void));

/*
 * void cpu_reboot(int howto, char *bootstr)
 *
 * Reboots the system
 *
 * Deal with any syncing, unmounting, dumping and shutdown hooks,
 * then reset the CPU.
 */

void
cpu_reboot(howto, bootstr)
	int howto;
	char *bootstr;
{
	/*
	 * If we are still cold then hit the air brakes
	 * and crash to earth fast
	 */
	if (cold) {
		doshutdownhooks();
		printf("Halted while still in the ICE age.\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
		printf("rebooting...\n");
		cpu_reset();
		/*NOTREACHED*/
	}

	/* Disable console buffering */
	cnpollc(1);

	/*
	 * If RB_NOSYNC was not specified sync the discs.
	 * Note: Unless cold is set to 1 here, syslogd will die during the unmount.
	 * It looks like syslogd is getting woken up only to find that it cannot
	 * page part of the binary in as the filesystem has been unmounted.
	 */
	if (!(howto & RB_NOSYNC))
		bootsync();

	/* Say NO to interrupts */
	splhigh();

	/* Do a dump if requested. */
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
		dumpsys();


	/* Run any shutdown hooks */
	doshutdownhooks();

	/* Make sure IRQs are disabled */
	IRQdisable;

	if (howto & RB_HALT) {
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	}

	printf("rebooting...\n");
	cpu_reset();
	/*NOTREACHED*/
}

/*
 *
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel
 */

u_int
initarm(argc, argv, bi)
	int argc;
	char **argv;
	struct bootinfo *bi;
{
	int loop;
	u_int kerneldatasize, symbolsize;
	u_int l1pagetable;
	u_int l2pagetable;
	vaddr_t freemempos;
	extern char page0[], page0_end[];
	pv_addr_t kernel_l1pt;
	pv_addr_t kernel_ptpt;
#ifdef DDB
	Elf_Shdr *sh;
#endif

	/*
	 * Heads up ... Setup the CPU / MMU / TLB functions
	 */
	set_cpufuncs();

#ifdef DEBUG_BEFOREMMU
	/*
	 * At this point, we cannot call the real consinit().
	 * Just call a faked-up version of consinit(), which does the same
	 * thing with the MMU disabled.
	 */
	fakecninit();
#endif

	/*
	 * XXX for now, overwrite bootconfig to hardcoded values.
	 * XXX kill bootconfig and directly call uvm_physload
	 */
	bootconfig.dram[0].address = 0xc0000000;
	bootconfig.dram[0].pages = 8192;
	bootconfig.dramblocks = 1;
	kerneldatasize = (u_int32_t)&end - (u_int32_t)KERNEL_TEXT_BASE;

	symbolsize = 0;
#ifdef DDB
	if (! memcmp(&end, "\177ELF", 4)) {
		sh = (Elf_Shdr *)((char *)&end + ((Elf_Ehdr *)&end)->e_shoff);
		loop = ((Elf_Ehdr *)&end)->e_shnum;
		for(; loop; loop--, sh++)
			if (sh->sh_offset > 0 &&
			    (sh->sh_offset + sh->sh_size) > symbolsize)
				symbolsize = sh->sh_offset + sh->sh_size;
	}
#endif

	printf("kernsize=0x%x\n", kerneldatasize);
	kerneldatasize += symbolsize;
	kerneldatasize = ((kerneldatasize - 1) & ~(NBPG * 4 - 1)) + NBPG * 8;
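	/*
	 * The rounding above pads the image out to the next 16KB
	 * (NBPG * 4) boundary and then leaves a further 16KB of slack
	 * after it.
	 */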

	/* parse kernel args */
	strncpy(booted_kernel_storage, *argv, sizeof(booted_kernel_storage));
	for(argc--, argv++; argc; argc--, argv++)
		switch(**argv) {
		case 'a':
			boothowto |= RB_ASKNAME;
			break;
		case 's':
			boothowto |= RB_SINGLE;
			break;
		default:
			break;
		}

	/* copy bootinfo into known kernel space */
	bootinfo_storage = *bi;
	bootinfo = &bootinfo_storage;

#ifdef BOOTINFO_FB_WIDTH
	bootinfo->fb_line_bytes = BOOTINFO_FB_LINE_BYTES;
	bootinfo->fb_width = BOOTINFO_FB_WIDTH;
	bootinfo->fb_height = BOOTINFO_FB_HEIGHT;
	bootinfo->fb_type = BOOTINFO_FB_TYPE;
#endif

	/*
	 * hpcboot has loaded me with MMU disabled.
	 * So create kernel page tables and enable MMU
	 */

	/*
	 * Set up the variables that define the availability of physical
	 * memory
	 */
	physical_start = bootconfig.dram[0].address;
	physical_freestart = physical_start
	    + (KERNEL_TEXT_BASE - KERNEL_SPACE_START) + kerneldatasize;
	physical_end = bootconfig.dram[bootconfig.dramblocks - 1].address
	    + bootconfig.dram[bootconfig.dramblocks - 1].pages * NBPG;
	physical_freeend = physical_end;
/*	free_pages = bootconfig.drampages;*/

	for (loop = 0; loop < bootconfig.dramblocks; ++loop)
		physmem += bootconfig.dram[loop].pages;

	/* XXX handle UMA framebuffer memory */

	/* Use the first 1MB to allocate things */
	freemempos = 0xc0000000;
	memset((void *)0xc0000000, 0, KERNEL_TEXT_BASE - 0xc0000000);

	/*
	 * Right, we have the bottom meg of memory mapped to 0x00000000
	 * so we can get at it. The kernel will occupy the start of it.
	 * After the kernel/args we allocate some of the fixed page tables
	 * we need to get the system going.
	 * We allocate one page directory and 8 page tables and store the
	 * physical addresses in the kernel_pt_table array.
	 * Remember that neither the L1 page directory nor the L2 page
	 * tables are the same size as a page!
	 *
	 * OK, the next bit of physical allocation may look complex but it
	 * is simple really. I have done it like this so that no memory gets
	 * wasted during the allocation of the various pages and tables that
	 * are all different sizes.
	 * The start address will be page aligned.
	 * We allocate the kernel page directory on the first free 16KB
	 * boundary we find.
	 * We allocate the kernel page tables on the first 1KB boundary we
	 * find.
	 * We allocate 9 PTs. This means that in the process we
	 * KNOW that we will encounter at least one 16KB boundary.
	 *
	 * Eventually, if the top end of the memory gets used for process L1
	 * page tables, the kernel L1 page table may be moved up there.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	/* Define macros to simplify memory allocation */
#define	valloc_pages(var, np)			\
	(var).pv_pa = (var).pv_va = freemempos;	\
	freemempos += (np) * NBPG;
#define	alloc_pages(var, np)			\
	(var) = freemempos;			\
	freemempos += (np) * NBPG;

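	/*
	 * Both macros simply carve pages off the linearly advancing
	 * freemempos.  hpcboot entered the kernel with the MMU off, so
	 * virtual and physical addresses still coincide here and
	 * valloc_pages() can set pv_va == pv_pa.
	 */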

	valloc_pages(kernel_l1pt, PD_SIZE / NBPG);
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		alloc_pages(kernel_pt_table[loop], PT_SIZE / NBPG);
	}

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate a page for the page table to map kernel page tables */
	valloc_pages(kernel_ptpt, PT_SIZE / NBPG);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa, irqstack.pv_va);
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa, abtstack.pv_va);
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa, undstack.pv_va);
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa, kernelstack.pv_va);
#endif

	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / NBPG);

	/*
	 * XXX Actually, we only need virtual space and don't need
	 * XXX physical memory for sa110_cc_base and sa11x0_idle_mem.
	 */
#ifdef CPU_SA110
	/*
	 * XXX totally stuffed hack to work round problems introduced
	 * in recent versions of the pmap code. Due to the calls used there
	 * we cannot allocate virtual memory during bootstrap.
	 */
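	/*
	 * Burn single pages until freemempos hits an address aligned to
	 * CPU_SA110_CACHE_CLEAN_SIZE, then claim the remainder of the
	 * cache clean window in one go.
	 */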
	for(;;) {
		alloc_pages(sa110_cc_base, 1);
		if (! (sa110_cc_base & (CPU_SA110_CACHE_CLEAN_SIZE - 1)))
			break;
	}
	{
		vaddr_t dummy;
		alloc_pages(dummy, CPU_SA110_CACHE_CLEAN_SIZE / NBPG - 1);
	}
	sa110_cache_clean_addr = sa110_cc_base;
	sa110_cache_clean_size = CPU_SA110_CACHE_CLEAN_SIZE / 2;
#endif	/* CPU_SA110 */

	alloc_pages(sa11x0_idle_mem, 1);

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table\n");
#endif

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 page tables into the L1 page table */
	map_pagetable(l1pagetable, 0x00000000,
	    kernel_pt_table[KERNEL_PT_SYS]);
	map_pagetable(l1pagetable, KERNEL_SPACE_START,
	    kernel_pt_table[KERNEL_PT_KERNEL]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
		map_pagetable(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
	map_pagetable(l1pagetable, PROCESS_PAGE_TBLS_BASE,
	    kernel_ptpt.pv_pa);
#define SAIPIO_BASE		0xd0000000		/* XXX XXX */
	map_pagetable(l1pagetable, SAIPIO_BASE,
	    kernel_pt_table[KERNEL_PT_IO]);


#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel code/data */
	l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];

	/*
	 * XXX there is no ELF header to find RO region.
	 * XXX What should we do?
	 */
#if 0
	if (N_GETMAGIC(kernexec[0]) == ZMAGIC) {
		logical = map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
		    physical_start, kernexec->a_text,
		    AP_KR, PT_CACHEABLE);
		logical += map_chunk(l1pagetable, l2pagetable,
		    KERNEL_TEXT_BASE + logical, physical_start + logical,
		    kerneldatasize - kernexec->a_text, AP_KRW, PT_CACHEABLE);
	} else
#endif
		map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
		    KERNEL_TEXT_BASE, kerneldatasize,
		    AP_KRW, PT_CACHEABLE);

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];
	map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
	map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
	map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
	map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * NBPG, AP_KRW, PT_CACHEABLE);
	map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    PD_SIZE, AP_KRW, 0);

	/* Map the page table that maps the kernel pages */
	map_entry_nc(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa);

	/* Map a page for entering idle mode */
	map_entry_nc(l2pagetable, sa11x0_idle_mem, sa11x0_idle_mem);

	/*
	 * Map entries in the page table used to map PTE's
	 * Basically every kernel page table gets mapped here
	 */
	/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
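	/*
	 * A PTE lives at byte offset (va >> PGSHIFT) * sizeof(pt_entry_t)
	 * within the PTPT, which for the 4-byte PTEs used here is what
	 * va >> (PGSHIFT - 2) computes.
	 */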
	l2pagetable = kernel_ptpt.pv_pa;
	map_entry_nc(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
	    kernel_pt_table[KERNEL_PT_SYS]);
	map_entry_nc(l2pagetable, (KERNEL_SPACE_START >> (PGSHIFT-2)),
	    kernel_pt_table[KERNEL_PT_KERNEL]);
	map_entry_nc(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
	    kernel_pt_table[KERNEL_PT_KERNEL]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop) {
		map_entry_nc(l2pagetable, ((KERNEL_VM_BASE +
		    (loop * 0x00400000)) >> (PGSHIFT-2)),
		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
	}
	map_entry_nc(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
	    kernel_ptpt.pv_pa);
	map_entry_nc(l2pagetable, (SAIPIO_BASE >> (PGSHIFT-2)),
	    kernel_pt_table[KERNEL_PT_IO]);

	/*
	 * Map the system page in the kernel page table for the bottom 1Meg
	 * of the virtual memory map.
	 */
	l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
	map_entry(l2pagetable, 0x0000000, systempage.pv_pa);

	/* Map any I/O modules here, as we don't have real bus_space_map() */
	printf("mapping IO...");
	l2pagetable = kernel_pt_table[KERNEL_PT_IO];
	map_entry_nc(l2pagetable, SACOM3_BASE, SACOM3_HW_BASE);

#ifdef CPU_SA110
	l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];
	map_chunk(0, l2pagetable, sa110_cache_clean_addr,
	    0xe0000000, CPU_SA110_CACHE_CLEAN_SIZE,
	    AP_KRW, PT_CACHEABLE);
#endif
	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	printf("done.\n");

	/* Right set up the vectors at the bottom of page 0 */
	memcpy((char *)systempage.pv_va, page0, page0_end - page0);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	printf("init subsystems: stacks ");

	set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + IRQ_STACK_SIZE * NBPG);
	set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ABT_STACK_SIZE * NBPG);
	set_stackptr(PSR_UND32_MODE, undstack.pv_va + UND_STACK_SIZE * NBPG);
#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("kstack V%08lx P%08lx\n", kernelstack.pv_va,
		    kernelstack.pv_pa);
#endif	/* PMAP_DEBUG */

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler. Until then we will use a handler that just panics but
	 * tells us why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
	printf("vectors ");
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	printf("%08x %08x %08x\n", data_abort_handler_address,
	    prefetch_abort_handler_address, undefined_handler_address);

	/* Initialise the undefined instruction handlers */
	printf("undefined ");
	undefined_init();

	/* Set the page table address. */
	setttb(kernel_l1pt.pv_pa);

#ifdef BOOT_DUMP
	dumppages((char *)0xc0000000, 16 * NBPG);
	dumppages((char *)0xb0100000, 64); /* XXX */
#endif
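	/*
	 * The mask/value pair below is believed to set the standard ARM
	 * control register bits M (MMU), C (D-cache), W (write buffer)
	 * and I (I-cache), along with the should-be-one 32-bit
	 * program/data space and late-abort bits, while clearing the
	 * rest of the masked field.
	 */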
	/* Enable MMU, I-cache, D-cache, write buffer. */
	cpufunc_control(0x337f, 0x107d);

	if (bootinfo->bi_cnuse == BI_CNUSE_SERIAL)
		consinit();
	else {
		/* XXX this isn't useful for normal use, but helps debugging */
		biconscninit(&bicons);
		cn_tab = &bicons;
		cn_tab->cn_pri = CN_REMOTE;
	}

#ifdef VERBOSE_INIT_ARM
	printf("freemempos=%08lx\n", freemempos);
	printf("MMU enabled. control=%08x\n", cpu_get_control());
#endif

	/* Boot strap pmap telling it where the kernel page table is */
	pmap_bootstrap((pd_entry_t *)kernel_l1pt.pv_va, kernel_ptpt);


#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110)
		rpc_sa110_cc_setup();
#endif	/* CPU_SA110 */

#ifdef IPKDB
	/* Initialise ipkdb */
	ipkdb_init();
	if (boothowto & RB_KDB)
		ipkdb_connect(0);
#endif	/* IPKDB */

#ifdef BOOT_DUMP
	dumppages((char *)kernel_l1pt.pv_va, 16);
	dumppages((char *)PROCESS_PAGE_TBLS_BASE, 16);
#endif

#ifdef DDB
	{
		static struct undefined_handler uh;

		uh.uh_handler = db_trapper;
		install_coproc_handler_static(0, &uh);
	}
	ddb_init(symbolsize, ((int *)&end), ((char *)&end) + symbolsize);
#endif

	printf("kernsize=0x%x", kerneldatasize);
	printf(" (including 0x%x symbols)\n", symbolsize);

#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif	/* DDB */

	if (bootinfo->magic == BOOTINFO_MAGIC) {
		platid.dw.dw0 = bootinfo->platid_cpu;
		platid.dw.dw1 = bootinfo->platid_machine;
	}

	/* We return the new stack pointer address */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}

void
consinit(void)
{
	static int consinit_called = 0;

	if (consinit_called != 0)
		return;

	consinit_called = 1;
	cninit();
}

#ifdef DEBUG_BEFOREMMU
cons_decl(sacom);
void
fakecninit()
{
	static struct consdev fakecntab = cons_init(sacom);
	cn_tab = &fakecntab;

	(*cn_tab->cn_init)(0);
	cn_tab->cn_pri = CN_REMOTE;
}
#endif

#ifdef CPU_SA110

/*
 * For optimal cache cleaning we need two 16K banks of
 * virtual address space that NOTHING else will access
 * and then we alternate the cache cleaning between the
 * two banks.
 * The cache cleaning code requires the two banks to be aligned
 * on a total-size boundary so the banks can be alternated by
 * EORing the size bit (assumes the bank size is a power of 2).
 */
void
rpc_sa110_cc_setup(void)
{
	int loop;
	paddr_t kaddr;
	pt_entry_t *pte;

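	/*
	 * Back every page of the clean window with the first physical
	 * page of the kernel text, read-only: reads through the window
	 * only need to displace dirty cache lines, the data fetched is
	 * irrelevant.
	 */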
	(void) pmap_extract(pmap_kernel(), KERNEL_TEXT_BASE, &kaddr);
	for (loop = 0; loop < CPU_SA110_CACHE_CLEAN_SIZE; loop += NBPG) {
		pte = pmap_pte(pmap_kernel(), (sa110_cc_base + loop));
		*pte = L2_PTE(kaddr, AP_KR);
	}
	sa110_cache_clean_addr = sa110_cc_base;
	sa110_cache_clean_size = CPU_SA110_CACHE_CLEAN_SIZE / 2;
}
#endif	/* CPU_SA110 */

#ifdef BOOT_DUMP
void dumppages(char *start, int nbytes)
{
	char *p = start;
	char *p1;
	int i;

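	/* Hex-dump 16 bytes per line, skipping lines that are all zero. */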
	for(i = nbytes; i > 0; i -= 16, p += 16) {
		for(p1 = p + 15; p != p1; p1--) {
			if (*p1)
				break;
		}
		if (! *p1)
			continue;
		printf("%08x %02x %02x %02x %02x %02x %02x %02x %02x"
		    " %02x %02x %02x %02x %02x %02x %02x %02x\n",
		    (unsigned int)p,
		    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
		    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
	}
}
#endif

/* End of machdep.c */