/*	$NetBSD: hpc_machdep.c,v 1.14 2001/06/19 13:45:55 wiz Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * machdep.c
 *
 * Machine dependent functions for kernel setup
 *
 * This file needs a lot of work.
 *
 * Created      : 17/09/94
 */
/*
 * hpc_machdep.c
 */

#include "opt_cputypes.h"
#include "opt_ddb.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/reboot.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/exec.h>

#include <dev/cons.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#ifndef DB_ELFSIZE
#error Must define DB_ELFSIZE!
#endif
#define ELFSIZE		DB_ELFSIZE
#include <sys/exec_elf.h>
#endif

#include <uvm/uvm.h>

#include <machine/signal.h>
#include <machine/frame.h>
#include <machine/bootconfig.h>
#include <machine/cpu.h>
#include <machine/io.h>
#include <machine/irqhandler.h>
#include <machine/katelib.h>
#include <machine/pte.h>
#include <machine/bootinfo.h>
#include <machine/undefined.h>
#include <machine/rtc.h>
#include <hpc/hpc/platid.h>
#include <hpcarm/sa11x0/sa11x0_reg.h>

#include <dev/hpc/bicons.h>

#include "opt_ipkdb.h"

/* XXX for consinit related hacks */
#include <sys/conf.h>

/*
 * Address to call from cpu_reset() to reset the machine.
 * This is machine architecture dependent as it varies depending
 * on where the ROM appears when you turn the MMU off.
 */

u_int cpu_reset_address = 0;

/* Define various stack sizes in pages */
#define IRQ_STACK_SIZE	1
#define ABT_STACK_SIZE	1
#ifdef IPKDB
#define UND_STACK_SIZE	2
#else
#define UND_STACK_SIZE	1
#endif

BootConfig bootconfig;		/* Boot config storage */
struct bootinfo *bootinfo, bootinfo_storage;
char booted_kernel[80];

paddr_t physical_start;
paddr_t physical_freestart;
paddr_t physical_freeend;
paddr_t physical_end;
u_int free_pages;
int physmem = 0;

#define biconscnpollc      nullcnpollc
cons_decl(bicons);
static struct consdev bicons = cons_init(bicons);

#ifndef PMAP_STATIC_L1S
int max_processes = 64;			/* Default number */
#endif	/* !PMAP_STATIC_L1S */


/* Physical and virtual addresses for some global pages */
pv_addr_t systempage;
pv_addr_t irqstack;
pv_addr_t undstack;
pv_addr_t abtstack;
pv_addr_t kernelstack;

char *boot_args = NULL;
char *boot_file = NULL;

vm_offset_t msgbufphys;

extern u_int data_abort_handler_address;
extern u_int prefetch_abort_handler_address;
extern u_int undefined_handler_address;
extern int end;

#ifdef PMAP_DEBUG
extern int pmap_debug_level;
#endif	/* PMAP_DEBUG */

#define	KERNEL_PT_VMEM		0	/* Page table for mapping video memory */
#define	KERNEL_PT_SYS		1	/* Page table for mapping proc0 zero page */
#define	KERNEL_PT_KERNEL	2	/* Page table for mapping kernel */
#define	KERNEL_PT_IO		3	/* Page table for mapping IO */
#define	KERNEL_PT_VMDATA	4	/* Page tables for mapping kernel VM */
#define	KERNEL_PT_VMDATA_NUM	(KERNEL_VM_SIZE >> (PDSHIFT + 2))
#define	NUM_KERNEL_PTS		(KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
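/*
 * A sketch of the KERNEL_PT_VMDATA_NUM arithmetic, assuming the usual
 * arm32 section shift of PDSHIFT = 20: KERNEL_VM_SIZE >> (PDSHIFT + 2)
 * is KERNEL_VM_SIZE / 4MB, i.e. one kernel_pt_table slot per 4MB of
 * kernel VM, which matches the loop * 0x00400000 stride used when these
 * tables are wired into the L1 further down in initarm().
 */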

pt_entry_t kernel_pt_table[NUM_KERNEL_PTS];

struct user *proc0paddr;

#ifdef CPU_SA110
#define CPU_SA110_CACHE_CLEAN_SIZE (0x4000 * 2)
extern unsigned int sa110_cache_clean_addr;
extern unsigned int sa110_cache_clean_size;
static vaddr_t sa110_cc_base;
#endif	/* CPU_SA110 */
/* Non-buffered, non-cacheable memory needed to enter idle mode */
vaddr_t sa11x0_idle_mem;

/* Prototypes */

void physcon_display_base	__P((u_int addr));
extern void consinit		__P((void));

void map_section	__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa,
			     int cacheable));
void map_pagetable	__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
void map_entry		__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
void map_entry_nc	__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
void map_entry_ro	__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
vm_size_t map_chunk	__P((vm_offset_t pd, vm_offset_t pt, vm_offset_t va,
			     vm_offset_t pa, vm_size_t size, u_int acc,
			     u_int flg));

void data_abort_handler		__P((trapframe_t *frame));
void prefetch_abort_handler	__P((trapframe_t *frame));
void undefinedinstruction_bounce	__P((trapframe_t *frame));
void zero_page_readonly		__P((void));
void zero_page_readwrite	__P((void));

u_int cpu_get_control		__P((void));

void rpc_sa110_cc_setup(void);

#ifdef DEBUG_BEFOREMMU
static void fakecninit();
#endif

#ifdef BOOT_DUMP
void dumppages(char *, int);
#endif

extern int db_trapper();

extern void dump_spl_masks	__P((void));
extern pt_entry_t *pmap_pte	__P((pmap_t pmap, vm_offset_t va));
extern void db_machine_init	__P((void));

extern void dumpsys	__P((void));

/*
 * void cpu_reboot(int howto, char *bootstr)
 *
 * Reboots the system
 *
 * Deal with any syncing, unmounting, dumping and shutdown hooks,
 * then reset the CPU.
 */

void
cpu_reboot(howto, bootstr)
	int howto;
	char *bootstr;
{
	/*
	 * If we are still cold then hit the air brakes
	 * and crash to earth fast
	 */
	if (cold) {
		doshutdownhooks();
		printf("Halted while still in the ICE age.\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
		printf("rebooting...\n");
		cpu_reset();
		/*NOTREACHED*/
	}

	/* Disable console buffering */
	cnpollc(1);

	/*
	 * If RB_NOSYNC was not specified sync the discs.
	 * Note: Unless cold is set to 1 here, syslogd will die during the unmount.
	 * It looks like syslogd is getting woken up only to find that it cannot
	 * page part of the binary in as the filesystem has been unmounted.
	 */
	if (!(howto & RB_NOSYNC))
		bootsync();

	/* Say NO to interrupts */
	splhigh();

	/* Do a dump if requested. */
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
		dumpsys();


	/* Run any shutdown hooks */
	doshutdownhooks();

	/* Make sure IRQ's are disabled */
	IRQdisable;

	if (howto & RB_HALT) {
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	}

	printf("rebooting...\n");
	cpu_reset();
	/*NOTREACHED*/
}

/*
 *
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes:
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel.
 */

u_int
initarm(argc, argv, bi)
	int argc;
	char **argv;
	struct bootinfo *bi;
{
	int loop;
	u_int kerneldatasize, symbolsize;
	u_int l1pagetable;
	u_int l2pagetable;
	vm_offset_t freemempos;
	extern char page0[], page0_end[];
	pv_addr_t kernel_l1pt;
	pv_addr_t kernel_ptpt;
#ifdef DDB
	Elf_Shdr *sh;
#endif

	/*
	 * Heads up ... Set up the CPU / MMU / TLB functions
	 */
	set_cpufuncs();

#ifdef DEBUG_BEFOREMMU
	/*
	 * At this point, we cannot call the real consinit().
	 * Just call a faked-up version of consinit(), which does the job
	 * with the MMU disabled.
	 */
	fakecninit();
#endif

	/*
	 * XXX for now, overwrite bootconfig with hardcoded values.
	 * XXX kill bootconfig and directly call uvm_physload
	 */
	bootconfig.dram[0].address = 0xc0000000;
	bootconfig.dram[0].pages = 8192;
	bootconfig.dramblocks = 1;
	kerneldatasize = (u_int32_t)&end - (u_int32_t)KERNEL_TEXT_BASE;

	symbolsize = 0;
#ifdef DDB
	if (! memcmp(&end, "\177ELF", 4)) {
		sh = (Elf_Shdr *)((char *)&end + ((Elf_Ehdr *)&end)->e_shoff);
		loop = ((Elf_Ehdr *)&end)->e_shnum;
		for(; loop; loop--, sh++)
			if (sh->sh_offset > 0 &&
			    (sh->sh_offset + sh->sh_size) > symbolsize)
				symbolsize = sh->sh_offset + sh->sh_size;
	}
#endif

	printf("kernsize=0x%x\n", kerneldatasize);
	kerneldatasize += symbolsize;
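	/*
	 * The rounding below, assuming NBPG is 4KB: round kerneldatasize
	 * up to the next 16KB boundary and then add a further 16KB of
	 * slack, e.g. 0x123456 -> 0x124000 -> 0x128000.
	 */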
	kerneldatasize = ((kerneldatasize - 1) & ~(NBPG * 4 - 1)) + NBPG * 8;

	/* parse kernel args */
	strncpy(booted_kernel, *argv, sizeof(booted_kernel));
	for(argc--, argv++; argc; argc--, argv++)
		switch(**argv) {
		case 'a':
			boothowto |= RB_ASKNAME;
			break;
		case 's':
			boothowto |= RB_SINGLE;
			break;
		default:
			break;
		}

	/* copy bootinfo into known kernel space */
	bootinfo_storage = *bi;
	bootinfo = &bootinfo_storage;

#ifdef BOOTINFO_FB_WIDTH
	bootinfo->fb_line_bytes = BOOTINFO_FB_LINE_BYTES;
	bootinfo->fb_width = BOOTINFO_FB_WIDTH;
	bootinfo->fb_height = BOOTINFO_FB_HEIGHT;
	bootinfo->fb_type = BOOTINFO_FB_TYPE;
#endif

	/*
	 * hpcboot has loaded me with the MMU disabled,
	 * so create the kernel page tables and enable the MMU.
	 */

	/*
	 * Set up the variables that define the availability of physical
	 * memory.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_freestart = physical_start
	    + (KERNEL_TEXT_BASE - KERNEL_SPACE_START) + kerneldatasize;
	physical_end = bootconfig.dram[bootconfig.dramblocks - 1].address
	    + bootconfig.dram[bootconfig.dramblocks - 1].pages * NBPG;
	physical_freeend = physical_end;
/*	free_pages = bootconfig.drampages;*/
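	/*
	 * For example, with the hardcoded bootconfig above (one DRAM
	 * block of 8192 pages at 0xc0000000) and assuming 4KB pages,
	 * physical_start is 0xc0000000 and physical_end is
	 * 0xc0000000 + 8192 * 0x1000 = 0xc2000000, i.e. 32MB of RAM.
	 */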

	for (loop = 0; loop < bootconfig.dramblocks; ++loop)
		physmem += bootconfig.dram[loop].pages;

	/* XXX handle UMA framebuffer memory */

	/* Use the first 1MB to allocate things */
	freemempos = 0xc0000000;
	memset((void *)0xc0000000, 0, 0x80000);

	/*
	 * Right, we have the bottom meg of memory mapped to 0x00000000
	 * so we can get at it. The kernel will occupy the start of it.
	 * After the kernel/args we allocate some of the fixed page tables
	 * we need to get the system going.
	 * We allocate one page directory and 8 page tables and store the
	 * physical addresses in the kernel_pt_table array.
	 * Remember that neither the L1 nor the L2 page tables are the
	 * same size as a page!
	 *
	 * OK, the next bit of physical allocation may look complex but it is
	 * simple really. I have done it like this so that no memory gets
	 * wasted during the allocation of various pages and tables that are
	 * all different sizes.
	 * The start address will be page aligned.
	 * We allocate the kernel page directory on the first free 16KB
	 * boundary we find.
	 * We allocate the kernel page tables on the first 1KB boundary we find.
	 * We allocate 9 PTs. This means that in the process we
	 * KNOW that we will encounter at least 1 16KB boundary.
	 *
	 * Eventually if the top end of the memory gets used for process L1
	 * page tables the kernel L1 page table may be moved up there.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)			\
	(var).pv_pa = (var).pv_va = freemempos;	\
	freemempos += (np) * NBPG;
#define	alloc_pages(var, np)			\
	(var) = freemempos;			\
	freemempos += (np) * NBPG;
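	/*
	 * Both macros simply carve (np) pages off the linear allocator
	 * freemempos; valloc_pages() also records the address as both
	 * pv_pa and pv_va, which coincide here because the MMU is still
	 * off at this point (see the hpcboot note above).
	 */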


	valloc_pages(kernel_l1pt, PD_SIZE / NBPG);
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		alloc_pages(kernel_pt_table[loop], PT_SIZE / NBPG);
	}

	/*
	 * Allocate a page for the system page mapped to V0x00000000.
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate a page for the page table to map kernel page tables */
	valloc_pages(kernel_ptpt, PT_SIZE / NBPG);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa, irqstack.pv_va);
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa, abtstack.pv_va);
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa, undstack.pv_va);
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa, kernelstack.pv_va);
#endif

	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / NBPG);

	/*
	 * XXX Actually, we only need virtual space and don't need
	 * XXX physical memory for sa110_cc_base and sa11x0_idle_mem.
	 */
#ifdef CPU_SA110
	/*
	 * XXX totally stuffed hack to work around problems introduced
	 * in recent versions of the pmap code. Due to the calls used there
	 * we cannot allocate virtual memory during bootstrap.
	 */
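	/*
	 * The loop below aligns sa110_cc_base: keep taking single pages
	 * from freemempos (discarding the unaligned ones) until the
	 * address happens to sit on a CPU_SA110_CACHE_CLEAN_SIZE
	 * boundary, then reserve the rest of the now-aligned cache-clean
	 * region in one go.
	 */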
	for(;;) {
		alloc_pages(sa110_cc_base, 1);
		if (! (sa110_cc_base & (CPU_SA110_CACHE_CLEAN_SIZE - 1)))
			break;
	}
	{
		vaddr_t dummy;
		alloc_pages(dummy, CPU_SA110_CACHE_CLEAN_SIZE / NBPG - 1);
	}
	sa110_cache_clean_addr = sa110_cc_base;
	sa110_cache_clean_size = CPU_SA110_CACHE_CLEAN_SIZE / 2;
#endif	/* CPU_SA110 */

	alloc_pages(sa11x0_idle_mem, 1);

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table\n");
#endif

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 page tables in the L1 page table */
	map_pagetable(l1pagetable, 0x00000000,
	    kernel_pt_table[KERNEL_PT_SYS]);
	map_pagetable(l1pagetable, KERNEL_SPACE_START,
	    kernel_pt_table[KERNEL_PT_KERNEL]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
		map_pagetable(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
	map_pagetable(l1pagetable, PROCESS_PAGE_TBLS_BASE,
	    kernel_ptpt.pv_pa);
#define SAIPIO_BASE		0xd0000000		/* XXX XXX */
	map_pagetable(l1pagetable, SAIPIO_BASE,
	    kernel_pt_table[KERNEL_PT_IO]);


#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel code/data */
	l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];

	/*
	 * XXX there is no ELF header to find RO region.
	 * XXX What should we do?
	 */
#if 0
	if (N_GETMAGIC(kernexec[0]) == ZMAGIC) {
		logical = map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
		    physical_start, kernexec->a_text,
		    AP_KR, PT_CACHEABLE);
		logical += map_chunk(l1pagetable, l2pagetable,
		    KERNEL_TEXT_BASE + logical, physical_start + logical,
		    kerneldatasize - kernexec->a_text, AP_KRW, PT_CACHEABLE);
	} else
#endif
		map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
		    KERNEL_TEXT_BASE, kerneldatasize,
		    AP_KRW, PT_CACHEABLE);

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];
	map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
	map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
	map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
	map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * NBPG, AP_KRW, PT_CACHEABLE);
	map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    PD_SIZE, AP_KRW, 0);

	/* Map the page table that maps the kernel pages */
	map_entry_nc(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa);

	/* Map a page for entering idle mode */
	map_entry_nc(l2pagetable, sa11x0_idle_mem, sa11x0_idle_mem);

	/*
	 * Map entries in the page table used to map PTE's
	 * Basically every kernel page table gets mapped here
	 */
	/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
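	/*
	 * A worked example of the shift, assuming 4KB pages (PGSHIFT = 12)
	 * and 4-byte PTEs: va >> (PGSHIFT - 2) == va >> 10 is the byte
	 * offset of va's PTE within the linear PTE array mapped at
	 * PROCESS_PAGE_TBLS_BASE, so each map_entry_nc() below wires the
	 * page of PTEs covering that region into the PT-of-page-tables.
	 */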
	l2pagetable = kernel_ptpt.pv_pa;
	map_entry_nc(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
	    kernel_pt_table[KERNEL_PT_SYS]);
	map_entry_nc(l2pagetable, (KERNEL_SPACE_START >> (PGSHIFT-2)),
	    kernel_pt_table[KERNEL_PT_KERNEL]);
	map_entry_nc(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
	    kernel_pt_table[KERNEL_PT_KERNEL]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop) {
		map_entry_nc(l2pagetable, ((KERNEL_VM_BASE +
		    (loop * 0x00400000)) >> (PGSHIFT-2)),
		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
	}
	map_entry_nc(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
	    kernel_ptpt.pv_pa);
	map_entry_nc(l2pagetable, (SAIPIO_BASE >> (PGSHIFT-2)),
	    kernel_pt_table[KERNEL_PT_IO]);

	/*
	 * Map the system page in the kernel page table for the bottom 1Meg
	 * of the virtual memory map.
	 */
	l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
	map_entry(l2pagetable, 0x0000000, systempage.pv_pa);

	/* Map any I/O modules here, as we don't have real bus_space_map() */
	printf("mapping IO...");
	l2pagetable = kernel_pt_table[KERNEL_PT_IO];
	map_entry_nc(l2pagetable, SACOM3_BASE, SACOM3_HW_BASE);

#ifdef CPU_SA110
	l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];
	map_chunk(0, l2pagetable, sa110_cache_clean_addr,
	    0xe0000000, CPU_SA110_CACHE_CLEAN_SIZE,
	    AP_KRW, PT_CACHEABLE);
#endif
	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	printf("done.\n");

	/* Right set up the vectors at the bottom of page 0 */
	memcpy((char *)systempage.pv_va, page0, page0_end - page0);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	printf("init subsystems: stacks ");

	set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + IRQ_STACK_SIZE * NBPG);
	set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ABT_STACK_SIZE * NBPG);
	set_stackptr(PSR_UND32_MODE, undstack.pv_va + UND_STACK_SIZE * NBPG);
#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("kstack V%08lx P%08lx\n", kernelstack.pv_va,
		    kernelstack.pv_pa);
#endif	/* PMAP_DEBUG */

	/*
	 * Well, we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler. Until then we will use a handler that just panics but
	 * tells us why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
	printf("vectors ");
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	printf("%08x %08x %08x\n", data_abort_handler_address,
	    prefetch_abort_handler_address, undefined_handler_address);

	/* Initialise the undefined instruction handlers */
	printf("undefined ");
	undefined_init();

	/* Set the page table address. */
	setttb(kernel_l1pt.pv_pa);

#ifdef BOOT_DUMP
	dumppages((char *)0xc0000000, 16 * NBPG);
	dumppages((char *)0xb0100000, 64); /* XXX */
#endif
	/* Enable MMU, I-cache, D-cache, write buffer. */
	cpufunc_control(0x337f, 0x107d);
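	/*
	 * A rough decode of the write above, assuming the standard
	 * ARM/SA-11x0 control register layout and a (clear-mask, set-bits)
	 * calling convention: within the field selected by 0x337f, the
	 * value 0x107d sets M (MMU enable), C (D-cache), W (write buffer),
	 * the should-be-one bits 4-6 and I (I-cache, bit 12), while
	 * leaving A (alignment faults), S/R (system/ROM protection) and
	 * V (high vectors) clear.
	 */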

	if (bootinfo->bi_cnuse == BI_CNUSE_SERIAL)
		consinit();
	else {
		/* XXX this isn't useful for normal use, but helps debugging */
		biconscninit(&bicons);
		cn_tab = &bicons;
		cn_tab->cn_pri = CN_REMOTE;
	}

#ifdef VERBOSE_INIT_ARM
	printf("MMU enabled. control=%08x\n", cpu_get_control());
#endif

	/* Bootstrap pmap, telling it where the kernel page table is */
	pmap_bootstrap((pd_entry_t *)kernel_l1pt.pv_va, kernel_ptpt);


#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110)
		rpc_sa110_cc_setup();
#endif	/* CPU_SA110 */

#ifdef IPKDB
	/* Initialise ipkdb */
	ipkdb_init();
	if (boothowto & RB_KDB)
		ipkdb_connect(0);
#endif	/* IPKDB */

#ifdef BOOT_DUMP
	dumppages((char *)kernel_l1pt.pv_va, 16);
	dumppages((char *)PROCESS_PAGE_TBLS_BASE, 16);
#endif

#ifdef DDB
	{
		static struct undefined_handler uh;

		uh.uh_handler = db_trapper;
		install_coproc_handler_static(0, &uh);
	}
	ddb_init(symbolsize, ((int *)&end), ((char *)&end) + symbolsize);
#endif

	printf("kernsize=0x%x", kerneldatasize);
	printf(" (including 0x%x symbols)\n", symbolsize);

#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif	/* DDB */

	if (bootinfo->magic == BOOTINFO_MAGIC) {
		platid.dw.dw0 = bootinfo->platid_cpu;
		platid.dw.dw1 = bootinfo->platid_machine;
	}

	/* We return the new stack pointer address */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}

#ifdef DEBUG_BEFOREMMU
cons_decl(sacom);
void
fakecninit()
{
	static struct consdev fakecntab = cons_init(sacom);
	cn_tab = &fakecntab;

	(*cn_tab->cn_init)(0);
	cn_tab->cn_pri = CN_REMOTE;
}
#endif

#ifdef CPU_SA110

/*
 * For optimal cache cleaning we need two 16KB banks of
 * virtual address space that NOTHING else will access,
 * and then we alternate the cache cleaning between the
 * two banks.
 * The cache cleaning code requires the 2 banks to be aligned
 * on a total-size boundary so the banks can be alternated by
 * EORing the size bit (assumes the bank size is a power of 2).
 */
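/*
 * Concretely, with CPU_SA110_CACHE_CLEAN_SIZE = 0x8000 as defined above,
 * the region is two 16KB (0x4000) banks aligned on 32KB, so the cleaning
 * code can switch banks with something like
 *	addr ^= 0x4000;		(EOR with the bank size)
 * because flipping that single address bit moves between the two halves.
 */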
void
rpc_sa110_cc_setup(void)
{
	int loop;
	paddr_t kaddr;
	pt_entry_t *pte;

	(void) pmap_extract(kernel_pmap, KERNEL_TEXT_BASE, &kaddr);
	for (loop = 0; loop < CPU_SA110_CACHE_CLEAN_SIZE; loop += NBPG) {
		pte = pmap_pte(kernel_pmap, (sa110_cc_base + loop));
		*pte = L2_PTE(kaddr, AP_KR);
	}
	sa110_cache_clean_addr = sa110_cc_base;
	sa110_cache_clean_size = CPU_SA110_CACHE_CLEAN_SIZE / 2;
}
#endif	/* CPU_SA110 */

#ifdef BOOT_DUMP
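/*
 * Debug helper: hex-dump nbytes starting at start, 16 bytes per line,
 * skipping lines that are entirely zero.
 */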
void dumppages(char *start, int nbytes)
{
	char *p = start;
	char *p1;
	int i;

	for(i = nbytes; i > 0; i -= 16, p += 16) {
		for(p1 = p + 15; p != p1; p1--) {
			if (*p1)
				break;
		}
		if (! *p1)
			continue;
		printf("%08x %02x %02x %02x %02x %02x %02x %02x %02x"
		    " %02x %02x %02x %02x %02x %02x %02x %02x\n",
		    (unsigned int)p,
		    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
		    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
	}
}
#endif

/* End of machdep.c */