/*	$NetBSD: hpc_machdep.c,v 1.46 2002/04/12 18:50:33 thorpej Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * machdep.c
 *
 * Machine dependent functions for kernel setup
 *
 * This file needs a lot of work.
 *
 * Created      : 17/09/94
 */
/*
 * hpc_machdep.c
 */

#include "opt_ddb.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/reboot.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/exec.h>

#include <dev/cons.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#ifndef DB_ELFSIZE
#error Must define DB_ELFSIZE!
#endif
#define ELFSIZE		DB_ELFSIZE
#include <sys/exec_elf.h>
#endif

#include <uvm/uvm.h>

#include <machine/signal.h>
#include <machine/frame.h>
#include <machine/bootconfig.h>
#include <machine/cpu.h>
#include <machine/io.h>
#include <machine/intr.h>
#include <arm/arm32/katelib.h>
#include <machine/bootinfo.h>
#include <arm/cpuconf.h>
#include <arm/undefined.h>
#include <machine/rtc.h>
#include <machine/platid.h>
#include <hpcarm/sa11x0/sa11x0_reg.h>

#include <dev/hpc/bicons.h>

#include "opt_ipkdb.h"

/* XXX for consinit related hacks */
#include <sys/conf.h>

/*
 * Address to call from cpu_reset() to reset the machine.
 * This is machine architecture dependent as it varies depending
 * on where the ROM appears when you turn the MMU off.
 */

u_int cpu_reset_address = 0;

/* Define various stack sizes in pages */
#define IRQ_STACK_SIZE	1
#define ABT_STACK_SIZE	1
#ifdef IPKDB
#define UND_STACK_SIZE	2
#else
#define UND_STACK_SIZE	1
#endif

BootConfig bootconfig;		/* Boot config storage */
struct bootinfo *bootinfo, bootinfo_storage;
static char booted_kernel_storage[80];
char *booted_kernel = booted_kernel_storage;

paddr_t physical_start;
paddr_t physical_freestart;
paddr_t physical_freeend;
paddr_t physical_end;
u_int free_pages;
int physmem = 0;

#ifndef PMAP_STATIC_L1S
int max_processes = 64;			/* Default number */
#endif	/* !PMAP_STATIC_L1S */


/* Physical and virtual addresses for some global pages */
pv_addr_t systempage;
pv_addr_t irqstack;
pv_addr_t undstack;
pv_addr_t abtstack;
pv_addr_t kernelstack;

char *boot_args = NULL;
char *boot_file = NULL;

vaddr_t msgbufphys;

extern u_int data_abort_handler_address;
extern u_int prefetch_abort_handler_address;
extern u_int undefined_handler_address;
extern int end;

#ifdef PMAP_DEBUG
extern int pmap_debug_level;
#endif	/* PMAP_DEBUG */

#define	KERNEL_PT_VMEM		0	/* Page table for mapping video memory */
#define	KERNEL_PT_SYS		1	/* Page table for mapping proc0 zero page */
#define	KERNEL_PT_KERNEL	2	/* Page table for mapping kernel */
#define	KERNEL_PT_IO		3	/* Page table for mapping IO */
#define	KERNEL_PT_VMDATA	4	/* Page tables for mapping kernel VM */
#define	KERNEL_PT_VMDATA_NUM	4	/* start with 16MB of KVM */
#define	NUM_KERNEL_PTS		(KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
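/*
 * A quick sketch of the arithmetic above: with KERNEL_PT_VMDATA == 4 and
 * KERNEL_PT_VMDATA_NUM == 4, NUM_KERNEL_PTS comes to 8.  Each L2 table
 * linked in initarm() below covers 4MB (0x00400000) of virtual space, so
 * the four VMDATA tables provide the initial 16MB of KVM noted above.
 */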

pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];

struct user *proc0paddr;

#ifdef CPU_SA110
#define CPU_SA110_CACHE_CLEAN_SIZE (0x4000 * 2)
extern unsigned int sa110_cache_clean_addr;
extern unsigned int sa110_cache_clean_size;
static vaddr_t sa110_cc_base;
#endif	/* CPU_SA110 */

/* Non-buffered non-cachable memory needed to enter idle mode */
extern vaddr_t sa11x0_idle_mem;

/* Prototypes */

void physcon_display_base	__P((u_int addr));
void consinit		__P((void));

void data_abort_handler		__P((trapframe_t *frame));
void prefetch_abort_handler	__P((trapframe_t *frame));
void undefinedinstruction_bounce	__P((trapframe_t *frame));

u_int cpu_get_control		__P((void));

void rpc_sa110_cc_setup(void);

#ifdef DEBUG_BEFOREMMU
static void fakecninit();
#endif

#ifdef BOOT_DUMP
void dumppages(char *, int);
#endif

extern int db_trapper();

extern void dump_spl_masks	__P((void));

extern void dumpsys	__P((void));

/*
 * void cpu_reboot(int howto, char *bootstr)
 *
 * Reboots the system
 *
 * Deal with any syncing, unmounting, dumping and shutdown hooks,
 * then reset the CPU.
 */

void
cpu_reboot(howto, bootstr)
	int howto;
	char *bootstr;
{
	/*
	 * If we are still cold then hit the air brakes
	 * and crash to earth fast
	 */
	if (cold) {
		doshutdownhooks();
		printf("Halted while still in the ICE age.\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
		printf("rebooting...\n");
		cpu_reset();
		/*NOTREACHED*/
	}

	/* Disable console buffering */
	cnpollc(1);

	/*
	 * If RB_NOSYNC was not specified sync the discs.
	 * Note: Unless cold is set to 1 here, syslogd will die during the unmount.
	 * It looks like syslogd is getting woken up only to find that it cannot
	 * page part of the binary in as the filesystem has been unmounted.
	 */
	if (!(howto & RB_NOSYNC))
		bootsync();

	/* Say NO to interrupts */
	splhigh();

	/* Do a dump if requested. */
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
		dumpsys();


	/* Run any shutdown hooks */
	doshutdownhooks();

	/* Make sure IRQs are disabled */
	IRQdisable;

	if (howto & RB_HALT) {
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	}

	printf("rebooting...\n");
	cpu_reset();
	/*NOTREACHED*/
}

/*
 *
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel
 */

u_int
initarm(argc, argv, bi)
	int argc;
	char **argv;
	struct bootinfo *bi;
{
	int loop;
	u_int kerneldatasize, symbolsize;
	u_int l1pagetable;
	vaddr_t freemempos;
	pv_addr_t kernel_l1pt;
	pv_addr_t kernel_ptpt;
#ifdef DDB
	Elf_Shdr *sh;
#endif

	/*
	 * Heads up ... Set up the CPU / MMU / TLB functions.
	 */
	set_cpufuncs();

#ifdef DEBUG_BEFOREMMU
	/*
	 * At this point we cannot call the real consinit() yet.
	 * Just call a faked-up version of consinit(), which does the job
	 * with the MMU disabled.
	 */
	fakecninit();
#endif

	/*
	 * XXX for now, overwrite bootconfig with hardcoded values.
	 * XXX kill bootconfig and directly call uvm_physload
	 */
	bootconfig.dram[0].address = 0xc0000000;
	bootconfig.dram[0].pages = 8192;
	bootconfig.dramblocks = 1;
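	/*
	 * Rough arithmetic for the hardcoded values above (assuming the
	 * usual 4KB ARM page size, NBPG == 4096): 8192 pages is 32MB of
	 * DRAM starting at physical address 0xc0000000.  That appears to
	 * match the kernel's link address on this port, which is why
	 * physical and virtual addresses can be used interchangeably
	 * until the real page tables are switched in below.
	 */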
	kerneldatasize = (u_int32_t)&end - (u_int32_t)KERNEL_TEXT_BASE;

	symbolsize = 0;
#ifdef DDB
	if (! memcmp(&end, "\177ELF", 4)) {
		sh = (Elf_Shdr *)((char *)&end + ((Elf_Ehdr *)&end)->e_shoff);
		loop = ((Elf_Ehdr *)&end)->e_shnum;
		for(; loop; loop--, sh++)
			if (sh->sh_offset > 0 &&
			    (sh->sh_offset + sh->sh_size) > symbolsize)
				symbolsize = sh->sh_offset + sh->sh_size;
	}
#endif

	printf("kernsize=0x%x\n", kerneldatasize);
	kerneldatasize += symbolsize;
	kerneldatasize = ((kerneldatasize - 1) & ~(NBPG * 4 - 1)) + NBPG * 8;
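	/*
	 * A sketch of what the expression above does (assuming NBPG == 4096):
	 * (size - 1) & ~(NBPG * 4 - 1) truncates to a 16KB boundary and the
	 * + NBPG * 8 then adds 32KB, so the result is the kernel size rounded
	 * up to the next 16KB boundary plus an extra 16KB of headroom.
	 */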

	/* parse kernel args */
	strncpy(booted_kernel_storage, *argv, sizeof(booted_kernel_storage));
	for(argc--, argv++; argc; argc--, argv++)
		switch(**argv) {
		case 'a':
			boothowto |= RB_ASKNAME;
			break;
		case 's':
			boothowto |= RB_SINGLE;
			break;
		default:
			break;
		}

	/* copy bootinfo into known kernel space */
	bootinfo_storage = *bi;
	bootinfo = &bootinfo_storage;

#ifdef BOOTINFO_FB_WIDTH
	bootinfo->fb_line_bytes = BOOTINFO_FB_LINE_BYTES;
	bootinfo->fb_width = BOOTINFO_FB_WIDTH;
	bootinfo->fb_height = BOOTINFO_FB_HEIGHT;
	bootinfo->fb_type = BOOTINFO_FB_TYPE;
#endif

	/*
	 * hpcboot has loaded us with the MMU disabled,
	 * so create the kernel page tables and enable the MMU.
	 */

	/*
	 * Set up the variables that define the availability of physical
	 * memory.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_freestart = physical_start
	    + (KERNEL_TEXT_BASE - KERNEL_BASE) + kerneldatasize;
	physical_end = bootconfig.dram[bootconfig.dramblocks - 1].address
	    + bootconfig.dram[bootconfig.dramblocks - 1].pages * NBPG;
	physical_freeend = physical_end;
/*	free_pages = bootconfig.drampages;*/

	for (loop = 0; loop < bootconfig.dramblocks; ++loop)
		physmem += bootconfig.dram[loop].pages;

	/* XXX handle UMA framebuffer memory */

	/* Use the first 1MB to allocate things */
	freemempos = 0xc0000000;
	memset((void *)0xc0000000, 0, KERNEL_TEXT_BASE - 0xc0000000);

	/*
	 * Right, we have the bottom meg of memory mapped to 0x00000000
	 * so we can get at it. The kernel will occupy the start of it.
	 * After the kernel/args we allocate some of the fixed page tables
	 * we need to get the system going.
	 * We allocate one page directory and 8 page tables and store the
	 * physical addresses in the kernel_pt_table array.
	 * Remember that neither the L1 page directory nor the L2 page
	 * tables are the same size as a page!
	 *
	 * OK, the next bit of physical allocation may look complex but it
	 * is simple really. I have done it like this so that no memory gets
	 * wasted during the allocation of the various pages and tables that
	 * are all different sizes.
	 * The start address will be page aligned.
	 * We allocate the kernel page directory on the first free 16KB
	 * boundary we find.
	 * We allocate the kernel page tables on the first 1KB boundary we
	 * find.
	 * We allocate 9 PTs. This means that in the process we
	 * KNOW that we will encounter at least one 16KB boundary.
	 *
	 * Eventually, if the top end of the memory gets used for process L1
	 * page tables, the kernel L1 page table may be moved up there.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	/* Define macros to simplify memory allocation */
#define	valloc_pages(var, np)			\
	(var).pv_pa = (var).pv_va = freemempos;	\
	freemempos += (np) * NBPG;
#define	alloc_pages(var, np)			\
	(var) = freemempos;			\
	freemempos += (np) * NBPG;
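
	/*
	 * For example (a sketch based on the macros above), the call
	 *	valloc_pages(irqstack, IRQ_STACK_SIZE);
	 * further down expands to
	 *	irqstack.pv_pa = irqstack.pv_va = freemempos;
	 *	freemempos += IRQ_STACK_SIZE * NBPG;
	 * i.e. it records identical physical and virtual addresses (the
	 * MMU is still off and DRAM sits at the kernel's link address, so
	 * PA and VA coincide here) and advances the free-memory cursor by
	 * the size of the request.
	 */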


	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / NBPG);
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		alloc_pages(kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE / NBPG);
		kernel_pt_table[loop].pv_va = kernel_pt_table[loop].pv_pa;
	}

	/*
	 * Allocate a page for the system page mapped to V0x00000000.
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate a page for the page table to map kernel page tables */
	valloc_pages(kernel_ptpt, L2_TABLE_SIZE / NBPG);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa, irqstack.pv_va);
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa, abtstack.pv_va);
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa, undstack.pv_va);
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa, kernelstack.pv_va);
#endif

	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / NBPG);

	/*
	 * XXX Actually, we only need virtual space and don't need
	 * XXX physical memory for sa110_cc_base and sa11x0_idle_mem.
	 */
#ifdef CPU_SA110
	/*
	 * XXX totally stuffed hack to work around problems introduced
	 * in recent versions of the pmap code. Due to the calls used there
	 * we cannot allocate virtual memory during bootstrap.
	 */
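	/*
	 * The loop below is a simple alignment hunt: it allocates one page
	 * at a time until the page it just grabbed starts on a
	 * CPU_SA110_CACHE_CLEAN_SIZE (0x8000, i.e. 32KB) boundary, then
	 * takes the remaining pages of that 32KB region.  Any pages
	 * consumed while hunting for the boundary are simply left unused.
	 */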
	for(;;) {
		alloc_pages(sa110_cc_base, 1);
		if (! (sa110_cc_base & (CPU_SA110_CACHE_CLEAN_SIZE - 1)))
			break;
	}
	{
		vaddr_t dummy;
		alloc_pages(dummy, CPU_SA110_CACHE_CLEAN_SIZE / NBPG - 1);
	}
	sa110_cache_clean_addr = sa110_cc_base;
	sa110_cache_clean_size = CPU_SA110_CACHE_CLEAN_SIZE / 2;
#endif	/* CPU_SA110 */

	alloc_pages(sa11x0_idle_mem, 1);

	/*
	 * OK, we have now allocated the physical pages for the primary
	 * kernel page tables.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table\n");
#endif

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 page tables into the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
	pmap_link_l2pt(l1pagetable, KERNEL_BASE,
	    &kernel_pt_table[KERNEL_PT_KERNEL]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
	pmap_link_l2pt(l1pagetable, PTE_BASE,
	    &kernel_ptpt);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);
#define SAIPIO_BASE		0xd0000000		/* XXX XXX */
	pmap_link_l2pt(l1pagetable, SAIPIO_BASE,
	    &kernel_pt_table[KERNEL_PT_IO]);


#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 page table for the kernel code/data */

	/*
	 * XXX there is no ELF header to find RO region.
	 * XXX What should we do?
	 */
#if 0
	if (N_GETMAGIC(kernexec[0]) == ZMAGIC) {
		logical = pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
		    physical_start, kernexec->a_text,
		    VM_PROT_READ, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable,
		    KERNEL_TEXT_BASE + logical, physical_start + logical,
		    kerneldatasize - kernexec->a_text,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	} else
#endif
		pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
		    KERNEL_TEXT_BASE, kerneldatasize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);

	/* Map the page table that maps the kernel pages */
	pmap_map_entry(l1pagetable, kernel_ptpt.pv_va, kernel_ptpt.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);

	/* Map a page for entering idle mode */
	pmap_map_entry(l1pagetable, sa11x0_idle_mem, sa11x0_idle_mem,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);

	/*
	 * Map entries in the page table used to map PTEs.
	 * Basically every kernel page table gets mapped here.
	 */
	/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
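	/*
	 * A quick sketch of the offset arithmetic used below: each 4-byte
	 * PTE maps one 2^PGSHIFT-byte page, so the PTE for virtual address
	 * va lives (va >> PGSHIFT) entries, i.e. (va >> PGSHIFT) * 4 =
	 * va >> (PGSHIFT - 2) bytes, into the PTE window at PTE_BASE.
	 * That is where the hard-coded "-2" (log2 of the PTE size) in the
	 * expressions below comes from.
	 */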
	pmap_map_entry(l1pagetable,
	    PTE_BASE + (0x00000000 >> (PGSHIFT-2)),
	    kernel_pt_table[KERNEL_PT_SYS].pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
	pmap_map_entry(l1pagetable,
	    PTE_BASE + (KERNEL_BASE >> (PGSHIFT-2)),
	    kernel_pt_table[KERNEL_PT_KERNEL].pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop) {
		pmap_map_entry(l1pagetable,
		    PTE_BASE + ((KERNEL_VM_BASE +
		    (loop * 0x00400000)) >> (PGSHIFT-2)),
		    kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
	}
	pmap_map_entry(l1pagetable,
	    PTE_BASE + (PTE_BASE >> (PGSHIFT-2)),
	    kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
	pmap_map_entry(l1pagetable,
	    PTE_BASE + (SAIPIO_BASE >> (PGSHIFT-2)),
	    kernel_pt_table[KERNEL_PT_IO].pv_pa, VM_PROT_READ|VM_PROT_WRITE,
	    PTE_NOCACHE);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map any I/O modules here, as we don't have real bus_space_map() */
	printf("mapping IO...");
	pmap_map_entry(l1pagetable, SACOM3_BASE, SACOM3_HW_BASE,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);

#ifdef CPU_SA110
	pmap_map_chunk(l1pagetable, sa110_cache_clean_addr, 0xe0000000,
	    CPU_SA110_CACHE_CLEAN_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#endif
	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	printf("done.\n");

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	printf("init subsystems: stacks ");

	set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + IRQ_STACK_SIZE * NBPG);
	set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ABT_STACK_SIZE * NBPG);
	set_stackptr(PSR_UND32_MODE, undstack.pv_va + UND_STACK_SIZE * NBPG);
#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("kstack V%08lx P%08lx\n", kernelstack.pv_va,
		    kernelstack.pv_pa);
#endif	/* PMAP_DEBUG */

	/*
	 * Well, we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler. Until then we will use a handler that just panics but
	 * tells us why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
	printf("vectors ");
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	printf("%08x %08x %08x\n", data_abort_handler_address,
	    prefetch_abort_handler_address, undefined_handler_address);

	/* Initialise the undefined instruction handlers */
	printf("undefined ");
	undefined_init();

	/* Set the page table address. */
	setttb(kernel_l1pt.pv_pa);

#ifdef BOOT_DUMP
	dumppages((char *)0xc0000000, 16 * NBPG);
	dumppages((char *)0xb0100000, 64); /* XXX */
#endif
	/* Enable MMU, I-cache, D-cache, write buffer. */
	cpufunc_control(0x337f, 0x107d);

	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	consinit();

#ifdef VERBOSE_INIT_ARM
	printf("freemempos=%08lx\n", freemempos);
	printf("MMU enabled. control=%08x\n", cpu_get_control());
#endif

	/* Bootstrap the pmap, telling it where the kernel page table is */
	pmap_bootstrap((pd_entry_t *)kernel_l1pt.pv_va, kernel_ptpt);


#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110)
		rpc_sa110_cc_setup();
#endif	/* CPU_SA110 */

#ifdef IPKDB
	/* Initialise ipkdb */
	ipkdb_init();
	if (boothowto & RB_KDB)
		ipkdb_connect(0);
#endif	/* IPKDB */

#ifdef BOOT_DUMP
	dumppages((char *)kernel_l1pt.pv_va, 16);
	dumppages((char *)PTE_BASE, 16);
#endif

#ifdef DDB
	{
		static struct undefined_handler uh;

		uh.uh_handler = db_trapper;
		install_coproc_handler_static(0, &uh);
	}
	ddb_init(symbolsize, ((int *)&end), ((char *)&end) + symbolsize);
#endif

	printf("kernsize=0x%x", kerneldatasize);
	printf(" (including 0x%x symbols)\n", symbolsize);

#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif	/* DDB */

	if (bootinfo->magic == BOOTINFO_MAGIC) {
		platid.dw.dw0 = bootinfo->platid_cpu;
		platid.dw.dw1 = bootinfo->platid_machine;
	}

	/* We return the new stack pointer address */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}

void
consinit(void)
{
	static int consinit_called = 0;

	if (consinit_called != 0)
		return;

	consinit_called = 1;
	if (bootinfo->bi_cnuse == BI_CNUSE_SERIAL)
		cninit();
	else {
		/*
		 * Nothing to do here.  Console initialization is done at
		 * autoconf device attach time.
		 */
	}
}

#ifdef DEBUG_BEFOREMMU
cons_decl(sacom);
void
fakecninit()
{
	static struct consdev fakecntab = cons_init(sacom);
	cn_tab = &fakecntab;

	(*cn_tab->cn_init)(0);
	cn_tab->cn_pri = CN_REMOTE;
}
#endif

#ifdef CPU_SA110

/*
 * For optimal cache cleaning we need two 16K banks of
 * virtual address space that NOTHING else will access
 * and then we alternate the cache cleaning between the
 * two banks.
 * The cache cleaning code requires two banks aligned on a
 * total-size boundary so the banks can be alternated by
 * XORing the size bit (assumes the bank size is a power of 2).
 */
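/*
 * Sketch of the alternation trick (the toggling itself lives in the SA-110
 * cache-clean code that consumes sa110_cache_clean_addr/size, not here):
 * the 32KB region allocated in initarm() is aligned on 32KB, so the two
 * 16KB banks sit at base and base + 0x4000, and switching banks amounts
 * to "addr ^= 0x4000", i.e. an XOR with the bank size stored in
 * sa110_cache_clean_size.
 */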
void
rpc_sa110_cc_setup(void)
{
	int loop;
	paddr_t kaddr;
	pt_entry_t *pte;

	(void) pmap_extract(pmap_kernel(), KERNEL_TEXT_BASE, &kaddr);
	for (loop = 0; loop < CPU_SA110_CACHE_CLEAN_SIZE; loop += NBPG) {
		pte = vtopte(sa110_cc_base + loop);
		*pte = L2_S_PROTO | kaddr |
		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
	}
	sa110_cache_clean_addr = sa110_cc_base;
	sa110_cache_clean_size = CPU_SA110_CACHE_CLEAN_SIZE / 2;
}
#endif	/* CPU_SA110 */

#ifdef BOOT_DUMP
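/*
 * Debug helper: hex-dump a region of memory, 16 bytes per line, skipping
 * lines that contain nothing but zero bytes.
 */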
void dumppages(char *start, int nbytes)
{
	char *p = start;
	char *p1;
	int i;

	for(i = nbytes; i > 0; i -= 16, p += 16) {
		for(p1 = p + 15; p != p1; p1--) {
			if (*p1)
				break;
		}
		if (! *p1)
			continue;
		printf("%08x %02x %02x %02x %02x %02x %02x %02x %02x"
		    " %02x %02x %02x %02x %02x %02x %02x %02x\n",
		    (unsigned int)p,
		    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
		    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
	}
}
#endif

/* End of machdep.c */