/*	$NetBSD: hpc_machdep.c,v 1.76 2006/01/26 11:12:20 peter Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * machdep.c
 *
 * Machine dependent functions for kernel setup
 *
 * This file needs a lot of work.
 *
 * Created      : 17/09/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hpc_machdep.c,v 1.76 2006/01/26 11:12:20 peter Exp $");

#include "opt_ddb.h"
#include "opt_pmap_debug.h"
#include "fs_nfs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/reboot.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/exec.h>
#include <sys/ksyms.h>
#include <sys/boot_flag.h>

#include <dev/cons.h>

#include "ksyms.h"

#if NKSYMS || defined(DDB) || defined(LKM)
#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#ifndef DB_ELFSIZE
#error Must define DB_ELFSIZE!
#endif
#define ELFSIZE		DB_ELFSIZE
#include <sys/exec_elf.h>
#endif

#include <uvm/uvm.h>

#include <machine/signal.h>
#include <machine/frame.h>
#include <machine/bootconfig.h>
#include <machine/cpu.h>
#include <machine/io.h>
#include <machine/intr.h>
#include <arm/arm32/katelib.h>
#include <machine/bootinfo.h>
#include <arm/cpuconf.h>
#include <arm/undefined.h>
#include <machine/rtc.h>
#include <machine/platid.h>

#include <arm/sa11x0/sa11x0_reg.h>

#include <dev/hpc/bicons.h>

#include "opt_ipkdb.h"

/* XXX for consinit related hacks */
#include <sys/conf.h>

#ifdef NFS
#include <sys/mount.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#endif

/* Kernel text starts 256K in from the bottom of the kernel address space. */
#define	KERNEL_TEXT_BASE	(KERNEL_BASE + 0x00040000)
#define	KERNEL_VM_BASE		(KERNEL_BASE + 0x00c00000)
#define	KERNEL_VM_SIZE		0x05000000
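
/*
 * Layout notes: the 256 KB between KERNEL_BASE and KERNEL_TEXT_BASE is
 * what initarm() below uses for its early bootstrap allocations (page
 * tables, stacks, msgbuf), and KERNEL_VM_BASE/KERNEL_VM_SIZE bound the
 * kernel virtual address range later handed to pmap_bootstrap().
 */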

/*
 * Address to call from cpu_reset() to reset the machine.
 * This is machine architecture dependent as it varies depending
 * on where the ROM appears when you turn the MMU off.
 */

u_int cpu_reset_address = 0;

/* Define various stack sizes in pages */
#define IRQ_STACK_SIZE	1
#define ABT_STACK_SIZE	1
#ifdef IPKDB
#define UND_STACK_SIZE	2
#else
#define UND_STACK_SIZE	1
#endif

BootConfig bootconfig;		/* Boot config storage */
struct bootinfo *bootinfo, bootinfo_storage;
static char booted_kernel_storage[80];
char *booted_kernel = booted_kernel_storage;

paddr_t physical_start;
paddr_t physical_freestart;
paddr_t physical_freeend;
paddr_t physical_end;
int physmem = 0;

#ifndef PMAP_STATIC_L1S
int max_processes = 64;			/* Default number */
#endif	/* !PMAP_STATIC_L1S */


/* Physical and virtual addresses for some global pages */
pv_addr_t systempage;
pv_addr_t irqstack;
pv_addr_t undstack;
pv_addr_t abtstack;
pv_addr_t kernelstack;

char *boot_args = NULL;
char boot_file[16];

vaddr_t msgbufphys;

extern u_int data_abort_handler_address;
extern u_int prefetch_abort_handler_address;
extern u_int undefined_handler_address;
extern int end;

#ifdef PMAP_DEBUG
extern int pmap_debug_level;
#endif	/* PMAP_DEBUG */

#define	KERNEL_PT_VMEM		0	/* Page table for mapping video memory */
#define	KERNEL_PT_SYS		1	/* Page table for mapping proc0 zero page */
#define	KERNEL_PT_KERNEL	2	/* Page table for mapping kernel */
#define	KERNEL_PT_IO		3	/* Page table for mapping IO */
#define	KERNEL_PT_VMDATA	4	/* Page tables for mapping kernel VM */
#define	KERNEL_PT_VMDATA_NUM	4	/* start with 16MB of KVM */
#define	NUM_KERNEL_PTS		(KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
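
/*
 * The KERNEL_PT_* constants above index kernel_pt_table[]; initarm()
 * allocates one L2 page table per slot.  KERNEL_PT_VMDATA names the
 * first of KERNEL_PT_VMDATA_NUM consecutive tables used for kernel VM;
 * each L2 table maps 4 MB of address space, hence the initial 16 MB of
 * KVM noted above.
 */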

pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];

struct user *proc0paddr;

#define CPU_SA110_CACHE_CLEAN_SIZE (0x4000 * 2)
extern unsigned int sa1_cache_clean_addr;
extern unsigned int sa1_cache_clean_size;
static vaddr_t sa1_cc_base;

/* Non-buffered non-cachable memory needed to enter idle mode */
extern vaddr_t sa11x0_idle_mem;

/* Prototypes */

void physcon_display_base(u_int addr);
void consinit(void);

void data_abort_handler(trapframe_t *);
void prefetch_abort_handler(trapframe_t *);
void undefinedinstruction_bounce(trapframe_t *);

u_int cpu_get_control(void);

void rpc_sa110_cc_setup(void);

#ifdef DEBUG_BEFOREMMU
static void fakecninit();
#endif

#ifdef BOOT_DUMP
void dumppages(char *, int);
#endif

u_int initarm(int, char **, struct bootinfo *);
extern void dump_spl_masks(void);
extern void dumpsys(void);

/*
 * Reboots the system.
 *
 * Deal with any syncing, unmounting, dumping and shutdown hooks,
 * then reset the CPU.
 */
void
cpu_reboot(int howto, char *bootstr)
{
	/*
	 * If we are still cold then hit the air brakes
	 * and crash to earth fast.
	 */
	if (cold) {
		doshutdownhooks();
		printf("Halted while still in the ICE age.\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
		printf("rebooting...\n");
		cpu_reset();
		/*NOTREACHED*/
	}

	/* Disable console buffering */
	cnpollc(1);

	/*
	 * If RB_NOSYNC was not specified sync the discs.
	 * Note: Unless cold is set to 1 here, syslogd will die during
	 * the unmount.  It looks like syslogd is getting woken up only
	 * to find that it cannot page part of the binary in as the
	 * file system has been unmounted.
	 */
	if (!(howto & RB_NOSYNC))
		bootsync();

	/* Say NO to interrupts */
	splhigh();

	/* Do a dump if requested. */
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
		dumpsys();


	/* Run any shutdown hooks */
	doshutdownhooks();

	/* Make sure IRQs are disabled */
	IRQdisable;

	if (howto & RB_HALT) {
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	}

	printf("rebooting...\n");
	cpu_reset();
	/*NOTREACHED*/
}

/* Number of DRAM pages which are installed */
/* Units are 4K pages, so 8192 is 32 MB of memory */
#ifndef DRAM_PAGES
#define DRAM_PAGES	8192
#endif

/*
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes:
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel.
 */
u_int
initarm(int argc, char **argv, struct bootinfo *bi)
{
	int loop;
	u_int kerneldatasize, symbolsize;
	u_int l1pagetable;
	vaddr_t freemempos;
	pv_addr_t kernel_l1pt;
	vsize_t pt_size;
#if NKSYMS || defined(DDB) || defined(LKM)
	Elf_Shdr *sh;
#endif

	/*
	 * Heads up ... Setup the CPU / MMU / TLB functions
	 */
	set_cpufuncs();

#ifdef DEBUG_BEFOREMMU
	/*
	 * At this point, we cannot call real consinit().
	 * Just call a faked up version of consinit(), which does the thing
	 * with MMU disabled.
	 */
	fakecninit();
#endif

	/*
	 * XXX for now, overwrite bootconfig to hardcoded values.
	 * XXX kill bootconfig and directly call uvm_physload
	 */
	bootconfig.dram[0].address = 0xc0000000;
	bootconfig.dram[0].pages = DRAM_PAGES;
	bootconfig.dramblocks = 1;
	kerneldatasize = (u_int32_t)&end - (u_int32_t)KERNEL_TEXT_BASE;

	symbolsize = 0;
#if NKSYMS || defined(DDB) || defined(LKM)
	if (!memcmp(&end, "\177ELF", 4)) {
		sh = (Elf_Shdr *)((char *)&end + ((Elf_Ehdr *)&end)->e_shoff);
		loop = ((Elf_Ehdr *)&end)->e_shnum;
		for (; loop; loop--, sh++)
			if (sh->sh_offset > 0 &&
			    (sh->sh_offset + sh->sh_size) > symbolsize)
				symbolsize = sh->sh_offset + sh->sh_size;
	}
#endif
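
	/*
	 * symbolsize ends up as the largest section end offset found in
	 * the ELF image (apparently appended just past the kernel by the
	 * boot loader), i.e. the size of the symbol data handed to
	 * ksyms_init() near the end of initarm().
	 */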

	printf("kernsize=0x%x\n", kerneldatasize);
	kerneldatasize += symbolsize;
	kerneldatasize = ((kerneldatasize - 1) & ~(PAGE_SIZE * 4 - 1)) +
	    PAGE_SIZE * 8;
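	/*
	 * The expression above rounds kerneldatasize (kernel image plus
	 * symbols) up to a 16 KB (PAGE_SIZE * 4) boundary and adds another
	 * 16 KB of headroom; physical_freestart is placed past this region
	 * further down.
	 */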

	/* parse kernel args */
	boothowto = 0;
	boot_file[0] = '\0';
	strncpy(booted_kernel_storage, *argv, sizeof(booted_kernel_storage));
	for (argc--, argv++; argc; argc--, argv++)
		switch (**argv) {
		case 'b':
			/* boot device: -b=sd0 etc. */
#ifdef NFS
			if (strcmp(*argv + 2, "nfs") == 0)
				mountroot = nfs_mountroot;
			else
				strncpy(boot_file, *argv + 2,
				    sizeof(boot_file));
#else /* NFS */
			strncpy(boot_file, *argv + 2, sizeof(boot_file));
#endif /* NFS */
			break;
		default:
			BOOT_FLAG(**argv, boothowto);
			break;
		}

	/* copy bootinfo into known kernel space */
	bootinfo_storage = *bi;
	bootinfo = &bootinfo_storage;

#ifdef BOOTINFO_FB_WIDTH
	bootinfo->fb_line_bytes = BOOTINFO_FB_LINE_BYTES;
	bootinfo->fb_width = BOOTINFO_FB_WIDTH;
	bootinfo->fb_height = BOOTINFO_FB_HEIGHT;
	bootinfo->fb_type = BOOTINFO_FB_TYPE;
#endif

	/*
	 * hpcboot has loaded me with MMU disabled.
	 * So create kernel page tables and enable MMU.
	 */

	/*
	 * Set up the variables that define the availability of physical
	 * memory.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_freestart = physical_start
	    + (KERNEL_TEXT_BASE - KERNEL_BASE) + kerneldatasize;
	physical_end = bootconfig.dram[bootconfig.dramblocks - 1].address
	    + bootconfig.dram[bootconfig.dramblocks - 1].pages * PAGE_SIZE;
	physical_freeend = physical_end;

	for (loop = 0; loop < bootconfig.dramblocks; ++loop)
		physmem += bootconfig.dram[loop].pages;

	/* XXX handle UMA framebuffer memory */

	/* Use the first 256kB to allocate things */
	freemempos = KERNEL_BASE;
	memset((void *)KERNEL_BASE, 0, KERNEL_TEXT_BASE - KERNEL_BASE);

	/*
	 * Right. We have the bottom meg of memory mapped to 0x00000000
	 * so we can get at it. The kernel will occupy the start of it.
	 * After the kernel/args we allocate some of the fixed page tables
	 * we need to get the system going.
	 * We allocate one page directory and 8 page tables and store the
	 * physical addresses in the kernel_pt_table array.
	 * Remember that neither the L1 nor the L2 page tables are the
	 * same size as a page!
	 *
	 * Ok, the next bit of physical allocation may look complex but it is
	 * simple really. I have done it like this so that no memory gets
	 * wasted during the allocation of various pages and tables that are
	 * all different sizes.
	 * The start address will be page aligned.
	 * We allocate the kernel page directory on the first free 16KB
	 * boundary we find.
	 * We allocate the kernel page tables on the first 1KB boundary we find.
	 * We allocate 9 PT's. This means that in the process we
	 * KNOW that we will encounter at least 1 16KB boundary.
	 *
	 * Eventually if the top end of the memory gets used for process L1
	 * page tables the kernel L1 page table may be moved up there.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)			\
	(var).pv_pa = (var).pv_va = freemempos;	\
	freemempos += (np) * PAGE_SIZE;
#define	alloc_pages(var, np)			\
	(var) = freemempos;			\
	freemempos += (np) * PAGE_SIZE;
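
	/*
	 * Both macros are simple bump allocators on freemempos.
	 * valloc_pages() records the same value for pv_pa and pv_va;
	 * that is fine because this region is mapped with VA == PA
	 * below (pmap_map_chunk(..., KERNEL_BASE, KERNEL_BASE, ...)).
	 */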


	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		alloc_pages(kernel_pt_table[loop].pv_pa,
		    L2_TABLE_SIZE / PAGE_SIZE);
		kernel_pt_table[loop].pv_va = kernel_pt_table[loop].pv_pa;
	}

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	pt_size = round_page(freemempos) - KERNEL_BASE;

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va);
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va);
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va);
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va);
#endif

	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/*
	 * XXX Actually, we only need virtual space and don't need
	 * XXX physical memory for sa110_cc_base and sa11x0_idle_mem.
	 */
	/*
	 * XXX totally stuffed hack to work round problems introduced
	 * in recent versions of the pmap code. Due to the calls used there
	 * we cannot allocate virtual memory during bootstrap.
	 */
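	/*
	 * Allocate the cache-clean window one page at a time until a page
	 * happens to start on a CPU_SA110_CACHE_CLEAN_SIZE boundary, then
	 * take the remaining pages in one go: this yields a naturally
	 * aligned region without needing a smarter allocator this early.
	 */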
	for (;;) {
		alloc_pages(sa1_cc_base, 1);
		if (! (sa1_cc_base & (CPU_SA110_CACHE_CLEAN_SIZE - 1)))
			break;
	}
	{
		vaddr_t dummy;
		alloc_pages(dummy, CPU_SA110_CACHE_CLEAN_SIZE / PAGE_SIZE - 1);
	}
	sa1_cache_clean_addr = sa1_cc_base;
	sa1_cache_clean_size = CPU_SA110_CACHE_CLEAN_SIZE / 2;

	alloc_pages(sa11x0_idle_mem, 1);

	/*
	 * Ok, we have allocated physical pages for the primary kernel
	 * page tables.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table\n");
#endif

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 page tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
	pmap_link_l2pt(l1pagetable, KERNEL_BASE,
	    &kernel_pt_table[KERNEL_PT_KERNEL]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);
#define SAIPIO_BASE		0xd0000000		/* XXX XXX */
	pmap_link_l2pt(l1pagetable, SAIPIO_BASE,
	    &kernel_pt_table[KERNEL_PT_IO]);


#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel code/data */

	/*
	 * XXX there is no ELF header to find RO region.
	 * XXX What should we do?
	 */
#if 0
	if (N_GETMAGIC(kernexec[0]) == ZMAGIC) {
		logical = pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
		    physical_start, kernexec->a_text,
		    VM_PROT_READ, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable,
		    KERNEL_TEXT_BASE + logical, physical_start + logical,
		    kerneldatasize - kernexec->a_text,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	} else
#endif
		pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
		    KERNEL_TEXT_BASE, kerneldatasize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map page tables */
	pmap_map_chunk(l1pagetable, KERNEL_BASE, KERNEL_BASE, pt_size,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map a page for entering idle mode */
	pmap_map_entry(l1pagetable, sa11x0_idle_mem, sa11x0_idle_mem,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map any I/O modules here, as we don't have real bus_space_map() */
	printf("mapping IO...");
	pmap_map_entry(l1pagetable, SACOM3_BASE, SACOM3_HW_BASE,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);

	pmap_map_chunk(l1pagetable, sa1_cache_clean_addr, 0xe0000000,
	    CPU_SA110_CACHE_CLEAN_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	printf("done.\n");

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	printf("init subsystems: stacks ");

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("kstack V%08lx P%08lx\n", kernelstack.pv_va,
		    kernelstack.pv_pa);
#endif	/* PMAP_DEBUG */

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler. Until then we will use a handler that just panics but
	 * tells us why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
	printf("vectors ");
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	printf("%08x %08x %08x\n", data_abort_handler_address,
	    prefetch_abort_handler_address, undefined_handler_address);

	/* Initialise the undefined instruction handlers */
	printf("undefined ");
	undefined_init();

	/* Set the page table address. */
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));
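	/*
	 * The four calls above switch to the newly built tables:
	 * temporarily grant client access to domain 0 as well as the
	 * kernel domain, load the new translation table base, flush the
	 * TLB, and finally leave only the kernel domain accessible.
	 */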

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init.
	 */
	proc0paddr = (struct user *)kernelstack.pv_va;
	lwp0.l_addr = proc0paddr;

#ifdef BOOT_DUMP
	dumppages((char *)0xc0000000, 16 * PAGE_SIZE);
	dumppages((char *)0xb0100000, 64); /* XXX */
#endif
	/* Enable MMU, I-cache, D-cache, write buffer. */
	cpufunc_control(0x337f, 0x107d);
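	/*
	 * cpufunc_control(clear, set) presumably masks off the bits in its
	 * first argument and then ORs in the second; 0x107d sets the MMU
	 * (bit 0), D-cache (bit 2), write buffer (bit 3) and I-cache
	 * (bit 12) enables, which is what the comment above describes.
	 */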

	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	consinit();

#ifdef VERBOSE_INIT_ARM
	printf("freemempos=%08lx\n", freemempos);
	printf("MMU enabled. control=%08x\n", cpu_get_control());
#endif

	/* Load memory into UVM. */
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
	for (loop = 0; loop < bootconfig.dramblocks; loop++) {
		paddr_t dblk_start = (paddr_t)bootconfig.dram[loop].address;
		paddr_t dblk_end = dblk_start
			+ (bootconfig.dram[loop].pages * PAGE_SIZE);

		if (dblk_start < physical_freestart)
			dblk_start = physical_freestart;
		if (dblk_end > physical_freeend)
			dblk_end = physical_freeend;

		uvm_page_physload(atop(dblk_start), atop(dblk_end),
		    atop(dblk_start), atop(dblk_end), VM_FREELIST_DEFAULT);
	}

	/* Boot strap pmap telling it where the kernel page table is */
	pmap_bootstrap((pd_entry_t *)kernel_l1pt.pv_va, KERNEL_VM_BASE,
	    KERNEL_VM_BASE + KERNEL_VM_SIZE);

	if (cputype == CPU_ID_SA110)
		rpc_sa110_cc_setup();

#ifdef IPKDB
	/* Initialise ipkdb */
	ipkdb_init();
	if (boothowto & RB_KDB)
		ipkdb_connect(0);
#endif	/* IPKDB */

#ifdef BOOT_DUMP
	dumppages((char *)kernel_l1pt.pv_va, 16);
	dumppages((char *)PTE_BASE, 16);
#endif

#ifdef DDB
	db_machine_init();
#endif
#if NKSYMS || defined(DDB) || defined(LKM)
	ksyms_init(symbolsize, ((int *)&end), ((char *)&end) + symbolsize);
#endif

	printf("kernsize=0x%x", kerneldatasize);
	printf(" (including 0x%x symbols)\n", symbolsize);

#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif	/* DDB */

	if (bootinfo->magic == BOOTINFO_MAGIC) {
		platid.dw.dw0 = bootinfo->platid_cpu;
		platid.dw.dw1 = bootinfo->platid_machine;
	}

	/* We return the new stack pointer address */
	return (kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}

void
consinit(void)
{
	static int consinit_called = 0;

	if (consinit_called != 0)
		return;

	consinit_called = 1;
	if (bootinfo->bi_cnuse == BI_CNUSE_SERIAL)
		cninit();
	else {
		/*
		 * Nothing to do here.  Console initialization is done at
		 * autoconf device attach time.
		 */
	}
}

#ifdef DEBUG_BEFOREMMU
cons_decl(sacom);
void
fakecninit(void)
{
	static struct consdev fakecntab = cons_init(sacom);
	cn_tab = &fakecntab;

	(*cn_tab->cn_init)(0);
	cn_tab->cn_pri = CN_REMOTE;
}
#endif


/*
 * For optimal cache cleaning we need two 16K banks of
 * virtual address space that NOTHING else will access
 * and then we alternate the cache cleaning between the
 * two banks.
 * The cache cleaning code requires two banks aligned
 * on a total size boundary so the banks can be alternated by
 * EORing the size bit (this assumes the bank size is a power of 2).
 */
void
rpc_sa110_cc_setup(void)
{
	int loop;
	paddr_t kaddr;
	pt_entry_t *pte;

	(void) pmap_extract(pmap_kernel(), KERNEL_TEXT_BASE, &kaddr);
	for (loop = 0; loop < CPU_SA110_CACHE_CLEAN_SIZE; loop += PAGE_SIZE) {
		pte = vtopte(sa1_cc_base + loop);
		*pte = L2_S_PROTO | kaddr |
		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
		PTE_SYNC(pte);
	}
	sa1_cache_clean_addr = sa1_cc_base;
	sa1_cache_clean_size = CPU_SA110_CACHE_CLEAN_SIZE / 2;
}

#ifdef BOOT_DUMP
void dumppages(char *start, int nbytes)
{
	char *p = start;
	char *p1;
	int i;

	for (i = nbytes; i > 0; i -= 16, p += 16) {
		for (p1 = p + 15; p != p1; p1--) {
			if (*p1)
				break;
		}
		if (!*p1)
			continue;
		printf("%08x %02x %02x %02x %02x %02x %02x %02x %02x"
		    " %02x %02x %02x %02x %02x %02x %02x %02x\n",
		    (unsigned int)p,
		    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
		    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
	}
}
#endif

/* End of machdep.c */