/*	$NetBSD: ixm1200_machdep.c,v 1.58 2017/11/06 03:47:46 christos Exp $ */

/*
 * Copyright (c) 2002, 2003
 *	Ichiro FUKUHARA <ichiro@ichiro.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY ICHIRO FUKUHARA ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL ICHIRO FUKUHARA OR THE VOICES IN HIS HEAD BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1997,1998 Mark Brinicombe.
 * Copyright (c) 1997,1998 Causality Limited.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Mark Brinicombe
 *      for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixm1200_machdep.c,v 1.58 2017/11/06 03:47:46 christos Exp $");

#include "opt_ddb.h"
#include "opt_modular.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/exec.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/reboot.h>
#include <sys/termios.h>
#include <sys/ksyms.h>
#include <sys/bus.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include <dev/cons.h>

#include "ksyms.h"

#if NKSYMS || defined(DDB) || defined(MODULAR)
#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#include <sys/exec_elf.h>
#endif

#include <machine/bootconfig.h>
#include <arm/locore.h>
#include <arm/undefined.h>

#include <arm/arm32/machdep.h>

#include <arm/ixp12x0/ixp12x0reg.h>
#include <arm/ixp12x0/ixp12x0var.h>
#include <arm/ixp12x0/ixp12x0_comreg.h>
#include <arm/ixp12x0/ixp12x0_comvar.h>
#include <arm/ixp12x0/ixp12x0_pcireg.h>

#include <evbarm/ixm1200/ixm1200reg.h>
#include <evbarm/ixm1200/ixm1200var.h>

/* XXX for consinit related hacks */
#include <sys/conf.h>

void ixp12x0_reset(void) __attribute__((noreturn));

/* Kernel text starts 2MB in from the bottom of the kernel address space. */
#define	KERNEL_TEXT_BASE	(KERNEL_BASE + 0x00200000)
#define	KERNEL_VM_BASE		(KERNEL_BASE + 0x01000000)

/*
 * The range 0xc1000000 - 0xccffffff is available for kernel VM space
 * Core-logic registers and I/O mappings occupy 0xfd000000 - 0xffffffff
 */
#define KERNEL_VM_SIZE		0x0C000000
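/*
 * For reference, 0x0C000000 bytes is 192MB, which matches the
 * 0xc1000000 - 0xccffffff window described above (KERNEL_VM_BASE works
 * out to 0xc1000000 with the usual KERNEL_BASE of 0xc0000000).
 */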

/*
 * Address to call from cpu_reset() to reset the machine.
 * This is machine architecture dependent as it varies depending
 * on where the ROM appears when you turn the MMU off.
 */

/*
 * Define the default console speed for the board.
 */
#ifndef CONMODE
#define CONMODE ((TTYDEF_CFLAG & ~(CSIZE | CSTOPB)) | CS8) /* 8N1 */
#endif
#ifndef CONSPEED
#define CONSPEED B38400
#endif
#ifndef CONADDR
#define CONADDR IXPCOM_UART_BASE
#endif
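/*
 * CONMODE starts from the system default c_cflag (TTYDEF_CFLAG), clears
 * the character-size (CSIZE) and two-stop-bits (CSTOPB) fields and then
 * selects CS8, which is the "8N1" noted above.  CONSPEED and CONADDR may
 * be overridden from the kernel configuration, hence the #ifndef guards.
 */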

BootConfig bootconfig;          /* Boot config storage */
char *boot_args = NULL;
char *boot_file = NULL;

vaddr_t physical_start;
vaddr_t physical_freestart;
vaddr_t physical_freeend;
vaddr_t physical_end;
u_int free_pages;

/*int debug_flags;*/
#ifndef PMAP_STATIC_L1S
int max_processes = 64;                 /* Default number */
#endif  /* !PMAP_STATIC_L1S */

paddr_t msgbufphys;

extern int end;

#ifdef PMAP_DEBUG
extern int pmap_debug_level;
#endif  /* PMAP_DEBUG */

#define KERNEL_PT_SYS		0	/* Page table for mapping proc0 zero page */
#define KERNEL_PT_KERNEL	1	/* Page table for mapping kernel */
#define KERNEL_PT_KERNEL_NUM	2
#define KERNEL_PT_IO		(KERNEL_PT_KERNEL + KERNEL_PT_KERNEL_NUM)
					/* Page table for mapping IO */
#define KERNEL_PT_VMDATA	(KERNEL_PT_IO + 1)
					/* Page tables for mapping kernel VM */
#define KERNEL_PT_VMDATA_NUM	4	/* start with 16MB of KVM */
#define NUM_KERNEL_PTS		(KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
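/*
 * With the values above, the kernel_pt_table[] slots work out to:
 *   0      KERNEL_PT_SYS      (vector page)
 *   1 - 2  KERNEL_PT_KERNEL   (kernel text/data)
 *   3      KERNEL_PT_IO       (device registers)
 *   4 - 7  KERNEL_PT_VMDATA   (4 x 4MB = 16MB of kernel VM)
 * giving NUM_KERNEL_PTS == 8.
 */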

pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];

#ifdef CPU_IXP12X0
#define CPU_IXP12X0_CACHE_CLEAN_SIZE (0x4000 * 2)
extern unsigned int ixp12x0_cache_clean_addr;
extern unsigned int ixp12x0_cache_clean_size;
static vaddr_t ixp12x0_cc_base;
#endif  /* CPU_IXP12X0 */

/* Prototypes */

void consinit(void);
u_int cpu_get_control(void);

void ixdp_ixp12x0_cc_setup(void);

/*
 * void cpu_reboot(int howto, char *bootstr)
 *
 * Reboots the system
 *
 * Deal with any syncing, unmounting, dumping and shutdown hooks,
 * then reset the CPU.
 */

void
cpu_reboot(int howto, char *bootstr)
{
	/*
	 * If we are still cold then hit the air brakes
	 * and crash to earth fast.
	 */
	if (cold) {
		doshutdownhooks();
		pmf_system_shutdown(boothowto);
		printf("Halted while still in the ICE age.\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
		printf("rebooting...\n");
		ixp12x0_reset();
	}

	/* Disable console buffering */
	cnpollc(1);

	/*
	 * If RB_NOSYNC was not specified, sync the discs.
	 * Note: Unless cold is set to 1 here, syslogd will die during the unmount.
	 * It looks like syslogd is getting woken up only to find that it cannot
	 * page part of the binary in as the filesystem has been unmounted.
	 */
	if (!(howto & RB_NOSYNC))
		bootsync();

	/* Say NO to interrupts */
	splhigh();

	/* Do a dump if requested. */
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
		dumpsys();

	/* Run any shutdown hooks */
	doshutdownhooks();

	pmf_system_shutdown(boothowto);

	/* Make sure IRQs are disabled */
	IRQdisable;

	if (howto & RB_HALT) {
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	}

	printf("rebooting...\n");

	/* all interrupts are disabled */
	disable_interrupts(I32_bit);

	ixp12x0_reset();

	/* ...and if that didn't work, just croak. */
	printf("RESET FAILED!\n");
	for (;;);
}

/* Static device mappings. */
static const struct pmap_devmap ixm1200_devmap[] = {
	/* StrongARM System and Peripheral Registers */
	{
		IXP12X0_SYS_VBASE,
		IXP12X0_SYS_HWBASE,
		IXP12X0_SYS_SIZE,
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	/* PCI Registers Accessible Through StrongARM Core */
	{
		IXP12X0_PCI_VBASE, IXP12X0_PCI_HWBASE,
		IXP12X0_PCI_SIZE,
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	/* PCI Registers Accessible Through I/O Cycle Access */
	{
		IXP12X0_PCI_IO_VBASE, IXP12X0_PCI_IO_HWBASE,
		IXP12X0_PCI_IO_SIZE,
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	/* PCI Type0 Configuration Space */
	{
		IXP12X0_PCI_TYPE0_VBASE, IXP12X0_PCI_TYPE0_HWBASE,
		IXP12X0_PCI_TYPE0_SIZE,
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	/* PCI Type1 Configuration Space */
	{
		IXP12X0_PCI_TYPE1_VBASE, IXP12X0_PCI_TYPE1_HWBASE,
		IXP12X0_PCI_TYPE1_SIZE,
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	{
		0,
		0,
		0,
		0,
		0
	},
};
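/*
 * The all-zero entry terminates the table.  The table is used twice
 * below: pmap_devmap_register() in consinit() makes the console mapping
 * known early, and pmap_devmap_bootstrap() in initarm() enters the
 * mappings into the new L1 table once it has been built.
 */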

/*
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes:
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel.
 *   Relocating the kernel to the bottom of physical memory.
 */
u_int
initarm(void *arg)
{
	int loop;
	int loop1;
	u_int kerneldatasize, symbolsize;
	vaddr_t l1pagetable;
	vaddr_t freemempos;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	Elf_Shdr *sh;
#endif

	cpu_reset_address = ixp12x0_reset;

	/*
	 * Since we map v0xf0000000 == p0x90000000, it's possible for
	 * us to initialize the console now.
	 */
	consinit();

#ifdef VERBOSE_INIT_ARM
	/* Talk to the user */
	printf("\nNetBSD/evbarm (IXM1200) booting ...\n");
#endif

	/*
	 * Heads up ... Set up the CPU / MMU / TLB functions
	 */
	if (set_cpufuncs())
		panic("CPU not recognized!");

	/* XXX overwrite bootconfig with hardcoded values */
	bootconfig.dram[0].address = 0xc0000000;
	bootconfig.dram[0].pages   = 0x10000000 / PAGE_SIZE; /* SDRAM 256MB */
	bootconfig.dramblocks = 1;
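	/*
	 * With the values above, 0x10000000 bytes / PAGE_SIZE (4KB) is
	 * 65536 pages, i.e. 256MB of SDRAM covering physical addresses
	 * 0xc0000000 - 0xcfffffff.
	 */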

	kerneldatasize = (uint32_t)&end - (uint32_t)KERNEL_TEXT_BASE;

	symbolsize = 0;

#ifdef PMAP_DEBUG
	pmap_debug(-1);
#endif

#if NKSYMS || defined(DDB) || defined(MODULAR)
	if (! memcmp(&end, "\177ELF", 4)) {
		sh = (Elf_Shdr *)((char *)&end + ((Elf_Ehdr *)&end)->e_shoff);
		loop = ((Elf_Ehdr *)&end)->e_shnum;
		for(; loop; loop--, sh++)
			if (sh->sh_offset > 0 &&
			    (sh->sh_offset + sh->sh_size) > symbolsize)
				symbolsize = sh->sh_offset + sh->sh_size;
	}
#endif
#ifdef VERBOSE_INIT_ARM
	printf("kernsize=0x%x\n", kerneldatasize);
#endif
	kerneldatasize += symbolsize;
	kerneldatasize = ((kerneldatasize - 1) & ~(PAGE_SIZE * 4 - 1)) + PAGE_SIZE * 8;
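	/*
	 * The expression above rounds the image size up to the next 16KB
	 * (4-page) boundary and then adds a further 16KB of slack; the
	 * padded size is used below when placing physical_freestart.
	 */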

	/*
	 * Set up the variables that define the availability of physical
	 * memory.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_end = physical_start + (bootconfig.dram[0].pages * PAGE_SIZE);

	physical_freestart = physical_start
		+ (KERNEL_TEXT_BASE - KERNEL_BASE) + kerneldatasize;
	physical_freeend = physical_end;

	physmem = (physical_end - physical_start) / PAGE_SIZE;

	freemempos = 0xc0000000;

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif
	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	printf("CP15 Register1 = 0x%08x\n", cpu_get_control());
	printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
		physical_freestart, free_pages, free_pages);
	printf("physical_start = 0x%08lx, physical_end = 0x%08lx\n",
		physical_start, physical_end);
#endif

	/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)			\
	alloc_pages((var).pv_pa, (np));		\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;
#define alloc_pages(var, np)				\
	(var) = freemempos;				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));	\
	freemempos += (np) * PAGE_SIZE;
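	/*
	 * For example, valloc_pages(irqstack, IRQ_STACK_SIZE) below expands
	 * to roughly:
	 *
	 *	irqstack.pv_pa = freemempos;
	 *	memset((char *)irqstack.pv_pa, 0, IRQ_STACK_SIZE * PAGE_SIZE);
	 *	freemempos += IRQ_STACK_SIZE * PAGE_SIZE;
	 *	irqstack.pv_va = KERNEL_BASE + irqstack.pv_pa - physical_start;
	 *
	 * i.e. pages are carved off the bottom of RAM at freemempos and
	 * given the virtual address they will have once the kernel page
	 * tables are live.
	 */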

	loop1 = 0;
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16KB aligned for an L1? */
		if (((physical_freeend - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
		    && kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[loop1],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++loop1;
		}
	}

#ifdef DIAGNOSTIC
	/* This should never happen, but better to confirm it. */
	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");
#endif

	/*
	 * Allocate a page for the system page (the vector page).
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	alloc_pages(systempage.pv_pa, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa, irqstack.pv_va);
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa, abtstack.pv_va);
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa, undstack.pv_va);
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa, kernelstack.pv_va);
#endif

	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

#ifdef CPU_IXP12X0
	/*
	 * XXX totally stuffed hack to work around problems introduced
	 * in recent versions of the pmap code. Due to the calls used there
	 * we cannot allocate virtual memory during bootstrap.
	 */
	for (;;) {
		alloc_pages(ixp12x0_cc_base, 1);
		if (! (ixp12x0_cc_base & (CPU_IXP12X0_CACHE_CLEAN_SIZE - 1)))
			break;
	}
	{
		vaddr_t dummy;
		alloc_pages(dummy, CPU_IXP12X0_CACHE_CLEAN_SIZE / PAGE_SIZE - 1);
	}
	ixp12x0_cache_clean_addr = ixp12x0_cc_base;
	ixp12x0_cache_clean_size = CPU_IXP12X0_CACHE_CLEAN_SIZE / 2;
#endif /* CPU_IXP12X0 */
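	/*
	 * The loop above burns one page at a time until ixp12x0_cc_base
	 * lands on a CPU_IXP12X0_CACHE_CLEAN_SIZE (32KB) boundary, and the
	 * dummy allocation then reserves the remaining pages of that 32KB
	 * region so nothing else will use it.  The region is given real
	 * mappings later, by pmap_map_chunk() below and again by
	 * ixdp_ixp12x0_cc_setup() once the pmap is up.
	 */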

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 page tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00400000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);

	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);

	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

	pmap_link_l2pt(l1pagetable, IXP12X0_IO_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO]);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

#if XXX
	/* Now we fill in the L2 pagetable for the kernel code/data */
	{
		extern char etext[], _end[];
		size_t textsize = (uintptr_t) etext - KERNEL_TEXT_BASE;
		size_t totalsize = (uintptr_t) _end - KERNEL_TEXT_BASE;
		u_int logical;

		textsize = (textsize + PGOFSET) & ~PGOFSET;
		totalsize = (totalsize + PGOFSET) & ~PGOFSET;

		logical = 0x00200000;   /* offset of kernel in RAM */

		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, totalsize - textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	}
#else
	{
		pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
		    KERNEL_TEXT_BASE, kerneldatasize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	}
#endif

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

#ifdef VERBOSE_INIT_ARM
	printf("systempage (vector page): p0x%08lx v0x%08lx\n",
	       systempage.pv_pa, vector_page);
#endif

	/* Map the statically mapped devices. */
	pmap_devmap_bootstrap(l1pagetable, ixm1200_devmap);

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

	/*
	 * Map the Dcache Flush page.
	 * Hw Ref Manual 3.2.4.5 Software Dcache Flush
	 */
	pmap_map_chunk(l1pagetable, ixp12x0_cache_clean_addr, 0xe0000000,
	    CPU_IXP12X0_CACHE_CLEAN_SIZE, VM_PROT_READ, PTE_CACHE);

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	/* Switch tables */
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	cpu_setttb(kernel_l1pt.pv_pa, true);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved here from cpu_startup() as data_abort_handler() references
	 * this during init
	 */
	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in cpu_setttb()
	 * but since we are bootstrapping, the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel, so
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();

	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("kstack V%08lx P%08lx\n", kernelstack.pv_va,
		    kernelstack.pv_pa);
#endif  /* PMAP_DEBUG */

	/*
	 * Now we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler. Until then we will use a handler that just panics but
	 * tells us why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
#ifdef VERBOSE_INIT_ARM
	printf("\ndata_abort_handler_address = %08x\n", data_abort_handler_address);
	printf("prefetch_abort_handler_address = %08x\n", prefetch_abort_handler_address);
	printf("undefined_handler_address = %08x\n", undefined_handler_address);
#endif

	/* Initialise the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined ");
#endif
	undefined_init();

	/* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
	printf("page ");
#endif
	uvm_md_init();
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend),
	    VM_FREELIST_DEFAULT);

	/* Bootstrap the pmap, telling it where the kernel page table is */
#ifdef VERBOSE_INIT_ARM
	printf("pmap ");
#endif
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

	/* Set up the IRQ system */
#ifdef VERBOSE_INIT_ARM
	printf("irq ");
#endif
	ixp12x0_intr_init();

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
		physical_freestart, free_pages, free_pages);
	printf("freemempos=%08lx\n", freemempos);
	printf("switching to new L1 page table  @%#lx... \n", kernel_l1pt.pv_pa);
#endif

	consinit();
#ifdef VERBOSE_INIT_ARM
	printf("consinit \n");
#endif

	ixdp_ixp12x0_cc_setup();

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

#if NKSYMS || defined(DDB) || defined(MODULAR)
	ksyms_addsyms_elf(symbolsize, ((int *)&end), ((char *)&end) + symbolsize);
#endif

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* We return the new stack pointer address */
	return (kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}

void
consinit(void)
{
	static int consinit_called = 0;

	if (consinit_called != 0)
		return;

	consinit_called = 1;

	pmap_devmap_register(ixm1200_devmap);

	if (ixpcomcnattach(&ixp12x0_bs_tag,
			   IXPCOM_UART_HWBASE, IXPCOM_UART_VBASE,
			   CONSPEED, CONMODE))
		panic("can't init serial console @%lx", IXPCOM_UART_HWBASE);
}

/*
 * For optimal cache cleaning we need two 16K banks of
 * virtual address space that NOTHING else will access,
 * and then we alternate the cache cleaning between the
 * two banks.
 * The cache cleaning code requires the two banks to be aligned
 * on a total-size boundary so the banks can be alternated by
 * EORing the size bit (assumes the bank size is a power of 2).
 */
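/*
 * For example, with CPU_IXP12X0_CACHE_CLEAN_SIZE (32KB) aligned on a
 * 32KB boundary, the two 16KB banks start at ixp12x0_cc_base and
 * ixp12x0_cc_base + 0x4000, and the cleaning code can flip between them
 * with something like
 *
 *	ixp12x0_cache_clean_addr ^= ixp12x0_cache_clean_size;
 *
 * (illustrative only; the routine that consumes these variables lives
 * outside this file).
 */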
void
ixdp_ixp12x0_cc_setup(void)
{
	int loop;
	paddr_t kaddr;

	(void) pmap_extract(pmap_kernel(), KERNEL_TEXT_BASE, &kaddr);
	for (loop = 0; loop < CPU_IXP12X0_CACHE_CLEAN_SIZE; loop += PAGE_SIZE) {
		pt_entry_t * const ptep = vtopte(ixp12x0_cc_base + loop);
		const pt_entry_t npte = L2_S_PROTO | kaddr |
		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
		l2pte_set(ptep, npte, 0);
		PTE_SYNC(ptep);
	}
	ixp12x0_cache_clean_addr = ixp12x0_cc_base;
	ixp12x0_cache_clean_size = CPU_IXP12X0_CACHE_CLEAN_SIZE / 2;
}