/*	$NetBSD: sa11x0_hpc_machdep.c,v 1.23 2023/08/03 08:16:31 mrg Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine dependent functions for kernel setup.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sa11x0_hpc_machdep.c,v 1.23 2023/08/03 08:16:31 mrg Exp $");

#include "opt_ddb.h"
#include "opt_dram_pages.h"
#include "opt_modular.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/reboot.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/exec.h>
#include <sys/ksyms.h>
#include <sys/conf.h>	/* XXX for consinit related hacks */
#include <sys/device.h>
#include <sys/termios.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/intr.h>

#if NKSYMS || defined(DDB) || defined(MODULAR)
#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#include <sys/exec_elf.h>
#endif

#include <uvm/uvm.h>

#include <arm/arm32/machdep.h>
#include <arm/sa11x0/sa11x0_reg.h>
#include <arm/locore.h>
#include <arm/undefined.h>

#include <machine/bootconfig.h>
#include <machine/bootinfo.h>
#include <machine/io.h>
#include <machine/platid.h>
#include <machine/platid_mask.h>
#include <machine/rtc.h>
#include <machine/signal.h>

#include <dev/cons.h>
#include <dev/hpc/apm/apmvar.h>
#include <dev/hpc/bicons.h>

/* Kernel text starts 256K in from the bottom of the kernel address space. */
#define	KERNEL_TEXT_BASE	(KERNEL_BASE + 0x00040000)
#define	KERNEL_VM_BASE		(KERNEL_BASE + 0x00C00000)
#define	KERNEL_VM_SIZE		0x05000000
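
/*
 * Layout notes, derived from the constants above: the bottom 256 KB of
 * the kernel address space is used for bootstrap allocations, the kernel
 * image itself sits at KERNEL_TEXT_BASE, and managed kernel VM starts
 * 12 MB in at KERNEL_VM_BASE with 0x05000000 (80 MB) reserved for it.
 */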

extern BootConfig bootconfig;	/* Boot config storage */

extern paddr_t physical_start;
extern paddr_t physical_freestart;
extern paddr_t physical_freeend;
extern paddr_t physical_end;

extern paddr_t msgbufphys;

extern int end;

#define	KERNEL_PT_VMEM		0	/* Page table for mapping video memory */
#define	KERNEL_PT_SYS		1	/* Page table for mapping proc0 zero page */
#define	KERNEL_PT_IO		2	/* Page table for mapping IO */
#define	KERNEL_PT_KERNEL	3	/* Page table for mapping kernel */
#define	KERNEL_PT_KERNEL_NUM	4
#define	KERNEL_PT_VMDATA	(KERNEL_PT_KERNEL + KERNEL_PT_KERNEL_NUM)
					/* Page tables for mapping kernel VM */
#define	KERNEL_PT_VMDATA_NUM	4	/* start with 16MB of KVM */
#define	NUM_KERNEL_PTS		(KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
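
/*
 * Each L2 table linked below spans 4 MB (0x00400000) of kernel virtual
 * space, as the stride in the pmap_link_l2pt() loops further down shows,
 * so KERNEL_PT_VMDATA_NUM == 4 yields the initial 16 MB of KVM noted
 * above.
 */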

pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];

#define	CPU_SA110_CACHE_CLEAN_SIZE	(0x4000 * 2)
extern unsigned int sa1_cache_clean_addr;
extern unsigned int sa1_cache_clean_size;
static vaddr_t sa1_cc_base;

/* Non-buffered non-cacheable memory needed to enter idle mode */
extern vaddr_t sa11x0_idle_mem;

/* Prototypes */
void data_abort_handler(trapframe_t *);
void prefetch_abort_handler(trapframe_t *);
void undefinedinstruction_bounce(trapframe_t *);
u_int cpu_get_control(void);

vaddr_t init_sa11x0(int, char **, struct bootinfo *);

#ifdef BOOT_DUMP
void dumppages(char *, int);
#endif

#ifdef DEBUG_BEFOREMMU
static void fakecninit(void);
#endif

/* Mode dependent sleep function holder */
extern void (*__sleep_func)(void *);
extern void *__sleep_ctx;

/* Number of DRAM pages which are installed */
/* Units are 4K pages, so 8192 is 32 MB of memory */
#ifndef DRAM_PAGES
#define	DRAM_PAGES	8192
#endif

/*
 * Static device mappings.  These peripheral registers are mapped at
 * fixed virtual addresses very early in initarm() so that we can use
 * them while booting the kernel, and they stay at the same address
 * throughout the kernel's lifetime.
 */
static const struct pmap_devmap sa11x0_devmap[] = {
	/* Physical/virtual address for UART #3. */
	DEVMAP_ENTRY(
		SACOM3_VBASE,
		SACOM3_BASE,
		0x24
	),
	DEVMAP_ENTRY_END
};
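
/*
 * Only the 0x24 bytes of SACOM3 UART registers are requested here;
 * DEVMAP_ENTRY presumably rounds that up to its mapping granularity.
 * The fixed SACOM3_VBASE mapping is what allows the boot console to be
 * reached at a known virtual address before the regular bus_space
 * mappings exist.
 */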

/*
 * This routine is responsible for setting up everything that must be
 * in place by the time main() is called.
 * This includes:
 *   Initializing the physical console so characters can be printed.
 *   Setting up page tables for the kernel.
 */
vaddr_t
init_sa11x0(int argc, char **argv, struct bootinfo *bi)
{
	u_int kerneldatasize, symbolsize;
	u_int l1pagetable;
	vaddr_t freemempos;
	vsize_t pt_size;
	int loop;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	Elf_Shdr *sh;
#endif

#ifdef DEBUG_BEFOREMMU
	/*
	 * At this point we cannot call the real consinit() yet.
	 * Instead, call a faked-up version of consinit() that does its
	 * work with the MMU disabled.
	 */
	fakecninit();
#endif

	/*
	 * XXX for now, overwrite bootconfig to hardcoded values.
	 * XXX kill bootconfig and directly call uvm_physload
	 */
	bootconfig.dram[0].address = 0xc0000000;
	bootconfig.dram[0].pages = DRAM_PAGES;
	bootconfig.dramblocks = 1;

	kerneldatasize = (uint32_t)&end - (uint32_t)KERNEL_TEXT_BASE;
	symbolsize = 0;
#if NKSYMS || defined(DDB) || defined(MODULAR)
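	/*
	 * The boot loader appears to leave the kernel's ELF headers and
	 * symbol tables in memory right after the image, at "end"; if an
	 * ELF magic number is found there, walk the section headers to
	 * determine how far that data extends.
	 */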
	if (!memcmp(&end, "\177ELF", 4)) {
		/*
		 * XXXGCC12.
		 * This accesses beyond what "int end" technically supplies.
		 */
#pragma GCC push_options
#pragma GCC diagnostic ignored "-Warray-bounds"
		sh = (Elf_Shdr *)((char *)&end + ((Elf_Ehdr *)&end)->e_shoff);
#pragma GCC pop_options
		loop = ((Elf_Ehdr *)&end)->e_shnum;
		for (; loop; loop--, sh++)
			if (sh->sh_offset > 0 &&
			    (sh->sh_offset + sh->sh_size) > symbolsize)
				symbolsize = sh->sh_offset + sh->sh_size;
	}
#endif

	printf("kernsize=0x%x\n", kerneldatasize);
	kerneldatasize += symbolsize;
	kerneldatasize = ((kerneldatasize - 1) & ~(PAGE_SIZE * 4 - 1)) +
	    PAGE_SIZE * 8;
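	/*
	 * The expression above effectively rounds the image size (text,
	 * data and symbols) up to a 16 KB boundary and then adds another
	 * 16 KB of slack.
	 */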

	/*
	 * hpcboot has loaded me with MMU disabled.
	 * So create kernel page tables and enable MMU.
	 */

	/*
	 * Set up the variables that define the availability of physical
	 * memory.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_freestart = physical_start
	    + (KERNEL_TEXT_BASE - KERNEL_BASE) + kerneldatasize;
	physical_end = bootconfig.dram[bootconfig.dramblocks - 1].address
	    + bootconfig.dram[bootconfig.dramblocks - 1].pages * PAGE_SIZE;
	physical_freeend = physical_end;

	for (loop = 0; loop < bootconfig.dramblocks; ++loop)
		physmem += bootconfig.dram[loop].pages;

	/* XXX handle UMA framebuffer memory */

	/* Use the first 256kB to allocate things */
	freemempos = KERNEL_BASE;
	memset((void *)KERNEL_BASE, 0, KERNEL_TEXT_BASE - KERNEL_BASE);

	/*
	 * Right.  We have the bottom meg of memory mapped to 0x00000000
	 * so we can get at it.  The kernel will occupy the start of it.
	 * After the kernel/args we allocate some of the fixed page tables
	 * we need to get the system going.
	 * We allocate one page directory and NUM_KERNEL_PTS page tables
	 * and store the physical addresses in the kernel_pt_table array.
	 * Remember that neither the L1 page directory nor the L2 page
	 * tables are the same size as a page!
	 *
	 * OK, the next bit of physical allocation may look complex but it
	 * is really quite simple.  It is done like this so that no memory
	 * gets wasted during the allocation of various pages and tables
	 * that are all different sizes.
	 * The start address will be page aligned.
	 * We allocate the kernel page directory on the first free 16KB
	 * boundary we find.
	 * We allocate the kernel page tables on the first 1KB boundary we
	 * find.  We allocate at least 9 PTs (NUM_KERNEL_PTS, currently 11).
	 * This means that in the process we KNOW that we will encounter at
	 * least one 16KB boundary.
	 *
	 * Eventually, if the top end of the memory gets used for process
	 * L1 page tables, the kernel L1 page table may be moved up there.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)						\
	do {								\
		alloc_pages((var).pv_pa, (np));				\
		(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start; \
	} while (0)
#define	alloc_pages(var, np)						\
	do {								\
		(var) = freemempos;					\
		freemempos += (np) * PAGE_SIZE;				\
		if (freemempos > KERNEL_TEXT_BASE)			\
			panic("%s: out of memory", __func__);		\
	} while (0)
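
	/*
	 * Example: valloc_pages(kernelstack, UPAGES) carves UPAGES pages
	 * out of the bootstrap area starting at freemempos, records their
	 * physical address in kernelstack.pv_pa and the matching kernel
	 * virtual address in kernelstack.pv_va, and panics if the 256 KB
	 * below KERNEL_TEXT_BASE is exhausted.
	 */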

	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		alloc_pages(kernel_pt_table[loop].pv_pa,
		    L2_TABLE_SIZE / PAGE_SIZE);
		kernel_pt_table[loop].pv_va = kernel_pt_table[loop].pv_pa;
	}

	/* This should never happen, but check it anyway. */
	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	pt_size = round_page(freemempos) - physical_start;
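
	/*
	 * pt_size now covers everything allocated so far (the L1 table,
	 * the L2 tables and the vector page); that whole range is mapped
	 * with page-table attributes further down.  Note that freemempos
	 * started at KERNEL_BASE, which the code treats as identical to
	 * physical_start (DRAM is hard-wired to 0xc0000000 above).
	 */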

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va);
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va);
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va);
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va);
#endif

	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/*
	 * XXX Actually, we only need virtual space and don't need
	 * XXX physical memory for sa1_cc_base and sa11x0_idle_mem.
	 */
	/*
	 * XXX totally stuffed hack to work around problems introduced
	 * in recent versions of the pmap code.  Due to the calls used
	 * there we cannot allocate virtual memory during bootstrap.
	 */
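	/*
	 * Burn single pages until freemempos lands on a boundary that is
	 * aligned to CPU_SA110_CACHE_CLEAN_SIZE, then take the rest of
	 * the cache-clean window in one allocation.
	 */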
	for (;;) {
		alloc_pages(sa1_cc_base, 1);
		if (!(sa1_cc_base & (CPU_SA110_CACHE_CLEAN_SIZE - 1)))
			break;
	}
	alloc_pages(sa1_cache_clean_addr, CPU_SA110_CACHE_CLEAN_SIZE / PAGE_SIZE - 1);

	sa1_cache_clean_addr = sa1_cc_base;
	sa1_cache_clean_size = CPU_SA110_CACHE_CLEAN_SIZE / 2;
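
	/*
	 * Only half of the doubled window is advertised at a time; the
	 * cache-clean code presumably alternates between the two halves
	 * of the region.
	 */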

	alloc_pages(sa11x0_idle_mem, 1);

	/*
	 * Ok, we have allocated physical pages for the primary kernel
	 * page tables.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table\n");
#endif

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 page tables into the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
#define	SAIPIO_BASE		0xd0000000		/* XXX XXX */
	pmap_link_l2pt(l1pagetable, SAIPIO_BASE,
	    &kernel_pt_table[KERNEL_PT_IO]);
	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; ++loop)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel code/data */

	/*
	 * XXX there is no ELF header to find RO region.
	 * XXX What should we do?
	 */
#if 0
	if (N_GETMAGIC(kernexec[0]) == ZMAGIC) {
		logical = pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
		    physical_start, kernexec->a_text,
		    VM_PROT_READ, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable,
		    KERNEL_TEXT_BASE + logical, physical_start + logical,
		    kerneldatasize - kernexec->a_text,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	} else
#endif
	pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
	    KERNEL_TEXT_BASE - KERNEL_BASE + physical_start,
	    kerneldatasize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map page tables */
	pmap_map_chunk(l1pagetable, KERNEL_BASE, physical_start, pt_size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map a page for entering idle mode */
	pmap_map_entry(l1pagetable, sa11x0_idle_mem, sa11x0_idle_mem,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
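
	/*
	 * The vector page is still empty at this point; the actual
	 * exception vectors are copied into it later by
	 * arm32_vector_init(), once the MMU has been turned on.
	 */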

	/* Map the statically mapped devices. */
	pmap_devmap_bootstrap(l1pagetable, sa11x0_devmap);

	pmap_map_chunk(l1pagetable, sa1_cache_clean_addr, 0xe0000000,
	    CPU_SA110_CACHE_CLEAN_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
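
	/*
	 * The cache-clean window is backed by physical address 0xe0000000,
	 * which appears to be the SA-11x0 zero/cache-flush bank, so reads
	 * through this cacheable window displace dirty lines without
	 * touching real DRAM.
	 */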

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
#ifdef VERBOSE_INIT_ARM
	printf("kstack V%08lx P%08lx\n", kernelstack.pv_va,
	    kernelstack.pv_pa);
#endif	/* VERBOSE_INIT_ARM */

	/*
	 * We should set a data abort handler.
	 * Once things get going this will change, as we will need a proper
	 * handler.  Until then we will use a handler that just panics but
	 * tells us why.
	 * Initialization of the vectors will just panic on a data abort;
	 * this fills in a slightly better handler.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
#ifdef DEBUG
	printf("%08x %08x %08x\n", data_abort_handler_address,
	    prefetch_abort_handler_address, undefined_handler_address);
#endif

	/* Initialize the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined\n");
#endif
	undefined_init();

	/* Set the page table address. */
#ifdef VERBOSE_INIT_ARM
	printf("switching to new L1 page table @%#lx...\n", kernel_l1pt.pv_pa);
#endif
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	cpu_setttb(kernel_l1pt.pv_pa, true);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));
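
	/*
	 * During the switch both domain 0 and the kernel pmap domain are
	 * given client access; once the new L1 table is live and the TLB
	 * has been flushed, access is narrowed back to the kernel pmap
	 * domain alone.
	 */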

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init.
	 */
	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);

#ifdef BOOT_DUMP
	dumppages((char *)0xc0000000, 16 * PAGE_SIZE);
	dumppages((char *)0xb0100000, 64); /* XXX */
#endif
	/* Enable MMU, I-cache, D-cache, write buffer. */
	cpufunc_control(0x337f, 0x107d);
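
	/*
	 * Going by the ARMv4 CP15 control register layout, the 0x337f mask
	 * covers the MMU (M), alignment (A), D-cache (C), write buffer (W),
	 * the 32-bit/late-abort compatibility bits, the S/R protection
	 * bits, the I-cache (I) and the high-vectors (V) bit; the value
	 * 0x107d enables M, C, W, I and the compatibility bits, leaving
	 * alignment checking and high vectors off.
	 */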

	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	consinit();

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

#ifdef VERBOSE_INIT_ARM
	printf("freemempos=%08lx\n", freemempos);
	printf("MMU enabled. control=%08x\n", cpu_get_control());
#endif

	/* Load memory into UVM. */
	uvm_md_init();
	for (loop = 0; loop < bootconfig.dramblocks; loop++) {
		paddr_t dblk_start = (paddr_t)bootconfig.dram[loop].address;
		paddr_t dblk_end = dblk_start
		    + (bootconfig.dram[loop].pages * PAGE_SIZE);

		if (dblk_start < physical_freestart)
			dblk_start = physical_freestart;
		if (dblk_end > physical_freeend)
			dblk_end = physical_freeend;

		uvm_page_physload(atop(dblk_start), atop(dblk_end),
		    atop(dblk_start), atop(dblk_end), VM_FREELIST_DEFAULT);
	}

	/* Bootstrap pmap, telling it where managed kernel virtual memory is */
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

#ifdef BOOT_DUMP
	dumppages((char *)kernel_l1pt.pv_va, 16);
#endif

#ifdef DDB
	db_machine_init();
#endif
#if NKSYMS || defined(DDB) || defined(MODULAR)
	ksyms_addsyms_elf(symbolsize, ((int *)&end), ((char *)&end) + symbolsize);
#endif

	printf("kernsize=0x%x", kerneldatasize);
	printf(" (including 0x%x symbols)\n", symbolsize);

#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif /* DDB */

	/* We return the new stack pointer address */
	return kernelstack.pv_va + USPACE_SVC_STACK_TOP;
}

void
consinit(void)
{
	static int consinit_called = 0;

	if (consinit_called != 0)
		return;

	consinit_called = 1;
	if (bootinfo->bi_cnuse == BI_CNUSE_SERIAL) {
		cninit();
	}
}

#ifdef DEBUG_BEFOREMMU
cons_decl(sacom);

static void
fakecninit(void)
{
	static struct consdev fakecntab = cons_init(sacom);
	cn_tab = &fakecntab;

	(*cn_tab->cn_init)(0);
	cn_tab->cn_pri = CN_REMOTE;
}
#endif
