/*	$NetBSD: arm32_kvminit.c,v 1.69 2022/04/02 11:16:07 skrll Exp $	*/

/*
 * Copyright (c) 2002, 2003, 2005 Genetec Corporation. All rights reserved.
 * Written by Hiroyuki Bessho for Genetec Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Genetec Corporation may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GENETEC CORPORATION
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 1997,1998 Mark Brinicombe.
 * Copyright (c) 1997,1998 Causality Limited.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 2007 Microsoft
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Microsoft
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_arm_debug.h"
#include "opt_arm_start.h"
#include "opt_efi.h"
#include "opt_fdt.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.69 2022/04/02 11:16:07 skrll Exp $");

#include <sys/param.h>

#include <sys/asan.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/reboot.h>

#include <dev/cons.h>

#include <uvm/uvm_extern.h>

#include <arm/arm32/machdep.h>
#include <arm/bootconfig.h>
#include <arm/db_machdep.h>
#include <arm/locore.h>
#include <arm/undefined.h>

#if defined(FDT)
#include <arch/evbarm/fdt/platform.h>
#include <arm/fdt/arm_fdtvar.h>
#include <dev/fdt/fdt_memory.h>
#endif

#ifdef MULTIPROCESSOR
#ifndef __HAVE_CPU_UAREA_ALLOC_IDLELWP
#error __HAVE_CPU_UAREA_ALLOC_IDLELWP required to not waste pages for idlestack
#endif
#endif

#ifdef VERBOSE_INIT_ARM
#define VPRINTF(...)	printf(__VA_ARGS__)
#else
#define VPRINTF(...)	__nothing
#endif

#if defined(__HAVE_GENERIC_START)
#if defined(KERNEL_BASE_VOFFSET)
#error KERNEL_BASE_VOFFSET should not be defined with __HAVE_GENERIC_START
#endif
#endif

#if defined(EFI_RUNTIME)
#if !defined(ARM_MMU_EXTENDED)
#error EFI_RUNTIME is only supported with ARM_MMU_EXTENDED
#endif
#endif

struct bootmem_info bootmem_info;

extern void *msgbufaddr;
paddr_t msgbufphys;
paddr_t physical_start;
paddr_t physical_end;

extern char etext[];
extern char __data_start[], _edata[];
extern char __bss_start[], __bss_end__[];
extern char _end[];

/* Page tables for mapping kernel VM */
#define KERNEL_L2PT_VMDATA_NUM	8	/* start with 32MB of KVM */

#ifdef KASAN
vaddr_t kasan_kernelstart;
vaddr_t kasan_kernelsize;

#define KERNEL_L2PT_KASAN_NUM	howmany(VM_KERNEL_KASAN_SIZE, L2_S_SEGSIZE)
bool kasan_l2pts_created __attribute__((__section__(".data"))) = false;
pv_addr_t kasan_l2pt[KERNEL_L2PT_KASAN_NUM];
#else
#define KERNEL_L2PT_KASAN_NUM	0
#endif

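/*
 * Offset used by KERN_VTOPHYS()/KERN_PHYSTOV() to convert between kernel
 * virtual and physical addresses.  It lives in .data rather than BSS,
 * presumably so that the value the start code fills in early (see
 * arm32_bootmem_init() below) is not lost when the BSS is cleared.
 */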
u_long kern_vtopdiff __attribute__((__section__(".data")));

void
arm32_bootmem_init(paddr_t memstart, psize_t memsize, vsize_t kernelstart)
{
	struct bootmem_info * const bmi = &bootmem_info;
	pv_addr_t *pv = bmi->bmi_freeblocks;

	/*
	 * FDT/generic start fills in kern_vtopdiff early
	 */
#if defined(__HAVE_GENERIC_START)
	extern char KERNEL_BASE_virt[];
	extern char const __stop__init_memory[];

	VPRINTF("%s: kern_vtopdiff=%#lx\n", __func__, kern_vtopdiff);

	vaddr_t kstartva = trunc_page((vaddr_t)KERNEL_BASE_virt);
	vaddr_t kendva = round_page((vaddr_t)__stop__init_memory);

	kernelstart = KERN_VTOPHYS(kstartva);

	VPRINTF("%s: kstartva=%#lx, kernelstart=%#lx\n", __func__, kstartva, kernelstart);
#else
	vaddr_t kendva = round_page((vaddr_t)_end);

#if defined(KERNEL_BASE_VOFFSET)
	kern_vtopdiff = KERNEL_BASE_VOFFSET;
#else
	KASSERT(memstart == kernelstart);
	kern_vtopdiff = KERNEL_BASE + memstart;
#endif
#endif
	paddr_t kernelend = KERN_VTOPHYS(kendva);

	VPRINTF("%s: memstart=%#lx, memsize=%#lx\n", __func__,
	    memstart, memsize);
	VPRINTF("%s: kernelstart=%#lx, kernelend=%#lx\n", __func__,
	    kernelstart, kernelend);

	physical_start = bmi->bmi_start = memstart;
	physical_end = bmi->bmi_end = memstart + memsize;
#ifndef ARM_HAS_LPAE
	if (physical_end == 0) {
		physical_end = -PAGE_SIZE;
		memsize -= PAGE_SIZE;
		bmi->bmi_end -= PAGE_SIZE;
		VPRINTF("%s: memsize shrunk by a page to avoid ending at 4GB\n",
		    __func__);
	}
#endif
	physmem = memsize / PAGE_SIZE;

	/*
	 * Let's record where the kernel lives.
	 */

	bmi->bmi_kernelstart = kernelstart;
	bmi->bmi_kernelend = kernelend;

#if defined(FDT)
	fdt_memory_remove_range(bmi->bmi_kernelstart,
	    bmi->bmi_kernelend - bmi->bmi_kernelstart);
#endif

	VPRINTF("%s: kernel phys start %#lx end %#lx\n", __func__, kernelstart,
	    kernelend);

#if 0
	// XXX Makes RPI abort
	KASSERT((kernelstart & (L2_S_SEGSIZE - 1)) == 0);
#endif
	/*
	 * Now the rest of the free memory must be after the kernel.
	 */
	pv->pv_pa = bmi->bmi_kernelend;
	pv->pv_va = KERN_PHYSTOV(pv->pv_pa);
	pv->pv_size = bmi->bmi_end - bmi->bmi_kernelend;
	bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
	VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
	    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
	    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
	pv++;

	/*
	 * Add a free block for any memory before the kernel.
	 */
	if (bmi->bmi_start < bmi->bmi_kernelstart) {
		pv->pv_pa = bmi->bmi_start;
		pv->pv_va = KERN_PHYSTOV(pv->pv_pa);
		pv->pv_size = bmi->bmi_kernelstart - pv->pv_pa;
		bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
		VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
		    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
		    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
		pv++;
	}

	bmi->bmi_nfreeblocks = pv - bmi->bmi_freeblocks;

	SLIST_INIT(&bmi->bmi_freechunks);
	SLIST_INIT(&bmi->bmi_chunks);
}

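/*
 * Try to append pv to acc_pv.  This succeeds only if the two regions are
 * contiguous both physically and virtually and share the same protection
 * and cacheability; on success, acc_pv simply grows to cover pv.
 */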
static bool
concat_pvaddr(pv_addr_t *acc_pv, pv_addr_t *pv)
{
	if (acc_pv->pv_pa + acc_pv->pv_size == pv->pv_pa
	    && acc_pv->pv_va + acc_pv->pv_size == pv->pv_va
	    && acc_pv->pv_prot == pv->pv_prot
	    && acc_pv->pv_cache == pv->pv_cache) {
#if 0
		VPRINTF("%s: appending pv %p (%#lx..%#lx) to %#lx..%#lx\n",
		    __func__, pv, pv->pv_pa, pv->pv_pa + pv->pv_size,
		    acc_pv->pv_pa, acc_pv->pv_pa + acc_pv->pv_size);
#endif
		acc_pv->pv_size += pv->pv_size;
		return true;
	}

	return false;
}

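/*
 * Insert pv into bmi_chunks, which is kept sorted by virtual address.
 * Where possible the region is merged into an existing neighbour (a merge
 * may in turn let that neighbour swallow its successor); otherwise an
 * entry is taken from bmi_freechunks to hold it.
 */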
static void
add_pages(struct bootmem_info *bmi, pv_addr_t *pv)
{
	pv_addr_t **pvp = &SLIST_FIRST(&bmi->bmi_chunks);
	while ((*pvp) != NULL && (*pvp)->pv_va <= pv->pv_va) {
		pv_addr_t * const pv0 = (*pvp);
		KASSERT(SLIST_NEXT(pv0, pv_list) == NULL || pv0->pv_pa < SLIST_NEXT(pv0, pv_list)->pv_pa);
		if (concat_pvaddr(pv0, pv)) {
			VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
			    __func__, "appending", pv,
			    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
			    pv0->pv_pa, pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
			pv = SLIST_NEXT(pv0, pv_list);
			if (pv != NULL && concat_pvaddr(pv0, pv)) {
				VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
				    __func__, "merging", pv,
				    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
				    pv0->pv_pa,
				    pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
				SLIST_REMOVE_AFTER(pv0, pv_list);
				SLIST_INSERT_HEAD(&bmi->bmi_freechunks, pv, pv_list);
			}
			return;
		}
		KASSERT(pv->pv_va != (*pvp)->pv_va);
		pvp = &SLIST_NEXT(*pvp, pv_list);
	}
	KASSERT((*pvp) == NULL || pv->pv_va < (*pvp)->pv_va);
	pv_addr_t * const new_pv = SLIST_FIRST(&bmi->bmi_freechunks);
	KASSERT(new_pv != NULL);
	SLIST_REMOVE_HEAD(&bmi->bmi_freechunks, pv_list);
	*new_pv = *pv;
	SLIST_NEXT(new_pv, pv_list) = *pvp;
	(*pvp) = new_pv;

	VPRINTF("%s: adding pv %p (pa %#lx, va %#lx, %lu pages) ",
	    __func__, new_pv, new_pv->pv_pa, new_pv->pv_va,
	    new_pv->pv_size / PAGE_SIZE);
	if (SLIST_NEXT(new_pv, pv_list)) {
		VPRINTF("before pa %#lx\n", SLIST_NEXT(new_pv, pv_list)->pv_pa);
	} else {
		VPRINTF("at tail\n");
	}
}

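/*
 * Allocate npages of memory from the first free block large enough to
 * hold them, recording the allocation in *pv.  As a side effect, the
 * first time the head of the free list is found suitably aligned, the
 * kernel L1 page table (and, with EFI_RUNTIME, the EFI L1 table) is
 * carved out of it before anything else.
 */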
static void
valloc_pages(struct bootmem_info *bmi, pv_addr_t *pv, size_t npages,
    int prot, int cache, bool zero_p)
{
	size_t nbytes = npages * PAGE_SIZE;
	pv_addr_t *free_pv = bmi->bmi_freeblocks;
	size_t free_idx = 0;
	static bool l1pt_found;

	KASSERT(npages > 0);

	/*
	 * If we haven't allocated the kernel L1 page table and we are aligned
	 * at a L1 table boundary, alloc the memory for it.
	 */
	if (!l1pt_found
	    && (free_pv->pv_pa & (L1_TABLE_SIZE - 1)) == 0
	    && free_pv->pv_size >= L1_TABLE_SIZE) {
		l1pt_found = true;
		VPRINTF(" l1pt");

		valloc_pages(bmi, &kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &kernel_l1pt);
#if defined(EFI_RUNTIME)
		valloc_pages(bmi, &efirt_l1pt, L1_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &efirt_l1pt);
#endif
	}

	while (nbytes > free_pv->pv_size) {
		free_pv++;
		free_idx++;
		if (free_idx == bmi->bmi_nfreeblocks) {
			panic("%s: could not allocate %zu bytes",
			    __func__, nbytes);
		}
	}

	/*
	 * As we allocate the memory, make sure that we don't walk over
	 * our current first level translation table.
	 */
	KASSERT((armreg_ttbr_read() & ~(L1_TABLE_SIZE - 1)) != free_pv->pv_pa);

#if defined(FDT)
	fdt_memory_remove_range(free_pv->pv_pa, nbytes);
#endif
	pv->pv_pa = free_pv->pv_pa;
	pv->pv_va = free_pv->pv_va;
	pv->pv_size = nbytes;
	pv->pv_prot = prot;
	pv->pv_cache = cache;

	/*
	 * If PTE_PAGETABLE uses the same cache modes as PTE_CACHE
	 * just use PTE_CACHE.
	 */
	if (cache == PTE_PAGETABLE
	    && pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt
	    && pte_l2_l_cache_mode == pte_l2_l_cache_mode_pt
	    && pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt)
		pv->pv_cache = PTE_CACHE;

	free_pv->pv_pa += nbytes;
	free_pv->pv_va += nbytes;
	free_pv->pv_size -= nbytes;
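	/*
	 * If this free block is now exhausted, drop it and close
	 * the gap in the array.
	 */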
	if (free_pv->pv_size == 0) {
		--bmi->bmi_nfreeblocks;
		for (; free_idx < bmi->bmi_nfreeblocks; free_idx++) {
			free_pv[0] = free_pv[1];
		}
	}

	bmi->bmi_freepages -= npages;

	if (zero_p)
		memset((void *)pv->pv_va, 0, nbytes);
}

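/*
 * Build the initial kernel mappings: bootstrap L1/L2 page tables, the
 * per-mode and per-CPU stacks, the message buffer and the vector page
 * are carved out of boot memory and mapped, and the MMU is finally
 * switched over to the new L1 table.
 */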
void
arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase,
    const struct pmap_devmap *devmap, bool mapallmem_p)
{
	struct bootmem_info * const bmi = &bootmem_info;
#ifdef MULTIPROCESSOR
	const size_t cpu_num = arm_cpu_max;
#else
	const size_t cpu_num = 1;
#endif

#ifdef ARM_HAS_VBAR
	const bool map_vectors_p = false;
#elif defined(CPU_ARMV7) || defined(CPU_ARM11)
	const bool map_vectors_p = vectors == ARM_VECTORS_HIGH
	    || (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) == 0;
#else
	const bool map_vectors_p = true;
#endif

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	KASSERT(mapallmem_p);
#ifdef ARM_MMU_EXTENDED
	/*
	 * The direct map VA space ends at the start of the kernel VM space.
	 */
	pmap_directlimit = kernel_vm_base;
#else
	KASSERT(kernel_vm_base - KERNEL_BASE >= physical_end - physical_start);
#endif /* ARM_MMU_EXTENDED */
#endif /* __HAVE_MM_MD_DIRECT_MAPPED_PHYS */

	/*
	 * Calculate the number of L2 pages needed for mapping the
	 * kernel + data + stuff. Assume 2 L2 pages for kernel, 1 for vectors,
	 * and 1 for IO
	 */
	size_t kernel_size = bmi->bmi_kernelend;
	kernel_size -= (bmi->bmi_kernelstart & -L2_S_SEGSIZE);
	kernel_size += L1_TABLE_SIZE;
	kernel_size += PAGE_SIZE * KERNEL_L2PT_VMDATA_NUM;
	kernel_size += PAGE_SIZE * KERNEL_L2PT_KASAN_NUM;
	if (map_vectors_p) {
		kernel_size += PAGE_SIZE;	/* L2PT for VECTORS */
	}
	if (iovbase) {
		kernel_size += PAGE_SIZE;	/* L2PT for IO */
	}
	kernel_size +=
	    cpu_num * (ABT_STACK_SIZE + FIQ_STACK_SIZE + IRQ_STACK_SIZE
	    + UND_STACK_SIZE + UPAGES) * PAGE_SIZE;
	kernel_size += round_page(MSGBUFSIZE);
	kernel_size += 0x10000;	/* slop */
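	/*
	 * Without a full direct map, presumably leave room for the
	 * extra L2 tables needed to map the bootstrap allocations
	 * themselves: one page per L2_S_SEGSIZE of the size
	 * estimated so far.
	 */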
	if (!mapallmem_p) {
		kernel_size += PAGE_SIZE
		    * howmany(kernel_size, L2_S_SEGSIZE);
	}
	kernel_size = round_page(kernel_size);

	/*
	 * Now we know how many L2 pages it will take.
	 */
	const size_t KERNEL_L2PT_KERNEL_NUM =
	    howmany(kernel_size, L2_S_SEGSIZE);

	VPRINTF("%s: %zu L2 pages are needed to map %#zx kernel bytes\n",
	    __func__, KERNEL_L2PT_KERNEL_NUM, kernel_size);

	KASSERT(KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM < __arraycount(bmi->bmi_l2pts));
	pv_addr_t * const kernel_l2pt = bmi->bmi_l2pts;
	pv_addr_t * const vmdata_l2pt = kernel_l2pt + KERNEL_L2PT_KERNEL_NUM;
	pv_addr_t msgbuf;
	pv_addr_t text;
	pv_addr_t data;
	pv_addr_t chunks[__arraycount(bmi->bmi_l2pts) + 11];
#if ARM_MMU_XSCALE == 1
	pv_addr_t minidataclean;
#endif

	/*
	 * We need to allocate some fixed page tables to get the kernel going.
	 *
	 * We are going to allocate our bootstrap pages from the beginning of
	 * the free space that we just calculated.  We allocate one page
	 * directory and a number of page tables and store the physical
	 * addresses in the bmi_l2pts array in bootmem_info.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

	VPRINTF("%s: allocating page tables for", __func__);
	for (size_t i = 0; i < __arraycount(chunks); i++) {
		SLIST_INSERT_HEAD(&bmi->bmi_freechunks, &chunks[i], pv_list);
	}

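	/*
	 * Clear the L1 pv so that the sanity check further down can
	 * tell whether valloc_pages() ever found a 16K-aligned
	 * region for the kernel page directory.
	 */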
	kernel_l1pt.pv_pa = 0;
	kernel_l1pt.pv_va = 0;

#if defined(EFI_RUNTIME)
	efirt_l1pt.pv_pa = 0;
	efirt_l1pt.pv_va = 0;
#endif
	/*
	 * Allocate the L2 pages, but if we get to a page that is aligned for
	 * an L1 page table, we will allocate the pages for it first and then
	 * allocate the L2 page.
	 */

	if (map_vectors_p) {
		/*
		 * First allocate L2 page for the vectors.
		 */
		VPRINTF(" vector");
		valloc_pages(bmi, &bmi->bmi_vector_l2pt, 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &bmi->bmi_vector_l2pt);
	}

	/*
	 * Now allocate L2 pages for the kernel
	 */
	VPRINTF(" kernel");
	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; ++idx) {
		valloc_pages(bmi, &kernel_l2pt[idx], 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &kernel_l2pt[idx]);
	}

	/*
	 * Now allocate L2 pages for the initial kernel VA space.
	 */
	VPRINTF(" vm");
	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; ++idx) {
		valloc_pages(bmi, &vmdata_l2pt[idx], 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &vmdata_l2pt[idx]);
	}

#ifdef KASAN
	/*
	 * Now allocate L2 pages for the KASAN shadow map l2pt VA space.
	 */
	VPRINTF(" kasan");
	for (size_t idx = 0; idx < KERNEL_L2PT_KASAN_NUM; ++idx) {
		valloc_pages(bmi, &kasan_l2pt[idx], 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &kasan_l2pt[idx]);
	}

#endif
	/*
	 * If someone wanted a L2 page for I/O, allocate it now.
	 */
	if (iovbase) {
		VPRINTF(" io");
		valloc_pages(bmi, &bmi->bmi_io_l2pt, 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &bmi->bmi_io_l2pt);
	}

	VPRINTF("%s: allocating stacks\n", __func__);

	/* Allocate stacks for all modes and CPUs */
	valloc_pages(bmi, &abtstack, ABT_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &abtstack);
	valloc_pages(bmi, &fiqstack, FIQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &fiqstack);
	valloc_pages(bmi, &irqstack, IRQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &irqstack);
	valloc_pages(bmi, &undstack, UND_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &undstack);
	valloc_pages(bmi, &idlestack, UPAGES * cpu_num,		/* SVC32 */
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &idlestack);
	valloc_pages(bmi, &kernelstack, UPAGES,			/* SVC32 */
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &kernelstack);

	/* Allocate the message buffer from the end of memory. */
	const size_t msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE;
	valloc_pages(bmi, &msgbuf, msgbuf_pgs,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, false);
	add_pages(bmi, &msgbuf);
	msgbufphys = msgbuf.pv_pa;
	msgbufaddr = (void *)msgbuf.pv_va;

#ifdef KASAN
	kasan_kernelstart = KERNEL_BASE;
	kasan_kernelsize = (msgbuf.pv_va + round_page(MSGBUFSIZE)) - KERNEL_BASE;
#endif

	if (map_vectors_p) {
		/*
		 * Allocate a page for the system vector page.
		 * This page will just contain the system vectors and can be
		 * shared by all processes.
		 */
		VPRINTF(" vector");

		valloc_pages(bmi, &systempage, 1,
		    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
		    PTE_CACHE, true);
	}
	systempage.pv_va = vectors;

	/*
	 * If the caller needed a few extra pages for some reason, allocate
	 * them now.
	 */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		valloc_pages(bmi, &minidataclean, 1,
		    VM_PROT_READ | VM_PROT_WRITE, 0, true);
#endif

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables and stacks.  Let's just confirm that.
	 */
	if (kernel_l1pt.pv_va == 0
	    && (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0))
		panic("%s: Failed to allocate or align the kernel "
		    "page directory", __func__);

	VPRINTF("Creating L1 page table at 0x%08lx/0x%08lx\n",
	    kernel_l1pt.pv_va, kernel_l1pt.pv_pa);

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	vaddr_t l1pt_va = kernel_l1pt.pv_va;
	paddr_t l1pt_pa = kernel_l1pt.pv_pa;

	if (map_vectors_p) {
		/* Map the L2 pages tables in the L1 page table */
		const vaddr_t va = systempage.pv_va & -L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &bmi->bmi_vector_l2pt);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, bmi->bmi_vector_l2pt.pv_va,
		    bmi->bmi_vector_l2pt.pv_pa, systempage.pv_va, "(vectors)");
	}

	/*
	 * This enforces an alignment requirement of L2_S_SEGSIZE for kernel
	 * start PA
	 */
	const vaddr_t kernel_base =
	    KERN_PHYSTOV(bmi->bmi_kernelstart & -L2_S_SEGSIZE);

	VPRINTF("%s: kernel_base %lx KERNEL_L2PT_KERNEL_NUM %zu\n", __func__,
	    kernel_base, KERNEL_L2PT_KERNEL_NUM);

	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; idx++) {
		const vaddr_t va = kernel_base + idx * L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &kernel_l2pt[idx]);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, kernel_l2pt[idx].pv_va, kernel_l2pt[idx].pv_pa,
		    va, "(kernel)");
	}

	VPRINTF("%s: kernel_vm_base %lx KERNEL_L2PT_VMDATA_NUM %d\n", __func__,
	    kernel_vm_base, KERNEL_L2PT_VMDATA_NUM);

	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; idx++) {
		const vaddr_t va = kernel_vm_base + idx * L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &vmdata_l2pt[idx]);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, vmdata_l2pt[idx].pv_va, vmdata_l2pt[idx].pv_pa,
		    va, "(vm)");
	}
	if (iovbase) {
		const vaddr_t va = iovbase & -L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &bmi->bmi_io_l2pt);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, bmi->bmi_io_l2pt.pv_va, bmi->bmi_io_l2pt.pv_pa,
		    va, "(io)");
	}

#ifdef KASAN
	VPRINTF("%s: kasan_shadow_base %x KERNEL_L2PT_KASAN_NUM %d\n", __func__,
	    VM_KERNEL_KASAN_BASE, KERNEL_L2PT_KASAN_NUM);

	for (size_t idx = 0; idx < KERNEL_L2PT_KASAN_NUM; idx++) {
		const vaddr_t va = VM_KERNEL_KASAN_BASE + idx * L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &kasan_l2pt[idx]);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, kasan_l2pt[idx].pv_va, kasan_l2pt[idx].pv_pa,
		    va, "(kasan)");
	}
	kasan_l2pts_created = true;
#endif

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    kernel_vm_base + (KERNEL_L2PT_VMDATA_NUM * L2_S_SEGSIZE);

	// This could be done earlier, and then the kernel data and the
	// pages allocated above would get merged (concatenated).

	VPRINTF("Mapping kernel\n");

	extern char etext[];
	size_t totalsize = bmi->bmi_kernelend - bmi->bmi_kernelstart;
	size_t textsize = KERN_VTOPHYS((uintptr_t)etext) - bmi->bmi_kernelstart;

	textsize = (textsize + PGOFSET) & ~PGOFSET;

	/* start at offset of kernel in RAM */

	text.pv_pa = bmi->bmi_kernelstart;
	text.pv_va = KERN_PHYSTOV(bmi->bmi_kernelstart);
	text.pv_size = textsize;
	text.pv_prot = VM_PROT_READ | VM_PROT_EXECUTE;
	text.pv_cache = PTE_CACHE;

	VPRINTF("%s: adding chunk for kernel text %#lx..%#lx (VA %#lx)\n",
	    __func__, text.pv_pa, text.pv_pa + text.pv_size - 1, text.pv_va);

	add_pages(bmi, &text);

	data.pv_pa = text.pv_pa + textsize;
	data.pv_va = text.pv_va + textsize;
	data.pv_size = totalsize - textsize;
	data.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
	data.pv_cache = PTE_CACHE;

	VPRINTF("%s: adding chunk for kernel data/bss %#lx..%#lx (VA %#lx)\n",
	    __func__, data.pv_pa, data.pv_pa + data.pv_size - 1, data.pv_va);

	add_pages(bmi, &data);

	VPRINTF("Listing Chunks\n");

	pv_addr_t *lpv;
	SLIST_FOREACH(lpv, &bmi->bmi_chunks, pv_list) {
		VPRINTF("%s: pv %p: chunk VA %#lx..%#lx "
		    "(PA %#lx, prot %d, cache %d)\n",
		    __func__, lpv, lpv->pv_va, lpv->pv_va + lpv->pv_size - 1,
		    lpv->pv_pa, lpv->pv_prot, lpv->pv_cache);
	}
	VPRINTF("\nMapping Chunks\n");

	pv_addr_t cur_pv;
	pv_addr_t *pv = SLIST_FIRST(&bmi->bmi_chunks);
	if (!mapallmem_p || pv->pv_pa == bmi->bmi_start) {
		cur_pv = *pv;
		KASSERTMSG(cur_pv.pv_va >= KERNEL_BASE, "%#lx", cur_pv.pv_va);
		pv = SLIST_NEXT(pv, pv_list);
	} else {
		cur_pv.pv_va = KERNEL_BASE;
		cur_pv.pv_pa = KERN_VTOPHYS(cur_pv.pv_va);
		cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa;
		cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
		cur_pv.pv_cache = PTE_CACHE;
	}
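	/*
	 * Walk the sorted chunk list, merging compatible neighbours
	 * (and, when mapping all memory, the plain-memory holes
	 * between them) and emitting a pmap_map_chunk() call each
	 * time the current chunk cannot grow any further.
	 */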
	while (pv != NULL) {
		if (mapallmem_p) {
			if (concat_pvaddr(&cur_pv, pv)) {
				pv = SLIST_NEXT(pv, pv_list);
				continue;
			}
			if (cur_pv.pv_pa + cur_pv.pv_size < pv->pv_pa) {
				/*
				 * See if we can extend the current pv to
				 * encompass the hole, and if so do it and
				 * retry the concatenation.
				 */
				if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE)
				    && cur_pv.pv_cache == PTE_CACHE) {
					cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va;
					continue;
				}

				/*
				 * We couldn't, so emit the current chunk
				 * and move on to the hole.
				 */
				VPRINTF("%s: mapping chunk VA %#lx..%#lx "
				    "(PA %#lx, prot %d, cache %d)\n",
				    __func__,
				    cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
				    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
				pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
				    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);

				/*
				 * set the current chunk to the hole and try again.
				 */
				cur_pv.pv_pa += cur_pv.pv_size;
				cur_pv.pv_va += cur_pv.pv_size;
				cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va;
				cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
				cur_pv.pv_cache = PTE_CACHE;
				continue;
			}
		}

		/*
		 * The new pv didn't concatenate so emit the current one
		 * and use the new pv as the current pv.
		 */
		VPRINTF("%s: mapping chunk VA %#lx..%#lx "
		    "(PA %#lx, prot %d, cache %d)\n",
		    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
		    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
		pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
		    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
		cur_pv = *pv;
		pv = SLIST_NEXT(pv, pv_list);
	}

	/*
	 * If we are mapping all of memory, let's map the rest of memory.
	 */
	if (mapallmem_p && cur_pv.pv_pa + cur_pv.pv_size < bmi->bmi_end) {
		if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE)
		    && cur_pv.pv_cache == PTE_CACHE) {
			cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
		} else {
			KASSERTMSG(cur_pv.pv_va + cur_pv.pv_size <= kernel_vm_base,
			    "%#lx >= %#lx", cur_pv.pv_va + cur_pv.pv_size,
			    kernel_vm_base);
			VPRINTF("%s: mapping chunk VA %#lx..%#lx "
			    "(PA %#lx, prot %d, cache %d)\n",
			    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
			    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
			pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
			    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
			cur_pv.pv_pa += cur_pv.pv_size;
			cur_pv.pv_va += cur_pv.pv_size;
			cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
			cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
			cur_pv.pv_cache = PTE_CACHE;
		}
	}

	/*
	 * The amount we can direct map is limited by the start of the
	 * virtual part of the kernel address space.  Don't overrun
	 * into it.
	 */
	if (mapallmem_p && cur_pv.pv_va + cur_pv.pv_size > kernel_vm_base) {
		cur_pv.pv_size = kernel_vm_base - cur_pv.pv_va;
	}

	/*
	 * Now we map the final chunk.
	 */
	VPRINTF("%s: mapping last chunk VA %#lx..%#lx (PA %#lx, prot %d, cache %d)\n",
	    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
	    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
	pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
	    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);

	/*
	 * Now we map the stuff that isn't directly after the kernel
	 */
	if (map_vectors_p) {
		/* Map the vector page. */
		pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa,
		    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE, PTE_CACHE);
	}

	/* Map the Mini-Data cache clean area. */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		xscale_setup_minidata(l1pt_va, minidataclean.pv_va,
		    minidataclean.pv_pa);
#endif

	/*
	 * Map integrated peripherals at same address in first level page
	 * table so that we can continue to use console.
	 */
	if (devmap)
		pmap_devmap_bootstrap(l1pt_va, devmap);

	/* Tell the user about where all the bits and pieces live. */
	VPRINTF("%22s      Physical              Virtual         Num\n", " ");
	VPRINTF("%22s Starting    Ending    Starting    Ending    Pages\n", " ");

#ifdef VERBOSE_INIT_ARM
	static const char mem_fmt[] =
	    "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %u\n";
	static const char mem_fmt_nov[] =
	    "%20s: 0x%08lx 0x%08lx                       %zu\n";
#endif

#if 0
	// XXX Doesn't make sense if kernel not at bottom of RAM
	VPRINTF(mem_fmt, "SDRAM", bmi->bmi_start, bmi->bmi_end - 1,
	    KERN_PHYSTOV(bmi->bmi_start), KERN_PHYSTOV(bmi->bmi_end - 1),
	    (int)physmem);
#endif
	VPRINTF(mem_fmt, "text section",
	    text.pv_pa, text.pv_pa + text.pv_size - 1,
	    text.pv_va, text.pv_va + text.pv_size - 1,
	    (int)(text.pv_size / PAGE_SIZE));
	VPRINTF(mem_fmt, "data section",
	    KERN_VTOPHYS((vaddr_t)__data_start), KERN_VTOPHYS((vaddr_t)_edata),
	    (vaddr_t)__data_start, (vaddr_t)_edata,
	    (int)((round_page((vaddr_t)_edata)
		- trunc_page((vaddr_t)__data_start)) / PAGE_SIZE));
	VPRINTF(mem_fmt, "bss section",
	    KERN_VTOPHYS((vaddr_t)__bss_start), KERN_VTOPHYS((vaddr_t)__bss_end__),
	    (vaddr_t)__bss_start, (vaddr_t)__bss_end__,
	    (int)((round_page((vaddr_t)__bss_end__)
		- trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE));
	VPRINTF(mem_fmt, "L1 page directory",
	    kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1,
	    kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1,
	    L1_TABLE_SIZE / PAGE_SIZE);
#if defined(EFI_RUNTIME)
	VPRINTF(mem_fmt, "EFI L1 page directory",
	    efirt_l1pt.pv_pa, efirt_l1pt.pv_pa + L1_TABLE_SIZE - 1,
	    efirt_l1pt.pv_va, efirt_l1pt.pv_va + L1_TABLE_SIZE - 1,
	    L1_TABLE_SIZE / PAGE_SIZE);
#endif
	VPRINTF(mem_fmt, "ABT stack (CPU 0)",
	    abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    ABT_STACK_SIZE);
	VPRINTF(mem_fmt, "FIQ stack (CPU 0)",
	    fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    FIQ_STACK_SIZE);
	VPRINTF(mem_fmt, "IRQ stack (CPU 0)",
	    irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    IRQ_STACK_SIZE);
	VPRINTF(mem_fmt, "UND stack (CPU 0)",
	    undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    UND_STACK_SIZE);
	VPRINTF(mem_fmt, "IDLE stack (CPU 0)",
	    idlestack.pv_pa, idlestack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    idlestack.pv_va, idlestack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	VPRINTF(mem_fmt, "SVC stack",
	    kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	VPRINTF(mem_fmt, "Message Buffer",
	    msgbuf.pv_pa, msgbuf.pv_pa + (msgbuf_pgs * PAGE_SIZE) - 1,
	    msgbuf.pv_va, msgbuf.pv_va + (msgbuf_pgs * PAGE_SIZE) - 1,
	    (int)msgbuf_pgs);
	if (map_vectors_p) {
		VPRINTF(mem_fmt, "Exception Vectors",
		    systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1,
		    systempage.pv_va, systempage.pv_va + PAGE_SIZE - 1,
		    1);
	}
	for (size_t i = 0; i < bmi->bmi_nfreeblocks; i++) {
		pv = &bmi->bmi_freeblocks[i];

		VPRINTF(mem_fmt_nov, "Free Memory",
		    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
		    pv->pv_size / PAGE_SIZE);
	}
	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	VPRINTF("TTBR0=%#x", armreg_ttbr_read());
#ifdef _ARM_ARCH_6
	VPRINTF(" TTBR1=%#x TTBCR=%#x CONTEXTIDR=%#x",
	    armreg_ttbr1_read(), armreg_ttbcr_read(),
	    armreg_contextidr_read());
#endif
	VPRINTF("\n");

	/* Switch tables */
	VPRINTF("switching to new L1 page table @%#lx...\n", l1pt_pa);

	cpu_ttb = l1pt_pa;

	cpu_domains(DOMAIN_DEFAULT);

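	/*
	 * Write the caches back and invalidate them so nothing dirty
	 * is left behind under the old translation tables.
	 */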
	cpu_idcache_wbinv_all();

#ifdef __HAVE_GENERIC_START

	/*
	 * Turn on caches and set SCTLR/ACTLR
	 */
	cpu_setup(boot_args);
#endif

	VPRINTF(" ttb");

#ifdef ARM_MMU_EXTENDED
	/*
	 * TTBCR should have been initialized by the MD start code.
	 */
	KASSERT((armreg_contextidr_read() & 0xff) == 0);
	KASSERT(armreg_ttbcr_read() == __SHIFTIN(1, TTBCR_S_N));
	/*
	 * Disable lookups via TTBR0 until there is an activated pmap.
	 */
	armreg_ttbcr_write(armreg_ttbcr_read() | TTBCR_S_PD0);
	cpu_setttb(l1pt_pa, KERNEL_PID);
	isb();
#else
	cpu_setttb(l1pt_pa, true);
#endif

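	/* Get rid of any TLB entries left over from the old tables. */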
	cpu_tlb_flushID();

#ifdef KASAN
	extern uint8_t start_stacks_bottom[];
	kasan_early_init((void *)start_stacks_bottom);
#endif

#ifdef ARM_MMU_EXTENDED
	VPRINTF("\nsctlr=%#x actlr=%#x\n",
	    armreg_sctlr_read(), armreg_auxctl_read());
#else
	VPRINTF(" (TTBR0=%#x)", armreg_ttbr_read());
#endif

#ifdef MULTIPROCESSOR
#ifndef __HAVE_GENERIC_START
	/*
	 * Kick the secondaries to load the TTB, after which they'll go
	 * back to sleep and wait for the final kick that hatches them.
	 */
	VPRINTF(" hatchlings");
	cpu_boot_secondary_processors();
#endif
#endif

	VPRINTF(" OK\n");
}