/*	$NetBSD: arm32_kvminit.c,v 1.66 2020/10/30 18:54:36 skrll Exp $	*/

/*
 * Copyright (c) 2002, 2003, 2005 Genetec Corporation. All rights reserved.
 * Written by Hiroyuki Bessho for Genetec Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Genetec Corporation may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GENETEC CORPORATION
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 1997,1998 Mark Brinicombe.
 * Copyright (c) 1997,1998 Causality Limited.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 2007 Microsoft
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Microsoft
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTERS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_arm_debug.h"
#include "opt_arm_start.h"
#include "opt_fdt.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.66 2020/10/30 18:54:36 skrll Exp $");

#include <sys/param.h>

#include <sys/asan.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/reboot.h>

#include <dev/cons.h>

#include <uvm/uvm_extern.h>

#include <arm/arm32/machdep.h>
#include <arm/bootconfig.h>
#include <arm/db_machdep.h>
#include <arm/locore.h>
#include <arm/undefined.h>

#if defined(FDT)
#include <arch/evbarm/fdt/platform.h>
#include <arm/fdt/arm_fdtvar.h>
#endif

#ifdef MULTIPROCESSOR
#ifndef __HAVE_CPU_UAREA_ALLOC_IDLELWP
#error __HAVE_CPU_UAREA_ALLOC_IDLELWP required to not waste pages for idlestack
#endif
#endif

#ifdef VERBOSE_INIT_ARM
#define VPRINTF(...)	printf(__VA_ARGS__)
#else
#define VPRINTF(...)	__nothing
#endif

struct bootmem_info bootmem_info;

extern void *msgbufaddr;
paddr_t msgbufphys;
paddr_t physical_start;
paddr_t physical_end;

extern char etext[];
extern char __data_start[], _edata[];
extern char __bss_start[], __bss_end__[];
extern char _end[];

/* Page tables for mapping kernel VM */
#define KERNEL_L2PT_VMDATA_NUM	8	/* start with 32MB of KVM */

#ifdef KASAN
vaddr_t kasan_kernelstart;
vaddr_t kasan_kernelsize;

#define KERNEL_L2PT_KASAN_NUM	howmany(VM_KERNEL_KASAN_SIZE, L2_S_SEGSIZE)
bool kasan_l2pts_created __attribute__((__section__(".data"))) = false;
pv_addr_t kasan_l2pt[KERNEL_L2PT_KASAN_NUM];
#else
#define KERNEL_L2PT_KASAN_NUM	0
#endif

u_long kern_vtopdiff __attribute__((__section__(".data")));

void
arm32_bootmem_init(paddr_t memstart, psize_t memsize, vsize_t kernelstart)
{
	struct bootmem_info * const bmi = &bootmem_info;
	pv_addr_t *pv = bmi->bmi_freeblocks;

	/*
	 * FDT/generic start fills in kern_vtopdiff early
	 */
#if defined(__HAVE_GENERIC_START)
	extern char KERNEL_BASE_virt[];
	extern char const __stop__init_memory[];

	VPRINTF("%s: kern_vtopdiff=%#lx\n", __func__, kern_vtopdiff);

	vaddr_t kstartva = trunc_page((vaddr_t)KERNEL_BASE_virt);
	vaddr_t kendva = round_page((vaddr_t)__stop__init_memory);

	kernelstart = KERN_VTOPHYS(kstartva);

	VPRINTF("%s: kstartva=%#lx, kernelstart=%#lx\n", __func__, kstartva,
	    kernelstart);
#else
	vaddr_t kendva = round_page((vaddr_t)_end);

#if defined(KERNEL_BASE_VOFFSET)
	kern_vtopdiff = KERNEL_BASE_VOFFSET;
#else
	KASSERT(memstart == kernelstart);
	kern_vtopdiff = KERNEL_BASE - memstart;
#endif
#endif
	paddr_t kernelend = KERN_VTOPHYS(kendva);

	VPRINTF("%s: memstart=%#lx, memsize=%#lx\n", __func__,
	    memstart, memsize);
	VPRINTF("%s: kernelstart=%#lx, kernelend=%#lx\n", __func__,
	    kernelstart, kernelend);

	physical_start = bmi->bmi_start = memstart;
	physical_end = bmi->bmi_end = memstart + memsize;
#ifndef ARM_HAS_LPAE
	if (physical_end == 0) {
		physical_end = -PAGE_SIZE;
		memsize -= PAGE_SIZE;
		bmi->bmi_end -= PAGE_SIZE;
		VPRINTF("%s: memsize shrunk by a page to avoid ending at 4GB\n",
		    __func__);
	}
#endif
	physmem = memsize / PAGE_SIZE;

	/*
	 * Let's record where the kernel lives.
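	 * The recorded range runs from the kernel's load address up to
	 * round_page(_end) (or __stop__init_memory with the generic start
	 * code), i.e. it covers text, data and bss.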
	 */

	bmi->bmi_kernelstart = kernelstart;
	bmi->bmi_kernelend = kernelend;

#if defined(FDT)
	fdt_add_reserved_memory_range(bmi->bmi_kernelstart,
	    bmi->bmi_kernelend - bmi->bmi_kernelstart);
#endif

	VPRINTF("%s: kernel phys start %#lx end %#lx\n", __func__, kernelstart,
	    kernelend);

#if 0
	// XXX Makes RPI abort
	KASSERT((kernelstart & (L2_S_SEGSIZE - 1)) == 0);
#endif
	/*
	 * Now the rest of the free memory must be after the kernel.
	 */
	pv->pv_pa = bmi->bmi_kernelend;
	pv->pv_va = KERN_PHYSTOV(pv->pv_pa);
	pv->pv_size = bmi->bmi_end - bmi->bmi_kernelend;
	bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
	VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
	    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
	    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
	pv++;

	/*
	 * Add a free block for any memory before the kernel.
	 */
	if (bmi->bmi_start < bmi->bmi_kernelstart) {
		pv->pv_pa = bmi->bmi_start;
		pv->pv_va = KERN_PHYSTOV(pv->pv_pa);
		pv->pv_size = bmi->bmi_kernelstart - pv->pv_pa;
		bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
		VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
		    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
		    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
		pv++;
	}

	bmi->bmi_nfreeblocks = pv - bmi->bmi_freeblocks;

	SLIST_INIT(&bmi->bmi_freechunks);
	SLIST_INIT(&bmi->bmi_chunks);
}

static bool
concat_pvaddr(pv_addr_t *acc_pv, pv_addr_t *pv)
{
	if (acc_pv->pv_pa + acc_pv->pv_size == pv->pv_pa
	    && acc_pv->pv_va + acc_pv->pv_size == pv->pv_va
	    && acc_pv->pv_prot == pv->pv_prot
	    && acc_pv->pv_cache == pv->pv_cache) {
#if 0
		VPRINTF("%s: appending pv %p (%#lx..%#lx) to %#lx..%#lx\n",
		    __func__, pv, pv->pv_pa, pv->pv_pa + pv->pv_size,
		    acc_pv->pv_pa, acc_pv->pv_pa + acc_pv->pv_size);
#endif
		acc_pv->pv_size += pv->pv_size;
		return true;
	}

	return false;
}

static void
add_pages(struct bootmem_info *bmi, pv_addr_t *pv)
{
	pv_addr_t **pvp = &SLIST_FIRST(&bmi->bmi_chunks);
	while ((*pvp) != NULL && (*pvp)->pv_va <= pv->pv_va) {
		pv_addr_t * const pv0 = (*pvp);
		KASSERT(SLIST_NEXT(pv0, pv_list) == NULL
		    || pv0->pv_pa < SLIST_NEXT(pv0, pv_list)->pv_pa);
		if (concat_pvaddr(pv0, pv)) {
			VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
			    __func__, "appending", pv,
			    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
			    pv0->pv_pa, pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
			pv = SLIST_NEXT(pv0, pv_list);
			if (pv != NULL && concat_pvaddr(pv0, pv)) {
				VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
				    __func__, "merging", pv,
				    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
				    pv0->pv_pa,
				    pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
				SLIST_REMOVE_AFTER(pv0, pv_list);
				SLIST_INSERT_HEAD(&bmi->bmi_freechunks, pv, pv_list);
			}
			return;
		}
		KASSERT(pv->pv_va != (*pvp)->pv_va);
		pvp = &SLIST_NEXT(*pvp, pv_list);
	}
	KASSERT((*pvp) == NULL || pv->pv_va < (*pvp)->pv_va);
	pv_addr_t * const new_pv = SLIST_FIRST(&bmi->bmi_freechunks);
	KASSERT(new_pv != NULL);
	SLIST_REMOVE_HEAD(&bmi->bmi_freechunks, pv_list);
	*new_pv = *pv;
	SLIST_NEXT(new_pv, pv_list) = *pvp;
	(*pvp) = new_pv;

	VPRINTF("%s: adding pv %p (pa %#lx, va %#lx, %lu pages) ",
	    __func__, new_pv, new_pv->pv_pa, new_pv->pv_va,
	    new_pv->pv_size / PAGE_SIZE);
	if (SLIST_NEXT(new_pv, pv_list)) {
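		/* Not at the tail: report which chunk we were placed before. */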
VPRINTF("before pa %#lx\n", SLIST_NEXT(new_pv, pv_list)->pv_pa); 356 } else { 357 VPRINTF("at tail\n"); 358 } 359 } 360 361 static void 362 valloc_pages(struct bootmem_info *bmi, pv_addr_t *pv, size_t npages, 363 int prot, int cache, bool zero_p) 364 { 365 size_t nbytes = npages * PAGE_SIZE; 366 pv_addr_t *free_pv = bmi->bmi_freeblocks; 367 size_t free_idx = 0; 368 static bool l1pt_found; 369 370 KASSERT(npages > 0); 371 372 /* 373 * If we haven't allocated the kernel L1 page table and we are aligned 374 * at a L1 table boundary, alloc the memory for it. 375 */ 376 if (!l1pt_found 377 && (free_pv->pv_pa & (L1_TABLE_SIZE - 1)) == 0 378 && free_pv->pv_size >= L1_TABLE_SIZE) { 379 l1pt_found = true; 380 VPRINTF(" l1pt"); 381 382 valloc_pages(bmi, &kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE, 383 VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true); 384 add_pages(bmi, &kernel_l1pt); 385 } 386 387 while (nbytes > free_pv->pv_size) { 388 free_pv++; 389 free_idx++; 390 if (free_idx == bmi->bmi_nfreeblocks) { 391 panic("%s: could not allocate %zu bytes", 392 __func__, nbytes); 393 } 394 } 395 396 /* 397 * As we allocate the memory, make sure that we don't walk over 398 * our current first level translation table. 399 */ 400 KASSERT((armreg_ttbr_read() & ~(L1_TABLE_SIZE - 1)) != free_pv->pv_pa); 401 402 #if defined(FDT) 403 fdt_add_reserved_memory_range(free_pv->pv_pa, nbytes); 404 #endif 405 pv->pv_pa = free_pv->pv_pa; 406 pv->pv_va = free_pv->pv_va; 407 pv->pv_size = nbytes; 408 pv->pv_prot = prot; 409 pv->pv_cache = cache; 410 411 /* 412 * If PTE_PAGETABLE uses the same cache modes as PTE_CACHE 413 * just use PTE_CACHE. 414 */ 415 if (cache == PTE_PAGETABLE 416 && pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt 417 && pte_l2_l_cache_mode == pte_l2_l_cache_mode_pt 418 && pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt) 419 pv->pv_cache = PTE_CACHE; 420 421 free_pv->pv_pa += nbytes; 422 free_pv->pv_va += nbytes; 423 free_pv->pv_size -= nbytes; 424 if (free_pv->pv_size == 0) { 425 --bmi->bmi_nfreeblocks; 426 for (; free_idx < bmi->bmi_nfreeblocks; free_idx++) { 427 free_pv[0] = free_pv[1]; 428 } 429 } 430 431 bmi->bmi_freepages -= npages; 432 433 if (zero_p) 434 memset((void *)pv->pv_va, 0, nbytes); 435 } 436 437 void 438 arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase, 439 const struct pmap_devmap *devmap, bool mapallmem_p) 440 { 441 struct bootmem_info * const bmi = &bootmem_info; 442 #ifdef MULTIPROCESSOR 443 const size_t cpu_num = arm_cpu_max; 444 #else 445 const size_t cpu_num = 1; 446 #endif 447 448 #ifdef ARM_HAS_VBAR 449 const bool map_vectors_p = false; 450 #elif defined(CPU_ARMV7) || defined(CPU_ARM11) 451 const bool map_vectors_p = vectors == ARM_VECTORS_HIGH 452 || (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) == 0; 453 #else 454 const bool map_vectors_p = true; 455 #endif 456 457 #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS 458 KASSERT(mapallmem_p); 459 #ifdef ARM_MMU_EXTENDED 460 /* 461 * The direct map VA space ends at the start of the kernel VM space. 462 */ 463 pmap_directlimit = kernel_vm_base; 464 #else 465 KASSERT(kernel_vm_base - KERNEL_BASE >= physical_end - physical_start); 466 #endif /* ARM_MMU_EXTENDED */ 467 #endif /* __HAVE_MM_MD_DIRECT_MAPPED_PHYS */ 468 469 /* 470 * Calculate the number of L2 pages needed for mapping the 471 * kernel + data + stuff. 
	 * Assume 2 L2 pages for kernel, 1 for vectors, and 1 for IO.
	 */
	size_t kernel_size = bmi->bmi_kernelend;
	kernel_size -= (bmi->bmi_kernelstart & -L2_S_SEGSIZE);
	kernel_size += L1_TABLE_SIZE;
	kernel_size += PAGE_SIZE * KERNEL_L2PT_VMDATA_NUM;
	kernel_size += PAGE_SIZE * KERNEL_L2PT_KASAN_NUM;
	if (map_vectors_p) {
		kernel_size += PAGE_SIZE;	/* L2PT for VECTORS */
	}
	if (iovbase) {
		kernel_size += PAGE_SIZE;	/* L2PT for IO */
	}
	kernel_size +=
	    cpu_num * (ABT_STACK_SIZE + FIQ_STACK_SIZE + IRQ_STACK_SIZE
	    + UND_STACK_SIZE + UPAGES) * PAGE_SIZE;
	kernel_size += round_page(MSGBUFSIZE);
	kernel_size += 0x10000;	/* slop */
	if (!mapallmem_p) {
		kernel_size += PAGE_SIZE
		    * howmany(kernel_size, L2_S_SEGSIZE);
	}
	kernel_size = round_page(kernel_size);

	/*
	 * Now we know how many L2 pages it will take.
	 */
	const size_t KERNEL_L2PT_KERNEL_NUM =
	    howmany(kernel_size, L2_S_SEGSIZE);

	VPRINTF("%s: %zu L2 pages are needed to map %#zx kernel bytes\n",
	    __func__, KERNEL_L2PT_KERNEL_NUM, kernel_size);

	KASSERT(KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM <
	    __arraycount(bmi->bmi_l2pts));
	pv_addr_t * const kernel_l2pt = bmi->bmi_l2pts;
	pv_addr_t * const vmdata_l2pt = kernel_l2pt + KERNEL_L2PT_KERNEL_NUM;
	pv_addr_t msgbuf;
	pv_addr_t text;
	pv_addr_t data;
	pv_addr_t chunks[__arraycount(bmi->bmi_l2pts) + 11];
#if ARM_MMU_XSCALE == 1
	pv_addr_t minidataclean;
#endif

	/*
	 * We need to allocate some fixed page tables to get the kernel going.
	 *
	 * We are going to allocate our bootstrap pages from the beginning of
	 * the free space that we just calculated.  We allocate one page
	 * directory and a number of page tables and store the physical
	 * addresses in the bmi_l2pts array in bootmem_info.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

	VPRINTF("%s: allocating page tables for", __func__);
	for (size_t i = 0; i < __arraycount(chunks); i++) {
		SLIST_INSERT_HEAD(&bmi->bmi_freechunks, &chunks[i], pv_list);
	}

	kernel_l1pt.pv_pa = 0;
	kernel_l1pt.pv_va = 0;

	/*
	 * Allocate the L2 pages, but if we get to a page that is aligned for
	 * an L1 page table, we will allocate the pages for it first and then
	 * allocate the L2 page.
	 */

	if (map_vectors_p) {
		/*
		 * First allocate L2 page for the vectors.
		 */
		VPRINTF(" vector");
		valloc_pages(bmi, &bmi->bmi_vector_l2pt, 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &bmi->bmi_vector_l2pt);
	}

	/*
	 * Now allocate L2 pages for the kernel
	 */
	VPRINTF(" kernel");
	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; ++idx) {
		valloc_pages(bmi, &kernel_l2pt[idx], 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &kernel_l2pt[idx]);
	}

	/*
	 * Now allocate L2 pages for the initial kernel VA space.
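	 * (KERNEL_L2PT_VMDATA_NUM of them, i.e. the initial 32MB of KVM.)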
	 */
	VPRINTF(" vm");
	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; ++idx) {
		valloc_pages(bmi, &vmdata_l2pt[idx], 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &vmdata_l2pt[idx]);
	}

#ifdef KASAN
	/*
	 * Now allocate L2 pages for the KASAN shadow map l2pt VA space.
	 */
	VPRINTF(" kasan");
	for (size_t idx = 0; idx < KERNEL_L2PT_KASAN_NUM; ++idx) {
		valloc_pages(bmi, &kasan_l2pt[idx], 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &kasan_l2pt[idx]);
	}

#endif
	/*
	 * If someone wanted an L2 page for I/O, allocate it now.
	 */
	if (iovbase) {
		VPRINTF(" io");
		valloc_pages(bmi, &bmi->bmi_io_l2pt, 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &bmi->bmi_io_l2pt);
	}

	VPRINTF("%s: allocating stacks\n", __func__);

	/* Allocate stacks for all modes and CPUs */
	valloc_pages(bmi, &abtstack, ABT_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &abtstack);
	valloc_pages(bmi, &fiqstack, FIQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &fiqstack);
	valloc_pages(bmi, &irqstack, IRQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &irqstack);
	valloc_pages(bmi, &undstack, UND_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &undstack);
	valloc_pages(bmi, &idlestack, UPAGES * cpu_num,		/* SVC32 */
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &idlestack);
	valloc_pages(bmi, &kernelstack, UPAGES,			/* SVC32 */
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &kernelstack);

	/* Allocate the message buffer from the end of memory. */
	const size_t msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE;
	valloc_pages(bmi, &msgbuf, msgbuf_pgs,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, false);
	add_pages(bmi, &msgbuf);
	msgbufphys = msgbuf.pv_pa;
	msgbufaddr = (void *)msgbuf.pv_va;

#ifdef KASAN
	kasan_kernelstart = KERNEL_BASE;
	kasan_kernelsize = (msgbuf.pv_va + round_page(MSGBUFSIZE)) - KERNEL_BASE;
#endif

	if (map_vectors_p) {
		/*
		 * Allocate a page for the system vector page.
		 * This page will just contain the system vectors and can be
		 * shared by all processes.
		 */
		VPRINTF(" vector");

		valloc_pages(bmi, &systempage, 1,
		    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
		    PTE_CACHE, true);
	}
	systempage.pv_va = vectors;

	/*
	 * If the caller needed a few extra pages for some reason, allocate
	 * them now.
	 */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		valloc_pages(bmi, &minidataclean, 1,
		    VM_PROT_READ | VM_PROT_WRITE, 0, true);
#endif

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables and stacks.  Let's just confirm that.
	 */
	if (kernel_l1pt.pv_va == 0
	    && (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0))
		panic("%s: Failed to allocate or align the kernel "
		    "page directory", __func__);

	VPRINTF("Creating L1 page table at 0x%08lx/0x%08lx\n",
	    kernel_l1pt.pv_va, kernel_l1pt.pv_pa);

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
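	 * (pmap_link_l2pt() below is what enters each L2 table into the L1
	 * slots covering the VA range that table will serve.)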
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	vaddr_t l1pt_va = kernel_l1pt.pv_va;
	paddr_t l1pt_pa = kernel_l1pt.pv_pa;

	if (map_vectors_p) {
		/* Map the L2 pages tables in the L1 page table */
		const vaddr_t va = systempage.pv_va & -L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &bmi->bmi_vector_l2pt);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, bmi->bmi_vector_l2pt.pv_va,
		    bmi->bmi_vector_l2pt.pv_pa, systempage.pv_va, "(vectors)");
	}

	/*
	 * This enforces an alignment requirement of L2_S_SEGSIZE for kernel
	 * start PA
	 */
	const vaddr_t kernel_base =
	    KERN_PHYSTOV(bmi->bmi_kernelstart & -L2_S_SEGSIZE);

	VPRINTF("%s: kernel_base %lx KERNEL_L2PT_KERNEL_NUM %zu\n", __func__,
	    kernel_base, KERNEL_L2PT_KERNEL_NUM);

	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; idx++) {
		const vaddr_t va = kernel_base + idx * L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &kernel_l2pt[idx]);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, kernel_l2pt[idx].pv_va, kernel_l2pt[idx].pv_pa,
		    va, "(kernel)");
	}

	VPRINTF("%s: kernel_vm_base %lx KERNEL_L2PT_VMDATA_NUM %d\n", __func__,
	    kernel_vm_base, KERNEL_L2PT_VMDATA_NUM);

	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; idx++) {
		const vaddr_t va = kernel_vm_base + idx * L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &vmdata_l2pt[idx]);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, vmdata_l2pt[idx].pv_va, vmdata_l2pt[idx].pv_pa,
		    va, "(vm)");
	}
	if (iovbase) {
		const vaddr_t va = iovbase & -L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &bmi->bmi_io_l2pt);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, bmi->bmi_io_l2pt.pv_va, bmi->bmi_io_l2pt.pv_pa,
		    va, "(io)");
	}

#ifdef KASAN
	VPRINTF("%s: kasan_shadow_base %x KERNEL_L2PT_KASAN_NUM %d\n", __func__,
	    VM_KERNEL_KASAN_BASE, KERNEL_L2PT_KASAN_NUM);

	for (size_t idx = 0; idx < KERNEL_L2PT_KASAN_NUM; idx++) {
		const vaddr_t va = VM_KERNEL_KASAN_BASE + idx * L2_S_SEGSIZE;

		pmap_link_l2pt(l1pt_va, va, &kasan_l2pt[idx]);

		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx %s\n",
		    __func__, kasan_l2pt[idx].pv_va, kasan_l2pt[idx].pv_pa,
		    va, "(kasan)");
	}
	kasan_l2pts_created = true;
#endif

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    kernel_vm_base + (KERNEL_L2PT_VMDATA_NUM * L2_S_SEGSIZE);

	// This could be done earlier and then the kernel data and pages
	// allocated above would get merged (concatenated)

	VPRINTF("Mapping kernel\n");

	extern char etext[];
	size_t totalsize = bmi->bmi_kernelend - bmi->bmi_kernelstart;
	size_t textsize = KERN_VTOPHYS((uintptr_t)etext) - bmi->bmi_kernelstart;

	textsize = (textsize + PGOFSET) & ~PGOFSET;

	/* start at offset of kernel in RAM */

	text.pv_pa = bmi->bmi_kernelstart;
	text.pv_va = KERN_PHYSTOV(bmi->bmi_kernelstart);
	text.pv_size = textsize;
	text.pv_prot = VM_PROT_READ | VM_PROT_EXECUTE;
	text.pv_cache = PTE_CACHE;

	VPRINTF("%s: adding chunk for kernel text %#lx..%#lx (VA %#lx)\n",
	    __func__, text.pv_pa, text.pv_pa + text.pv_size - 1, text.pv_va);

	add_pages(bmi, &text);

	data.pv_pa = text.pv_pa + textsize;
	data.pv_va = text.pv_va + textsize;
	data.pv_size = totalsize - textsize;
	data.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
	data.pv_cache = PTE_CACHE;

	VPRINTF("%s: adding chunk for kernel data/bss %#lx..%#lx (VA %#lx)\n",
	    __func__, data.pv_pa, data.pv_pa + data.pv_size - 1, data.pv_va);

	add_pages(bmi, &data);

	VPRINTF("Listing Chunks\n");

	pv_addr_t *lpv;
	SLIST_FOREACH(lpv, &bmi->bmi_chunks, pv_list) {
		VPRINTF("%s: pv %p: chunk VA %#lx..%#lx "
		    "(PA %#lx, prot %d, cache %d)\n",
		    __func__, lpv, lpv->pv_va, lpv->pv_va + lpv->pv_size - 1,
		    lpv->pv_pa, lpv->pv_prot, lpv->pv_cache);
	}
	VPRINTF("\nMapping Chunks\n");

	pv_addr_t cur_pv;
	pv_addr_t *pv = SLIST_FIRST(&bmi->bmi_chunks);
	if (!mapallmem_p || pv->pv_pa == bmi->bmi_start) {
		cur_pv = *pv;
		KASSERTMSG(cur_pv.pv_va >= KERNEL_BASE, "%#lx", cur_pv.pv_va);
		pv = SLIST_NEXT(pv, pv_list);
	} else {
		cur_pv.pv_va = KERNEL_BASE;
		cur_pv.pv_pa = KERN_VTOPHYS(cur_pv.pv_va);
		cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa;
		cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
		cur_pv.pv_cache = PTE_CACHE;
	}
	while (pv != NULL) {
		if (mapallmem_p) {
			if (concat_pvaddr(&cur_pv, pv)) {
				pv = SLIST_NEXT(pv, pv_list);
				continue;
			}
			if (cur_pv.pv_pa + cur_pv.pv_size < pv->pv_pa) {
				/*
				 * See if we can extend the current pv to encompass the
				 * hole, and if so do it and retry the concatenation.
				 */
				if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE)
				    && cur_pv.pv_cache == PTE_CACHE) {
					cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va;
					continue;
				}

				/*
				 * We couldn't, so emit the current chunk first.
				 */
				VPRINTF("%s: mapping chunk VA %#lx..%#lx "
				    "(PA %#lx, prot %d, cache %d)\n",
				    __func__,
				    cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
				    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
				pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
				    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);

				/*
				 * Set the current chunk to the hole and try again.
				 */
				cur_pv.pv_pa += cur_pv.pv_size;
				cur_pv.pv_va += cur_pv.pv_size;
				cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va;
				cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
				cur_pv.pv_cache = PTE_CACHE;
				continue;
			}
		}

		/*
		 * The new pv didn't concatenate, so emit the current one
		 * and use the new pv as the current pv.
		 */
		VPRINTF("%s: mapping chunk VA %#lx..%#lx "
		    "(PA %#lx, prot %d, cache %d)\n",
		    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
		    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
		pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
		    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
		cur_pv = *pv;
		pv = SLIST_NEXT(pv, pv_list);
	}

	/*
	 * If we are mapping all of memory, let's map the rest of memory.
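	 * That is, extend the final chunk out to bmi_end as ordinary cached
	 * read/write memory, emitting it first if its attributes differ.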
	 */
	if (mapallmem_p && cur_pv.pv_pa + cur_pv.pv_size < bmi->bmi_end) {
		if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE)
		    && cur_pv.pv_cache == PTE_CACHE) {
			cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
		} else {
			KASSERTMSG(cur_pv.pv_va + cur_pv.pv_size <= kernel_vm_base,
			    "%#lx >= %#lx", cur_pv.pv_va + cur_pv.pv_size,
			    kernel_vm_base);
			VPRINTF("%s: mapping chunk VA %#lx..%#lx "
			    "(PA %#lx, prot %d, cache %d)\n",
			    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
			    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
			pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
			    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
			cur_pv.pv_pa += cur_pv.pv_size;
			cur_pv.pv_va += cur_pv.pv_size;
			cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
			cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
			cur_pv.pv_cache = PTE_CACHE;
		}
	}

	/*
	 * The amount we can direct map is limited by the start of the
	 * virtual part of the kernel address space.  Don't overrun
	 * into it.
	 */
	if (mapallmem_p && cur_pv.pv_va + cur_pv.pv_size > kernel_vm_base) {
		cur_pv.pv_size = kernel_vm_base - cur_pv.pv_va;
	}

	/*
	 * Now we map the final chunk.
	 */
	VPRINTF("%s: mapping last chunk VA %#lx..%#lx (PA %#lx, prot %d, cache %d)\n",
	    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
	    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
	pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
	    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);

	/*
	 * Now we map the stuff that isn't directly after the kernel.
	 */
	if (map_vectors_p) {
		/* Map the vector page. */
		pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa,
		    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE, PTE_CACHE);
	}

	/* Map the Mini-Data cache clean area. */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		xscale_setup_minidata(l1pt_va, minidataclean.pv_va,
		    minidataclean.pv_pa);
#endif

	/*
	 * Map integrated peripherals at same address in first level page
	 * table so that we can continue to use console.
	 */
	if (devmap)
		pmap_devmap_bootstrap(l1pt_va, devmap);

	/* Tell the user about where all the bits and pieces live. */
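	/*
	 * (This report is only produced when VERBOSE_INIT_ARM is defined;
	 * VPRINTF expands to nothing otherwise.)
	 */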
	VPRINTF("%22s Physical Virtual Num\n", " ");
	VPRINTF("%22s Starting Ending Starting Ending Pages\n", " ");

#ifdef VERBOSE_INIT_ARM
	static const char mem_fmt[] =
	    "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %u\n";
	static const char mem_fmt_nov[] =
	    "%20s: 0x%08lx 0x%08lx %zu\n";
#endif

#if 0
	// XXX Doesn't make sense if kernel not at bottom of RAM
	VPRINTF(mem_fmt, "SDRAM", bmi->bmi_start, bmi->bmi_end - 1,
	    KERN_PHYSTOV(bmi->bmi_start), KERN_PHYSTOV(bmi->bmi_end - 1),
	    (int)physmem);
#endif
	VPRINTF(mem_fmt, "text section",
	    text.pv_pa, text.pv_pa + text.pv_size - 1,
	    text.pv_va, text.pv_va + text.pv_size - 1,
	    (int)(text.pv_size / PAGE_SIZE));
	VPRINTF(mem_fmt, "data section",
	    KERN_VTOPHYS((vaddr_t)__data_start), KERN_VTOPHYS((vaddr_t)_edata),
	    (vaddr_t)__data_start, (vaddr_t)_edata,
	    (int)((round_page((vaddr_t)_edata)
	    - trunc_page((vaddr_t)__data_start)) / PAGE_SIZE));
	VPRINTF(mem_fmt, "bss section",
	    KERN_VTOPHYS((vaddr_t)__bss_start), KERN_VTOPHYS((vaddr_t)__bss_end__),
	    (vaddr_t)__bss_start, (vaddr_t)__bss_end__,
	    (int)((round_page((vaddr_t)__bss_end__)
	    - trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE));
	VPRINTF(mem_fmt, "L1 page directory",
	    kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1,
	    kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1,
	    L1_TABLE_SIZE / PAGE_SIZE);
	VPRINTF(mem_fmt, "ABT stack (CPU 0)",
	    abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    ABT_STACK_SIZE);
	VPRINTF(mem_fmt, "FIQ stack (CPU 0)",
	    fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    FIQ_STACK_SIZE);
	VPRINTF(mem_fmt, "IRQ stack (CPU 0)",
	    irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    IRQ_STACK_SIZE);
	VPRINTF(mem_fmt, "UND stack (CPU 0)",
	    undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    UND_STACK_SIZE);
	VPRINTF(mem_fmt, "IDLE stack (CPU 0)",
	    idlestack.pv_pa, idlestack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    idlestack.pv_va, idlestack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	VPRINTF(mem_fmt, "SVC stack",
	    kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	VPRINTF(mem_fmt, "Message Buffer",
	    msgbuf.pv_pa, msgbuf.pv_pa + (msgbuf_pgs * PAGE_SIZE) - 1,
	    msgbuf.pv_va, msgbuf.pv_va + (msgbuf_pgs * PAGE_SIZE) - 1,
	    (int)msgbuf_pgs);
	if (map_vectors_p) {
		VPRINTF(mem_fmt, "Exception Vectors",
		    systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1,
		    systempage.pv_va, systempage.pv_va + PAGE_SIZE - 1,
		    1);
	}
	for (size_t i = 0; i < bmi->bmi_nfreeblocks; i++) {
		pv = &bmi->bmi_freeblocks[i];

		VPRINTF(mem_fmt_nov, "Free Memory",
		    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
		    pv->pv_size / PAGE_SIZE);
	}
	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
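	 * cpu_setttb() below loads the new L1 into the translation table
	 * base register and the TLBs are flushed afterwards.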
	 */

	VPRINTF("TTBR0=%#x", armreg_ttbr_read());
#ifdef _ARM_ARCH_6
	VPRINTF(" TTBR1=%#x TTBCR=%#x CONTEXTIDR=%#x",
	    armreg_ttbr1_read(), armreg_ttbcr_read(),
	    armreg_contextidr_read());
#endif
	VPRINTF("\n");

	/* Switch tables */
	VPRINTF("switching to new L1 page table @%#lx...\n", l1pt_pa);

	cpu_ttb = l1pt_pa;

	cpu_domains(DOMAIN_DEFAULT);

	cpu_idcache_wbinv_all();

#ifdef __HAVE_GENERIC_START

	/*
	 * Turn on caches and set SCTLR/ACTLR
	 */
	cpu_setup(boot_args);
#endif

	VPRINTF(" ttb");

#ifdef ARM_MMU_EXTENDED
	/*
	 * TTBCR should have been initialized by the MD start code.
	 */
	KASSERT((armreg_contextidr_read() & 0xff) == 0);
	KASSERT(armreg_ttbcr_read() == __SHIFTIN(1, TTBCR_S_N));
	/*
	 * Disable lookups via TTBR0 until there is an activated pmap.
	 */
	armreg_ttbcr_write(armreg_ttbcr_read() | TTBCR_S_PD0);
	cpu_setttb(l1pt_pa, KERNEL_PID);
	isb();
#else
	cpu_setttb(l1pt_pa, true);
#endif

	cpu_tlb_flushID();

#ifdef KASAN
	extern uint8_t start_stacks_bottom[];
	kasan_early_init((void *)start_stacks_bottom);
#endif

#ifdef ARM_MMU_EXTENDED
	VPRINTF("\nsctlr=%#x actlr=%#x\n",
	    armreg_sctlr_read(), armreg_auxctl_read());
#else
	VPRINTF(" (TTBR0=%#x)", armreg_ttbr_read());
#endif

#ifdef MULTIPROCESSOR
#ifndef __HAVE_GENERIC_START
	/*
	 * Kick the secondaries to load the TTB.  They will then go back to
	 * sleep to wait for the final kick that lets them hatch.
	 */
	VPRINTF(" hatchlings");
	cpu_boot_secondary_processors();
#endif
#endif

	VPRINTF(" OK\n");
}