1*e65a0eaaSskrll /* $NetBSD: asan.h,v 1.8 2022/04/02 11:16:07 skrll Exp $ */
2d329adb0Sskrll
3d329adb0Sskrll /*
4d329adb0Sskrll * Copyright (c) 2020 The NetBSD Foundation, Inc.
5d329adb0Sskrll * All rights reserved.
6d329adb0Sskrll *
7d329adb0Sskrll * This code is derived from software contributed to The NetBSD Foundation
8b813e108Smaxv * by Nick Hudson, and is part of the KASAN subsystem of the NetBSD kernel.
9d329adb0Sskrll *
10d329adb0Sskrll * Redistribution and use in source and binary forms, with or without
11d329adb0Sskrll * modification, are permitted provided that the following conditions
12d329adb0Sskrll * are met:
13d329adb0Sskrll * 1. Redistributions of source code must retain the above copyright
14d329adb0Sskrll * notice, this list of conditions and the following disclaimer.
15d329adb0Sskrll * 2. Redistributions in binary form must reproduce the above copyright
16d329adb0Sskrll * notice, this list of conditions and the following disclaimer in the
17d329adb0Sskrll * documentation and/or other materials provided with the distribution.
18d329adb0Sskrll *
19d329adb0Sskrll * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20d329adb0Sskrll * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21d329adb0Sskrll * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22d329adb0Sskrll * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23d329adb0Sskrll * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24d329adb0Sskrll * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25d329adb0Sskrll * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26d329adb0Sskrll * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27d329adb0Sskrll * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28d329adb0Sskrll * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29d329adb0Sskrll * POSSIBILITY OF SUCH DAMAGE.
30d329adb0Sskrll */
31d329adb0Sskrll
32*e65a0eaaSskrll #include "opt_efi.h"
33*e65a0eaaSskrll
34d329adb0Sskrll #include <sys/atomic.h>
35d329adb0Sskrll #include <sys/ksyms.h>
36d329adb0Sskrll
379fc45356Sriastradh #include <uvm/uvm.h>
389fc45356Sriastradh
39d329adb0Sskrll #include <arm/vmparam.h>
40d329adb0Sskrll #include <arm/arm32/machdep.h>
41d329adb0Sskrll #include <arm/arm32/pmap.h>
42d329adb0Sskrll
/* The shadow lives in the dedicated kernel VA window reserved for KASAN. */
#define KASAN_MD_SHADOW_START	VM_KERNEL_KASAN_BASE
#define KASAN_MD_SHADOW_END	VM_KERNEL_KASAN_END
/* Lowest address covered by the shadow: the base of kernel memory. */
#define __MD_KERNMEM_BASE	KERNEL_BASE
46d329adb0Sskrll
47d329adb0Sskrll static inline int8_t *
kasan_md_addr_to_shad(const void * addr)48d329adb0Sskrll kasan_md_addr_to_shad(const void *addr)
49d329adb0Sskrll {
50d329adb0Sskrll vaddr_t va = (vaddr_t)addr;
51d329adb0Sskrll return (int8_t *)(KASAN_MD_SHADOW_START +
52d329adb0Sskrll ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
53d329adb0Sskrll }
54d329adb0Sskrll
55d329adb0Sskrll static inline bool
kasan_md_unsupported(vaddr_t addr)56d329adb0Sskrll kasan_md_unsupported(vaddr_t addr)
57d329adb0Sskrll {
58d329adb0Sskrll return addr < VM_MIN_KERNEL_ADDRESS ||
59d329adb0Sskrll addr >= KASAN_MD_SHADOW_START;
60d329adb0Sskrll }
61d329adb0Sskrll
62d329adb0Sskrll /* -------------------------------------------------------------------------- */
63d329adb0Sskrll
64d329adb0Sskrll /*
65d329adb0Sskrll * Early mapping, used to map just the stack at boot time. We rely on the fact
66d329adb0Sskrll * that VA = PA + KERNEL_BASE.
67d329adb0Sskrll */
68d329adb0Sskrll
69a236d6e4Sskrll /*
70a236d6e4Sskrll * KASAN_NEARLYPAGES is hard to work out.
71a236d6e4Sskrll *
72a236d6e4Sskrll * The INIT_ARM_TOTAL_STACK shadow is reduced by the KASAN_SHADOW_SCALE_SIZE
73a236d6e4Sskrll * factor. This shadow mapping is likely to span more than one L2 page tables
74a236d6e4Sskrll * and, as a result, more than one PAGE_SIZE block. The L2 page tables might
75a236d6e4Sskrll * span more than one L1 page table entry as well.
76a236d6e4Sskrll *
 * To ensure we have enough, start with the assumption of 1 L1 page table and
 * the number of pages needed to map the shadow... then double that figure to
 * account for the spanning described above.
 */
81a236d6e4Sskrll
#define KASAN_NEARLYPAGES \
    (2 * (1 + howmany(INIT_ARM_TOTAL_STACK / KASAN_SHADOW_SCALE_SIZE, PAGE_SIZE)))

/* True while __md_palloc() must allocate from the static pool below. */
static bool __md_early __read_mostly;
/* Early pages consumed for bootstrap L1 page-table entries; in .data so it
 * survives (is not zeroed by) BSS clearing. */
static size_t __md_nearlyl1pts __attribute__((__section__(".data"))) = 0;
/* Index of the next free page in __md_earlypages. */
static size_t __md_nearlypages __attribute__((__section__(".data")));
/* Static page pool used before UVM can supply pages; kept in .data. */
static uint8_t __md_earlypages[KASAN_NEARLYPAGES * PAGE_SIZE]
    __aligned(PAGE_SIZE) __attribute__((__section__(".data")));
90d329adb0Sskrll
91d329adb0Sskrll static vaddr_t
__md_palloc(void)92d329adb0Sskrll __md_palloc(void)
93d329adb0Sskrll {
94d329adb0Sskrll paddr_t pa;
95d329adb0Sskrll
96d329adb0Sskrll if (__predict_false(__md_early)) {
97d329adb0Sskrll KASSERTMSG(__md_nearlypages < KASAN_NEARLYPAGES,
98d329adb0Sskrll "__md_nearlypages %zu", __md_nearlypages);
99d329adb0Sskrll
100d329adb0Sskrll vaddr_t va = (vaddr_t)(&__md_earlypages[0] + __md_nearlypages * PAGE_SIZE);
101d329adb0Sskrll __md_nearlypages++;
102d329adb0Sskrll __builtin_memset((void *)va, 0, PAGE_SIZE);
103d329adb0Sskrll
104d329adb0Sskrll return KERN_VTOPHYS(va);
105d329adb0Sskrll }
106d329adb0Sskrll
107d329adb0Sskrll if (!uvm.page_init_done) {
108d329adb0Sskrll if (uvm_page_physget(&pa) == false)
109d329adb0Sskrll panic("KASAN can't get a page");
110d329adb0Sskrll
111d329adb0Sskrll return pa;
112d329adb0Sskrll }
113d329adb0Sskrll
114d329adb0Sskrll struct vm_page *pg;
115d329adb0Sskrll retry:
116d329adb0Sskrll pg = uvm_pagealloc(NULL, 0, NULL, 0);
117d329adb0Sskrll if (pg == NULL) {
118d329adb0Sskrll uvm_wait(__func__);
119d329adb0Sskrll goto retry;
120d329adb0Sskrll }
121d329adb0Sskrll pa = VM_PAGE_TO_PHYS(pg);
122d329adb0Sskrll
123d329adb0Sskrll return pa;
124d329adb0Sskrll }
125d329adb0Sskrll
/*
 * Make sure the shadow page at 'va' is mapped, creating the backing L2
 * page table (and, during bootstrap, the L1 entry) when necessary.
 */
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	/* Locate the kernel L1 page table via TTBR1. */
	const uint32_t mask = L1_TABLE_SIZE - 1;
	const paddr_t ttb = (paddr_t)(armreg_ttbr1_read() & ~mask);
	pd_entry_t * const pdep = (pd_entry_t *)KERN_PHYSTOV(ttb);

	const size_t l1slot = l1pte_index(va);
	vaddr_t l2ptva;

	KASSERT((va & PAGE_MASK) == 0);

	extern bool kasan_l2pts_created;
	if (__predict_true(kasan_l2pts_created)) {
		/*
		 * The shadow map area L2PTs were allocated and mapped
		 * by arm32_kernel_vm_init.  Use the array of pv_addr_t
		 * to get the l2ptva.
		 */
		extern pv_addr_t kasan_l2pt[];
		const size_t off = va - KASAN_MD_SHADOW_START;
		const size_t segoff = off & (L2_S_SEGSIZE - 1);
		const size_t idx = off / L2_S_SEGSIZE;
		const vaddr_t segl2ptva = kasan_l2pt[idx].pv_va;
		l2ptva = segl2ptva + l1pte_index(segoff) * L2_TABLE_SIZE_REAL;
	} else {
		/*
		 * An L1PT entry is/may be required for bootstrap tables.
		 * As a page gives enough space for multiple L2PTs, the
		 * previous call might have already created the L2PT.
		 */
		if (!l1pte_page_p(pdep[l1slot])) {
			const paddr_t l2ptpa = __md_palloc();
			/* Round down to the start of the L2 segment. */
			const vaddr_t segl2va = va & -L2_S_SEGSIZE;
			const size_t segl1slot = l1pte_index(segl2va);

			__md_nearlyl1pts++;

			const pd_entry_t npde =
			    L1_C_PROTO | l2ptpa | L1_C_DOM(PMAP_DOMAIN_KERNEL);

			l1pte_set(pdep + segl1slot, npde);
			/*
			 * No need for PDE_SYNC_RANGE here as we're creating
			 * the bootstrap tables
			 */
		}
		l2ptva = KERN_PHYSTOV(l1pte_pa(pdep[l1slot]));
	}

	pt_entry_t * l2pt = (pt_entry_t *)l2ptva;
	pt_entry_t * const ptep = &l2pt[l2pte_index(va)];

	/* Only map (and zero) the shadow page if it isn't already valid. */
	if (!l2pte_valid_p(*ptep)) {
		const int prot = VM_PROT_READ | VM_PROT_WRITE;
		const paddr_t pa = __md_palloc();
		pt_entry_t npte =
		    L2_S_PROTO |
		    pa |
		    /* Caches are off during early boot; skip PT cache mode. */
		    (__md_early ? 0 : pte_l2_s_cache_mode_pt) |
		    L2_S_PROT(PTE_KERNEL, prot);
		l2pte_set(ptep, npte, 0);

		if (!__md_early)
			PTE_SYNC(ptep);

		__builtin_memset((void *)va, 0, PAGE_SIZE);
	}
}
195d329adb0Sskrll
/*
 * Map the init stacks of the BP and APs. We will map the rest in kasan_init.
 */
static void
kasan_md_early_init(void *stack)
{

	/*
	 * We come through here twice.  The first time is for generic_start
	 * and the bootstrap tables.  The second is for arm32_kernel_vm_init
	 * and the real tables.
	 *
	 * In the first we have to create L1PT entries, whereas in the
	 * second arm32_kernel_vm_init has set up kasan_l1pts (and the
	 * L1PT entries for them).
	 */
	__md_early = true;
	/* Resume the static pool past the pages used for bootstrap L1PTs. */
	__md_nearlypages = __md_nearlyl1pts;
	kasan_shadow_map(stack, INIT_ARM_TOTAL_STACK);
	__md_early = false;
}
217d329adb0Sskrll
/* Map shadow for the kernel image and for the kernel VM mapped so far. */
static void
kasan_md_init(void)
{
	extern vaddr_t kasan_kernelstart;
	extern vaddr_t kasan_kernelsize;

	kasan_shadow_map((void *)kasan_kernelstart, kasan_kernelsize);

	/* The VAs we've created until now. */
	vaddr_t eva = pmap_growkernel(VM_KERNEL_VM_BASE);
	kasan_shadow_map((void *)VM_KERNEL_VM_BASE, eva - VM_KERNEL_VM_BASE);
}
230d329adb0Sskrll
231d329adb0Sskrll
/*
 * Return true when 'name' begins with the symbol of an exception/interrupt
 * entry point, i.e. a frame past which the unwinder should not walk.
 */
static inline bool
__md_unwind_end(const char *name)
{
	static const char * const vectors[] = {
		"undefined_entry",
		"swi_entry",
		"prefetch_abort_entry",
		"data_abort_entry",
		"address_exception_entry",
		"irq_entry",
		"fiqvector"
	};
	const size_t nvec = sizeof(vectors) / sizeof(vectors[0]);

	for (size_t i = 0; i < nvec; i++) {
		const size_t len = strlen(vectors[i]);

		/* Prefix match: entry symbols may carry suffixes. */
		if (strncmp(name, vectors[i], len) == 0)
			return true;
	}

	return false;
}
252d329adb0Sskrll
/*
 * Print a backtrace of the current kernel stack by walking saved frame
 * pointers, resolving each return address through ksyms.
 */
static void
kasan_md_unwind(void)
{
	uint32_t lr, *fp;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	fp = (uint32_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/*
		 * normal frame
		 *  fp[ 0] saved code pointer
		 *  fp[-1] saved lr value
		 *  fp[-2] saved sp value
		 *  fp[-3] saved fp value
		 */
		lr = fp[-1];

		/* A return address below kernel space ends the walk. */
		if (lr < VM_MIN_KERNEL_ADDRESS) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)lr, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)lr, sym, mod);
		/* Stop at exception-entry symbols (top of this stack). */
		if (__md_unwind_end(sym)) {
			break;
		}

		/* Step to the caller's frame via the saved fp. */
		fp = (uint32_t *)fp[-3];
		if (fp == NULL) {
			break;
		}
		nsym++;

		/* Cap the backtrace depth. */
		if (nsym >= 15) {
			break;
		}
	}
}
298