/*	$NetBSD: asan.h,v 1.8 2022/04/02 11:16:07 skrll Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson, and is part of the KASAN subsystem of the NetBSD kernel.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_efi.h"

#include <sys/atomic.h>
#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <arm/vmparam.h>
#include <arm/arm32/machdep.h>
#include <arm/arm32/pmap.h>

#define KASAN_MD_SHADOW_START	VM_KERNEL_KASAN_BASE
#define KASAN_MD_SHADOW_END	VM_KERNEL_KASAN_END
#define __MD_KERNMEM_BASE	KERNEL_BASE

static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}
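
/*
 * Worked example (illustrative values, not definitions made in this file):
 * with KERNEL_BASE at 0xc0000000 and a KASAN_SHADOW_SCALE_SHIFT of 3, each
 * 8 bytes of kernel memory map to one shadow byte, so
 *
 *	kasan_md_addr_to_shad((void *)0xc0001000)
 *	    == (int8_t *)(KASAN_MD_SHADOW_START + (0x1000 >> 3))
 *	    == (int8_t *)(KASAN_MD_SHADOW_START + 0x200)
 */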

static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return addr < VM_MIN_KERNEL_ADDRESS ||
	    addr >= KASAN_MD_SHADOW_START;
}

/* -------------------------------------------------------------------------- */

/*
 * Early mapping, used to map just the stack at boot time. We rely on the fact
 * that VA = PA + KERNEL_BASE.
 */
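
/*
 * Under that identity (a sketch with illustrative numbers, assuming
 * KERNEL_BASE is 0xc0000000), KERN_VTOPHYS() reduces to "va - KERNEL_BASE":
 * an early page at VA 0xc0800000 lives at PA 0x00800000, which is what
 * __md_palloc() hands back for pages carved out of __md_earlypages.
 */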

/*
 * KASAN_NEARLYPAGES is hard to work out.
 *
 * The INIT_ARM_TOTAL_STACK shadow is reduced by the KASAN_SHADOW_SCALE_SIZE
 * factor. This shadow mapping is likely to span more than one L2 page table
 * and, as a result, more than one PAGE_SIZE block. The L2 page tables might
 * span more than one L1 page table entry as well.
 *
 * To ensure we have enough, start with the assumption of one L1 page table
 * plus the number of pages needed to map the shadow, then double to account
 * for the spanning described above.
 */

#define KASAN_NEARLYPAGES	\
    (2 * (1 + howmany(INIT_ARM_TOTAL_STACK / KASAN_SHADOW_SCALE_SIZE, PAGE_SIZE)))
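
/*
 * A worked instance of the formula (purely illustrative values, not those
 * of any particular configuration): with PAGE_SIZE 4096,
 * KASAN_SHADOW_SCALE_SIZE 8 and an INIT_ARM_TOTAL_STACK of 32768 bytes,
 * the stack shadow is 32768 / 8 = 4096 bytes, i.e. one page, giving
 *
 *	KASAN_NEARLYPAGES = 2 * (1 + howmany(4096, 4096)) = 4
 *
 * pages reserved in __md_earlypages below.
 */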

static bool __md_early __read_mostly;
static size_t __md_nearlyl1pts __attribute__((__section__(".data"))) = 0;
static size_t __md_nearlypages __attribute__((__section__(".data")));
static uint8_t __md_earlypages[KASAN_NEARLYPAGES * PAGE_SIZE]
    __aligned(PAGE_SIZE) __attribute__((__section__(".data")));

static paddr_t
__md_palloc(void)
{
	paddr_t pa;

	if (__predict_false(__md_early)) {
		/* Boot time: carve the next page out of the static pool. */
		KASSERTMSG(__md_nearlypages < KASAN_NEARLYPAGES,
		    "__md_nearlypages %zu", __md_nearlypages);

		vaddr_t va = (vaddr_t)(&__md_earlypages[0] +
		    __md_nearlypages * PAGE_SIZE);
		__md_nearlypages++;
		__builtin_memset((void *)va, 0, PAGE_SIZE);

		return KERN_VTOPHYS(va);
	}

	if (!uvm.page_init_done) {
		/* Steal a physical page before the VM system is fully up. */
		if (uvm_page_physget(&pa) == false)
			panic("KASAN can't get a page");

		return pa;
	}

	/* UVM is initialized: allocate a page, waiting for one if needed. */
	struct vm_page *pg;
retry:
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL) {
		uvm_wait(__func__);
		goto retry;
	}
	pa = VM_PAGE_TO_PHYS(pg);

	return pa;
}

static void
kasan_md_shadow_map_page(vaddr_t va)
{
	const uint32_t mask = L1_TABLE_SIZE - 1;
	const paddr_t ttb = (paddr_t)(armreg_ttbr1_read() & ~mask);
	pd_entry_t * const pdep = (pd_entry_t *)KERN_PHYSTOV(ttb);

	const size_t l1slot = l1pte_index(va);
	vaddr_t l2ptva;

	KASSERT((va & PAGE_MASK) == 0);

	extern bool kasan_l2pts_created;
	if (__predict_true(kasan_l2pts_created)) {
		/*
		 * The shadow map area L2PTs were allocated and mapped
		 * by arm32_kernel_vm_init. Use the array of pv_addr_t
		 * to get the l2ptva.
		 */
		extern pv_addr_t kasan_l2pt[];
		const size_t off = va - KASAN_MD_SHADOW_START;
		const size_t segoff = off & (L2_S_SEGSIZE - 1);
		const size_t idx = off / L2_S_SEGSIZE;
		const vaddr_t segl2ptva = kasan_l2pt[idx].pv_va;
		l2ptva = segl2ptva + l1pte_index(segoff) * L2_TABLE_SIZE_REAL;
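		/*
		 * Worked example (assuming the usual arm32 constants of a
		 * 4MB L2_S_SEGSIZE, 1MB L1 sections and a 1KB
		 * L2_TABLE_SIZE_REAL): for off = 0x500000 this gives
		 * idx = 1 and segoff = 0x100000, so l2ptva points at the
		 * second 1KB L2 table within kasan_l2pt[1].
		 */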
	} else {
		/*
		 * An L1PT entry may need to be created for the bootstrap
		 * tables. As a page provides enough space for multiple
		 * L2PTs, a previous call might already have created the
		 * L2PT.
		 */
		if (!l1pte_page_p(pdep[l1slot])) {
			const paddr_t l2ptpa = __md_palloc();
			const vaddr_t segl2va = va & -L2_S_SEGSIZE;
			const size_t segl1slot = l1pte_index(segl2va);

			__md_nearlyl1pts++;

			const pd_entry_t npde =
			    L1_C_PROTO | l2ptpa | L1_C_DOM(PMAP_DOMAIN_KERNEL);

			l1pte_set(pdep + segl1slot, npde);
			/*
			 * No need for PDE_SYNC_RANGE here as we're creating
			 * the bootstrap tables.
			 */
		}
		l2ptva = KERN_PHYSTOV(l1pte_pa(pdep[l1slot]));
	}

	pt_entry_t *l2pt = (pt_entry_t *)l2ptva;
	pt_entry_t * const ptep = &l2pt[l2pte_index(va)];

	if (!l2pte_valid_p(*ptep)) {
		const int prot = VM_PROT_READ | VM_PROT_WRITE;
		const paddr_t pa = __md_palloc();
		pt_entry_t npte =
		    L2_S_PROTO |
		    pa |
		    (__md_early ? 0 : pte_l2_s_cache_mode_pt) |
		    L2_S_PROT(PTE_KERNEL, prot);
		l2pte_set(ptep, npte, 0);

		if (!__md_early)
			PTE_SYNC(ptep);

		__builtin_memset((void *)va, 0, PAGE_SIZE);
	}
}
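
/*
 * Sketch of how this is driven (an assumption about the MI layer, based on
 * kasan_shadow_map() in sys/kern/subr_asan.c rather than anything defined
 * here): the caller converts a kernel range to its shadow range and maps
 * it one page at a time, roughly:
 *
 *	vaddr_t sva = (vaddr_t)kasan_md_addr_to_shad(addr) & ~PAGE_MASK;
 *	vaddr_t eva = (vaddr_t)kasan_md_addr_to_shad((char *)addr + size);
 *	for (vaddr_t va = sva; va < roundup(eva, PAGE_SIZE); va += PAGE_SIZE)
 *		kasan_md_shadow_map_page(va);
 */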

/*
 * Map the init stacks of the BP and APs. We will map the rest in kasan_init.
 */
static void
kasan_md_early_init(void *stack)
{

	/*
	 * We come through here twice. The first time is for generic_start
	 * and the bootstrap tables. The second is for arm32_kernel_vm_init
	 * and the real tables.
	 *
	 * In the first we have to create L1PT entries, whereas in the
	 * second arm32_kernel_vm_init has set up the kasan L2PTs (and the
	 * L1PT entries for them).
	 */
	__md_early = true;
	__md_nearlypages = __md_nearlyl1pts;
	kasan_shadow_map(stack, INIT_ARM_TOTAL_STACK);
	__md_early = false;
}

static void
kasan_md_init(void)
{
	extern vaddr_t kasan_kernelstart;
	extern vaddr_t kasan_kernelsize;

	kasan_shadow_map((void *)kasan_kernelstart, kasan_kernelsize);

	/* The VAs we've created until now. */
	vaddr_t eva = pmap_growkernel(VM_KERNEL_VM_BASE);
	kasan_shadow_map((void *)VM_KERNEL_VM_BASE, eva - VM_KERNEL_VM_BASE);
}


static inline bool
__md_unwind_end(const char *name)
{
	static const char * const vectors[] = {
		"undefined_entry",
		"swi_entry",
		"prefetch_abort_entry",
		"data_abort_entry",
		"address_exception_entry",
		"irq_entry",
		"fiqvector"
	};

	for (size_t i = 0; i < __arraycount(vectors); i++) {
		if (!strncmp(name, vectors[i], strlen(vectors[i])))
			return true;
	}

	return false;
}

static void
kasan_md_unwind(void)
{
	uint32_t lr, *fp;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	fp = (uint32_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/*
		 * normal frame
		 *  fp[ 0]	saved code pointer
		 *  fp[-1]	saved lr value
		 *  fp[-2]	saved sp value
		 *  fp[-3]	saved fp value
		 */
		lr = fp[-1];

		if (lr < VM_MIN_KERNEL_ADDRESS) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)lr, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)lr, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		fp = (uint32_t *)fp[-3];
		if (fp == NULL) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}