/* $NetBSD: asan.h,v 1.19 2023/04/16 14:01:51 skrll Exp $ */

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the KASAN subsystem of the NetBSD kernel.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/atomic.h>
#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <aarch64/pmap.h>
#include <aarch64/vmparam.h>
#include <aarch64/armreg.h>
#include <aarch64/machdep.h>

#include <arm/cpufunc.h>

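/*
 * Shadow layout: one shadow byte tracks 2^KASAN_SHADOW_SCALE_SHIFT bytes
 * of kernel memory, so the shadow spans
 * 1 << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT) bytes and is
 * placed directly after the direct map.
 */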
#define __MD_VIRTUAL_SHIFT	48	/* 49bit address space, cut half */
#define __MD_KERNMEM_BASE	0xFFFF000000000000 /* kern mem base address */

#define __MD_SHADOW_SIZE	(1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START	(AARCH64_DIRECTMAP_END)
#define KASAN_MD_SHADOW_END	(KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)

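/*
 * True until kasan_md_early_init() has run. While set, pages come from
 * the bootstrap allocator and page tables are reached through the
 * KERN_PHYSTOV mapping rather than the direct map.
 */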
static bool __md_early __read_mostly = true;

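/* Return the shadow byte that tracks the given kernel address. */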
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

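/*
 * Report addresses the shadow does not cover: anything below the kernel
 * map or at/above the I/O space base is not sanitized.
 */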
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return (addr < VM_MIN_KERNEL_ADDRESS) ||
	    (addr >= VM_KERNEL_IO_BASE);
}

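/*
 * Allocate one physical page for the shadow, using whichever allocator
 * is available at this stage of boot: pmapboot very early, then
 * uvm_pageboot_alloc(), then the regular uvm page allocator.
 */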
static paddr_t
__md_palloc(void)
{
	paddr_t pa;

	if (__predict_false(__md_early)) {
		pa = (paddr_t)pmapboot_pagealloc();
		return pa;
	}

	vaddr_t va;
	if (!uvm.page_init_done) {
		va = uvm_pageboot_alloc(PAGE_SIZE);
		pa = AARCH64_KVA_TO_PA(va);
	} else {
		struct vm_page *pg;
retry:
		pg = uvm_pagealloc(NULL, 0, NULL, 0);
		if (pg == NULL) {
			uvm_wait(__func__);
			goto retry;
		}

		pa = VM_PAGE_TO_PHYS(pg);
		va = AARCH64_PA_TO_KVA(pa);
	}

	__builtin_memset((void *)va, 0, PAGE_SIZE);
	return pa;
}

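/*
 * Try to allocate a physically contiguous, L2_SIZE-aligned chunk so the
 * shadow can be mapped with a single L2 block. Returns 0 if uvm is not
 * ready or the allocation fails; the caller then falls back to single
 * pages.
 */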
static inline paddr_t
__md_palloc_large(void)
{
	struct pglist pglist;
	int ret;

	if (!uvm.page_init_done)
		return 0;

	ret = uvm_pglistalloc(L2_SIZE, 0, ~0UL, L2_SIZE, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return 0;

	/* The page may not be zeroed. */
	return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}

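/*
 * Walk (and build, if needed) the L0-L3 page tables so that the shadow
 * page containing 'va' is mapped. When a large contiguous allocation is
 * available, an L2 block mapping is installed instead of an L3 page.
 */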
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	pd_entry_t *l0, *l1, *l2, *l3;
	paddr_t l0pa, pa;
	pd_entry_t pde;
	size_t idx;

	l0pa = reg_ttbr1_el1_read();
	if (__predict_false(__md_early)) {
		l0 = (void *)KERN_PHYSTOV(l0pa);
	} else {
		l0 = (void *)AARCH64_PA_TO_KVA(l0pa);
	}

	idx = l0pde_index(va);
	pde = l0[idx];
	if (!l0pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l0[idx], pa | L0_TABLE);
	} else {
		pa = l0pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l1 = (void *)KERN_PHYSTOV(pa);
	} else {
		l1 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	idx = l1pde_index(va);
	pde = l1[idx];
	if (!l1pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l1[idx], pa | L1_TABLE);
	} else {
		pa = l1pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l2 = (void *)KERN_PHYSTOV(pa);
	} else {
		l2 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	idx = l2pde_index(va);
	pde = l2[idx];
	if (!l2pde_valid(pde)) {
		/* If possible, use L2_BLOCK to map it in advance. */
		if ((pa = __md_palloc_large()) != 0) {
			atomic_swap_64(&l2[idx], pa | L2_BLOCK |
			    LX_BLKPAG_UXN | LX_BLKPAG_PXN | LX_BLKPAG_AF |
			    LX_BLKPAG_SH_IS | LX_BLKPAG_AP_RW);
			aarch64_tlbi_by_va(va);
			__builtin_memset((void *)va, 0, L2_SIZE);
			return;
		}
		pa = __md_palloc();
		atomic_swap_64(&l2[idx], pa | L2_TABLE);
	} else if (l2pde_is_block(pde)) {
		/* This VA is already mapped as a block. */
		return;
	} else {
		pa = l2pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l3 = (void *)KERN_PHYSTOV(pa);
	} else {
		l3 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	idx = l3pte_index(va);
	pde = l3[idx];
	if (!l3pte_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l3[idx], pa | L3_PAGE | LX_BLKPAG_UXN |
		    LX_BLKPAG_PXN | LX_BLKPAG_AF | LX_BLKPAG_SH_IS |
		    LX_BLKPAG_AP_RW | LX_BLKPAG_ATTR_NORMAL_WB);
	}
	dsb(ishst);
	isb();
}

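/*
 * Early entry point: map the shadow for the bootstrap stack (USPACE
 * bytes), then leave early mode so later work goes through uvm and the
 * direct map.
 */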
static void
kasan_md_early_init(void *stack)
{
	kasan_shadow_map(stack, USPACE);
	__md_early = false;
}

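/*
 * Map the shadow for the kernel image and for the kernel VA already
 * created so far by pmap_growkernel().
 */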
static void
kasan_md_init(void)
{

	CTASSERT((__MD_SHADOW_SIZE / L0_SIZE) == 64);

	extern vaddr_t kasan_kernelstart;
	extern vaddr_t kasan_kernelsize;

	kasan_shadow_map((void *)kasan_kernelstart, kasan_kernelsize);

	/* The VAs we've created until now. */
	vaddr_t eva = pmap_growkernel(VM_KERNEL_VM_BASE);
	kasan_shadow_map((void *)VM_KERNEL_VM_BASE, eva - VM_KERNEL_VM_BASE);
}

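/* Stop unwinding once an exception entry point (el0/el1 trap) is reached. */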
static inline bool
__md_unwind_end(const char *name)
{
	if (!strncmp(name, "el0_trap", 8) ||
	    !strncmp(name, "el1_trap", 8)) {
		return true;
	}

	return false;
}

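/*
 * Print a short backtrace (at most 15 frames) by following the frame
 * pointer chain and resolving return addresses with ksyms.
 */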
static void
kasan_md_unwind(void)
{
	uint64_t lr, *fp;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	fp = (uint64_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/*
		 * normal stack frame
		 *  fp[0]  saved fp(x29) value
		 *  fp[1]  saved lr(x30) value
		 */
		lr = fp[1];

		if (lr < VM_MIN_KERNEL_ADDRESS) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)lr, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)lr, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		fp = (uint64_t *)fp[0];
		if (fp == NULL) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}