/*	$NetBSD: asan.h,v 1.13 2020/09/20 15:30:11 skrll Exp $	*/

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the KASAN subsystem of the NetBSD kernel.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/atomic.h>
#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <aarch64/pmap.h>
#include <aarch64/vmparam.h>
#include <aarch64/cpufunc.h>
#include <aarch64/armreg.h>
#include <aarch64/machdep.h>

#define __MD_VIRTUAL_SHIFT	48	/* 49-bit (TTBR0+TTBR1) address space, cut in half */
#define __MD_KERNMEM_BASE	0xFFFF000000000000 /* kern mem base address */

#define __MD_SHADOW_SIZE	(1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START	(AARCH64_KSEG_END)
#define KASAN_MD_SHADOW_END	(KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)
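
/*
 * Shadow geometry: with the MI KASAN scaling of one shadow byte per
 * 2^KASAN_SHADOW_SCALE_SHIFT bytes of memory (assuming the usual shift
 * of 3, i.e. 8 bytes per shadow byte), the 48-bit kernel half needs a
 * shadow of 2^48 / 8 = 2^45 bytes (32TB), placed right after the end of
 * the KSEG direct map.  At 512GB per L0 slot that is 64 slots, which
 * kasan_md_init() asserts below.
 */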

static bool __md_early __read_mostly = true;

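/*
 * Translate a kernel VA to the address of its shadow byte: take the
 * offset from __MD_KERNMEM_BASE, scale it down by KASAN_SHADOW_SCALE_SHIFT,
 * and add it to the start of the shadow window.
 */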
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

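/*
 * Only the managed kernel VA range is sanitized; anything below
 * VM_MIN_KERNEL_ADDRESS or at/above the kernel IO window has no shadow.
 */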
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return (addr < VM_MIN_KERNEL_ADDRESS) ||
	    (addr >= VM_KERNEL_IO_ADDRESS);
}

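/*
 * Allocate a single physical page of shadow (or page-table) memory.
 * During early bootstrap the page comes from pmapboot_pagealloc(); before
 * uvm.page_init_done it comes from uvm_pageboot_alloc(); afterwards from
 * uvm_pagealloc(), waiting in uvm_wait() until a page is available.
 * Outside the early case the page is zeroed through the direct map.
 */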
static paddr_t
__md_palloc(void)
{
	paddr_t pa;

	if (__predict_false(__md_early)) {
		pa = (paddr_t)pmapboot_pagealloc();
		return pa;
	}

	vaddr_t va;
	if (!uvm.page_init_done) {
		va = uvm_pageboot_alloc(PAGE_SIZE);
		pa = AARCH64_KVA_TO_PA(va);
	} else {
		struct vm_page *pg;
retry:
		pg = uvm_pagealloc(NULL, 0, NULL, 0);
		if (pg == NULL) {
			uvm_wait(__func__);
			goto retry;
		}

		pa = VM_PAGE_TO_PHYS(pg);
		va = AARCH64_PA_TO_KVA(pa);
	}

	__builtin_memset((void *)va, 0, PAGE_SIZE);
	return pa;
}

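/*
 * Try to grab a physically contiguous, L2_SIZE-aligned chunk (2MB with
 * 4KB pages) so a whole stretch of shadow can be mapped by one L2 block.
 * Only possible once the page allocator is initialized; returns 0 on
 * failure.
 */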
static inline paddr_t
__md_palloc_large(void)
{
	struct pglist pglist;
	int ret;

	if (!uvm.page_init_done)
		return 0;

	ret = uvm_pglistalloc(L2_SIZE, 0, ~0UL, L2_SIZE, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return 0;

	/* The pages may not be zeroed; the caller zeroes them once mapped. */
	return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}

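/*
 * Create the shadow mapping backing 'va': walk the L0..L3 tables rooted
 * at TTBR1_EL1, allocating any level that is missing.  Page-table pages
 * are touched through the kernel image mapping (KERN_PHYSTOV()) while
 * __md_early, and through the KSEG direct map (AARCH64_PA_TO_KVA())
 * afterwards.  When a contiguous 2MB chunk is available, the shadow is
 * mapped with a single L2 block instead of individual L3 pages.
 */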
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	pd_entry_t *l0, *l1, *l2, *l3;
	paddr_t l0pa, pa;
	pd_entry_t pde;
	size_t idx;

	l0pa = reg_ttbr1_el1_read();
	if (__predict_false(__md_early)) {
		l0 = (void *)KERN_PHYSTOV(l0pa);
	} else {
		l0 = (void *)AARCH64_PA_TO_KVA(l0pa);
	}

	idx = l0pde_index(va);
	pde = l0[idx];
	if (!l0pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l0[idx], pa | L0_TABLE);
	} else {
		pa = l0pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l1 = (void *)KERN_PHYSTOV(pa);
	} else {
		l1 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	idx = l1pde_index(va);
	pde = l1[idx];
	if (!l1pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l1[idx], pa | L1_TABLE);
	} else {
		pa = l1pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l2 = (void *)KERN_PHYSTOV(pa);
	} else {
		l2 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	idx = l2pde_index(va);
	pde = l2[idx];
	if (!l2pde_valid(pde)) {
		/* If possible, use L2_BLOCK to map it in advance. */
		if ((pa = __md_palloc_large()) != 0) {
			atomic_swap_64(&l2[idx], pa | L2_BLOCK |
			    LX_BLKPAG_UXN | LX_BLKPAG_PXN | LX_BLKPAG_AF |
			    LX_BLKPAG_SH_IS | LX_BLKPAG_AP_RW);
			aarch64_tlbi_by_va(va);
			__builtin_memset((void *)va, 0, L2_SIZE);
			return;
		}
		pa = __md_palloc();
		atomic_swap_64(&l2[idx], pa | L2_TABLE);
	} else if (l2pde_is_block(pde)) {
		/* This VA is already mapped as a block. */
		return;
	} else {
		pa = l2pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l3 = (void *)KERN_PHYSTOV(pa);
	} else {
		l3 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	idx = l3pte_index(va);
	pde = l3[idx];
	if (!l3pte_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l3[idx], pa | L3_PAGE | LX_BLKPAG_UXN |
		    LX_BLKPAG_PXN | LX_BLKPAG_AF | LX_BLKPAG_SH_IS |
		    LX_BLKPAG_AP_RW);
		aarch64_tlbi_by_va(va);
	}
}

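/*
 * Early MD setup: map shadow for the bootstrap lwp's stack area (USPACE
 * bytes), then clear __md_early so further pages come from UVM.
 */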
static void
kasan_md_early_init(void *stack)
{
	kasan_shadow_map(stack, USPACE);
	__md_early = false;
}

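/*
 * Late MD setup: check that the shadow region occupies exactly 64 L0
 * slots, then map shadow for all kernel VA created so far, as reported
 * by pmap_growkernel().
 */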
static void
kasan_md_init(void)
{

	CTASSERT((__MD_SHADOW_SIZE / L0_SIZE) == 64);

	/* The VAs we've created until now. */
	vaddr_t eva = pmap_growkernel(VM_KERNEL_VM_BASE);
	kasan_shadow_map((void *)VM_MIN_KERNEL_ADDRESS,
	    eva - VM_MIN_KERNEL_ADDRESS);
}

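/* Symbols at which the unwinder stops: the EL0/EL1 exception entries. */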
static inline bool
__md_unwind_end(const char *name)
{
	if (!strncmp(name, "el0_trap", 8) ||
	    !strncmp(name, "el1_trap", 8)) {
		return true;
	}

	return false;
}

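/*
 * Print a backtrace by following the AAPCS64 frame records: fp points to
 * { saved fp (x29), saved lr (x30) }.  Stop on a non-kernel return
 * address, an unresolvable symbol, one of the trap entry points, a NULL
 * frame pointer, or after 15 frames.
 */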
static void
kasan_md_unwind(void)
{
	uint64_t lr, *fp;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	fp = (uint64_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/*
		 * normal stack frame
		 *  fp[0]  saved fp(x29) value
		 *  fp[1]  saved lr(x30) value
		 */
		lr = fp[1];

		if (lr < VM_MIN_KERNEL_ADDRESS) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)lr, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)lr, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		fp = (uint64_t *)fp[0];
		if (fp == NULL) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}
268