xref: /netbsd-src/sys/arch/arm/include/asan.h (revision 53d1339bf7f9c7367b35a9e1ebe693f9b047a47b)
/*	$NetBSD: asan.h,v 1.7 2021/01/27 08:40:32 skrll Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson, and is part of the KASAN subsystem of the NetBSD kernel.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/atomic.h>
#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <arm/vmparam.h>
#include <arm/arm32/machdep.h>
#include <arm/arm32/pmap.h>

#define KASAN_MD_SHADOW_START	VM_KERNEL_KASAN_BASE
#define KASAN_MD_SHADOW_END	VM_KERNEL_KASAN_END
#define __MD_KERNMEM_BASE	KERNEL_BASE

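/*
 * Return the shadow byte tracking the given kernel address.  Each
 * shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT bytes of kernel memory,
 * so the offset from the start of kernel memory is scaled down and
 * added to the shadow base.  Illustrative example only (assuming a
 * typical KERNEL_BASE of 0x80000000 and a scale shift of 3): the
 * address 0x80001000 maps to shadow byte KASAN_MD_SHADOW_START + 0x200.
 */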
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

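/*
 * KASAN only tracks VAs between VM_MIN_KERNEL_ADDRESS and the start of
 * the shadow region itself; everything else is unsupported.
 */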
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return addr < VM_MIN_KERNEL_ADDRESS ||
	    addr >= KASAN_MD_SHADOW_START;
}

/* -------------------------------------------------------------------------- */

/*
 * Early mapping, used to map just the stack at boot time. We rely on the fact
 * that VA = PA + KERNEL_BASE.
 */

/*
 * KASAN_NEARLYPAGES is hard to work out.
 *
 * The INIT_ARM_TOTAL_STACK shadow is reduced by the KASAN_SHADOW_SCALE_SIZE
 * factor. This shadow mapping is likely to span more than one L2 page table
 * and, as a result, more than one PAGE_SIZE block. The L2 page tables might
 * span more than one L1 page table entry as well.
 *
 * To ensure we have enough, start with the assumption of one L1 page table
 * plus the number of pages needed to map the shadow, then double it to
 * account for the spanning described above.
 */

#define KASAN_NEARLYPAGES	\
    (2 * (1 + howmany(INIT_ARM_TOTAL_STACK / KASAN_SHADOW_SCALE_SIZE, PAGE_SIZE)))
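
/*
 * Illustrative arithmetic only (the real INIT_ARM_TOTAL_STACK is
 * defined elsewhere): with a hypothetical 32KiB initial stack, the
 * usual 8:1 shadow scale (KASAN_SHADOW_SCALE_SIZE == 8) and a 4KiB
 * PAGE_SIZE, the shadow is 32768 / 8 == 4096 bytes, so
 * howmany(4096, 4096) == 1 and KASAN_NEARLYPAGES == 2 * (1 + 1) == 4.
 */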

static bool __md_early __read_mostly;
static size_t __md_nearlyl1pts __attribute__((__section__(".data"))) = 0;
static size_t __md_nearlypages __attribute__((__section__(".data")));
static uint8_t __md_earlypages[KASAN_NEARLYPAGES * PAGE_SIZE]
    __aligned(PAGE_SIZE) __attribute__((__section__(".data")));

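/*
 * Allocate one physical page for the shadow.  Three cases, from
 * earliest to latest in boot: carve a page out of the static
 * __md_earlypages pool, steal one from the VM system before
 * uvm_page_init() has run, or ask UVM, sleeping until a page becomes
 * available.
 */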
static paddr_t
__md_palloc(void)
{
	paddr_t pa;

	if (__predict_false(__md_early)) {
		KASSERTMSG(__md_nearlypages < KASAN_NEARLYPAGES,
		    "__md_nearlypages %zu", __md_nearlypages);

		vaddr_t va = (vaddr_t)(&__md_earlypages[0] + __md_nearlypages * PAGE_SIZE);
		__md_nearlypages++;
		__builtin_memset((void *)va, 0, PAGE_SIZE);

		return KERN_VTOPHYS(va);
	}

	if (!uvm.page_init_done) {
		if (uvm_page_physget(&pa) == false)
			panic("KASAN can't get a page");

		return pa;
	}

	struct vm_page *pg;
retry:
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL) {
		uvm_wait(__func__);
		goto retry;
	}
	pa = VM_PAGE_TO_PHYS(pg);

	return pa;
}

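/*
 * Map one page of the shadow at 'va': locate (or, while bootstrapping,
 * create) the L2 page table that covers it, then, if no valid PTE
 * exists yet, enter one pointing at a freshly allocated zeroed page.
 */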
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	const uint32_t mask = L1_TABLE_SIZE - 1;
	const paddr_t ttb = (paddr_t)(armreg_ttbr1_read() & ~mask);
	pd_entry_t * const pdep = (pd_entry_t *)KERN_PHYSTOV(ttb);

	const size_t l1slot = l1pte_index(va);
	vaddr_t l2ptva;

	KASSERT((va & PAGE_MASK) == 0);

	extern bool kasan_l2pts_created;
	if (__predict_true(kasan_l2pts_created)) {
		/*
		 * The shadow map area L2PTs were allocated and mapped
		 * by arm32_kernel_vm_init.  Use the array of pv_addr_t
		 * to get the l2ptva.
		 */
		extern pv_addr_t kasan_l2pt[];
		const size_t off = va - KASAN_MD_SHADOW_START;
		const size_t segoff = off & (L2_S_SEGSIZE - 1);
		const size_t idx = off / L2_S_SEGSIZE;
		const vaddr_t segl2ptva = kasan_l2pt[idx].pv_va;
		l2ptva = segl2ptva + l1pte_index(segoff) * L2_TABLE_SIZE_REAL;
	} else {
		/*
		 * An L1PT entry may be required for the bootstrap
		 * tables.  As a page provides enough space for multiple
		 * L2PTs, a previous call might already have created the
		 * L2PT.
		 */
		if (!l1pte_page_p(pdep[l1slot])) {
			const paddr_t l2ptpa = __md_palloc();
			const vaddr_t segl2va = va & -L2_S_SEGSIZE;
			const size_t segl1slot = l1pte_index(segl2va);

			__md_nearlyl1pts++;

			const pd_entry_t npde =
			    L1_C_PROTO | l2ptpa | L1_C_DOM(PMAP_DOMAIN_KERNEL);

			l1pte_set(pdep + segl1slot, npde);
			/*
			 * No need for PDE_SYNC_RANGE here as we're creating
			 * the bootstrap tables.
			 */
		}
		l2ptva = KERN_PHYSTOV(l1pte_pa(pdep[l1slot]));
	}

	pt_entry_t *l2pt = (pt_entry_t *)l2ptva;
	pt_entry_t * const ptep = &l2pt[l2pte_index(va)];

	if (!l2pte_valid_p(*ptep)) {
		const int prot = VM_PROT_READ | VM_PROT_WRITE;
		const paddr_t pa = __md_palloc();
		pt_entry_t npte =
		    L2_S_PROTO |
		    pa |
		    (__md_early ? 0 : pte_l2_s_cache_mode_pt) |
		    L2_S_PROT(PTE_KERNEL, prot);
		l2pte_set(ptep, npte, 0);

		if (!__md_early)
			PTE_SYNC(ptep);

		__builtin_memset((void *)va, 0, PAGE_SIZE);
	}
}

/*
 * Map the init stacks of the BP and APs. We will map the rest in kasan_init.
 */
static void
kasan_md_early_init(void *stack)
{

	/*
	 * We come through here twice.  The first time is for generic_start
	 * and the bootstrap tables.  The second is for arm32_kernel_vm_init
	 * and the real tables.
	 *
	 * In the first we have to create L1PT entries, whereas in the
	 * second arm32_kernel_vm_init has set up kasan_l2pts (and the
	 * L1PT entries for them).
	 */
	__md_early = true;
	__md_nearlypages = __md_nearlyl1pts;
	kasan_shadow_map(stack, INIT_ARM_TOTAL_STACK);
	__md_early = false;
}

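/*
 * Map the shadow for the kernel image itself, then for every kernel VA
 * created so far (pmap_growkernel() returns the current end of the
 * kernel virtual address space).
 */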
static void
kasan_md_init(void)
{
	extern vaddr_t kasan_kernelstart;
	extern vaddr_t kasan_kernelsize;

	kasan_shadow_map((void *)kasan_kernelstart, kasan_kernelsize);

	/* The VAs we've created until now. */
	vaddr_t eva = pmap_growkernel(VM_KERNEL_VM_BASE);
	kasan_shadow_map((void *)VM_KERNEL_VM_BASE, eva - VM_KERNEL_VM_BASE);
}

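/*
 * Return true if the symbol names one of the exception vector entry
 * points, i.e. a frame beyond which the unwinder must not walk.
 */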
static inline bool
__md_unwind_end(const char *name)
{
	static const char * const vectors[] = {
		"undefined_entry",
		"swi_entry",
		"prefetch_abort_entry",
		"data_abort_entry",
		"address_exception_entry",
		"irq_entry",
		"fiqvector"
	};

	for (size_t i = 0; i < __arraycount(vectors); i++) {
		if (!strncmp(name, vectors[i], strlen(vectors[i])))
			return true;
	}

	return false;
}

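/*
 * Print a best-effort backtrace by walking the saved frame pointers and
 * resolving each return address with ksyms.  Stop at the first
 * non-kernel address, unresolvable symbol, exception entry frame, NULL
 * frame pointer, or after 15 frames.
 */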
static void
kasan_md_unwind(void)
{
	uint32_t lr, *fp;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	fp = (uint32_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/*
		 * normal frame
		 *  fp[ 0] saved code pointer
		 *  fp[-1] saved lr value
		 *  fp[-2] saved sp value
		 *  fp[-3] saved fp value
		 */
		lr = fp[-1];

		if (lr < VM_MIN_KERNEL_ADDRESS) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)lr, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)lr, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		fp = (uint32_t *)fp[-3];
		if (fp == NULL) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}