/*	$NetBSD: asan.h,v 1.7 2020/06/23 17:21:55 maxv Exp $	*/

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/ksyms.h>

#include <amd64/pmap.h>
#include <amd64/vmparam.h>

#ifdef __HAVE_PCPU_AREA
#error "PCPU area not allowed with KASAN"
#endif
#ifdef __HAVE_DIRECT_MAP
#error "DMAP not allowed with KASAN"
#endif

#define __MD_VIRTUAL_SHIFT	47	/* 48-bit address space, cut in half */
#define __MD_KERNMEM_BASE	0xFFFF800000000000 /* kern mem base address */

#define __MD_SHADOW_SIZE	(1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START	(VA_SIGN_NEG((L4_SLOT_KASAN * NBPD_L4)))
#define KASAN_MD_SHADOW_END	(KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)
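
/*
 * Worked out (assuming KASAN_SHADOW_SCALE_SHIFT is 3, i.e. one shadow byte
 * per 8-byte granule, as in the MI KASAN code): __MD_SHADOW_SIZE is
 * 2^(47-3) = 2^44 bytes = 16TB. With NBPD_L4 = 2^39 (512GB per L4 slot),
 * that is 32 L4 slots, which the CTASSERT in kasan_md_init() checks
 * against NL4_SLOT_KASAN.
 */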

/* -------------------------------------------------------------------------- */

/*
 * Early mapping, used to map just the stack at boot time. We rely on the fact
 * that VA = PA + KERNBASE.
 */

static bool __md_early __read_mostly = true;
static uint8_t __md_earlypages[8 * PAGE_SIZE] __aligned(PAGE_SIZE);
static size_t __md_earlytaken = 0;

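/*
 * Grab one page from the static early pool. The pool lives inside the
 * kernel image, so with VA = PA + KERNBASE the physical address is simply
 * the virtual address minus KERNBASE.
 */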
static paddr_t
__md_early_palloc(void)
{
	paddr_t ret;

	KASSERT(__md_earlytaken < 8);

	ret = (paddr_t)(&__md_earlypages[0] + __md_earlytaken * PAGE_SIZE);
	__md_earlytaken++;

	ret -= KERNBASE;

	return ret;
}

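/*
 * Walk the boot page directory from L4 down to L1, allocating any missing
 * intermediate level from the early pool, and map a fresh read/write,
 * non-executable page at 'va'. Only the final L1 mapping is made global.
 */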
static void
__md_early_shadow_map_page(vaddr_t va)
{
	extern struct bootspace bootspace;
	const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P;
	pt_entry_t *pdir = (pt_entry_t *)bootspace.pdir;
	paddr_t pa;

	if (!pmap_valid_entry(pdir[pl4_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl4_pi(va)] = pa | pteflags;
	}
	pdir = (pt_entry_t *)((pdir[pl4_pi(va)] & PTE_FRAME) + KERNBASE);

	if (!pmap_valid_entry(pdir[pl3_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl3_pi(va)] = pa | pteflags;
	}
	pdir = (pt_entry_t *)((pdir[pl3_pi(va)] & PTE_FRAME) + KERNBASE);

	if (!pmap_valid_entry(pdir[pl2_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl2_pi(va)] = pa | pteflags;
	}
	pdir = (pt_entry_t *)((pdir[pl2_pi(va)] & PTE_FRAME) + KERNBASE);

	if (!pmap_valid_entry(pdir[pl1_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl1_pi(va)] = pa | pteflags | pmap_pg_g;
	}
}

/* -------------------------------------------------------------------------- */

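/*
 * Translate a kernel virtual address into its shadow address: take the
 * offset from the kernel memory base, scale it down by the shadow ratio,
 * and relocate it into the shadow region. For example (with a scale shift
 * of 3), __MD_KERNMEM_BASE maps to KASAN_MD_SHADOW_START, and each further
 * 8 bytes of kernel VA are tracked by one shadow byte after it.
 */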
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

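/*
 * The recursive PTE slot (PTE_BASE, one L4 slot wide) is where the page
 * tables themselves are accessed; it is not covered by the shadow.
 */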
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return (addr >= (vaddr_t)PTE_BASE &&
	    addr < ((vaddr_t)PTE_BASE + NBPD_L4));
}

static paddr_t
__md_palloc(void)
{
	/* The page is zeroed. */
	return pmap_get_physpage();
}

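/*
 * Try to grab a physically contiguous, 2MB-aligned chunk of NBPD_L2 bytes,
 * so that the caller can map it with a single large page. Returns 0 if UVM
 * is not initialized yet, or if no such chunk is available.
 */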
static inline paddr_t
__md_palloc_large(void)
{
	struct pglist pglist;
	int ret;

	if (!uvm.page_init_done)
		return 0;

	ret = uvm_pglistalloc(NBPD_L2, 0, ~0UL, NBPD_L2, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return 0;

	/* The page may not be zeroed. */
	return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}

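/*
 * Map a shadow page at 'va'. Before UVM is up, defer to the early mapping
 * code. Otherwise walk the page tables through the recursive mapping
 * (L4_BASE/L3_BASE/L2_BASE/L1_BASE), preferring a 2MB large page (PTE_PS)
 * at L2 when a contiguous chunk can be found; such a chunk may not be
 * zeroed, so it is memset right after being mapped.
 */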
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P;
	paddr_t pa;

	if (__predict_false(__md_early)) {
		__md_early_shadow_map_page(va);
		return;
	}

	if (!pmap_valid_entry(L4_BASE[pl4_i(va)])) {
		pa = __md_palloc();
		L4_BASE[pl4_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L3_BASE[pl3_i(va)])) {
		pa = __md_palloc();
		L3_BASE[pl3_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L2_BASE[pl2_i(va)])) {
		if ((pa = __md_palloc_large()) != 0) {
			L2_BASE[pl2_i(va)] = pa | pteflags | PTE_PS |
			    pmap_pg_g;
			__insn_barrier();
			__builtin_memset((void *)va, 0, NBPD_L2);
			return;
		}
		pa = __md_palloc();
		L2_BASE[pl2_i(va)] = pa | pteflags;
	} else if (L2_BASE[pl2_i(va)] & PTE_PS) {
		return;
	}
	if (!pmap_valid_entry(L1_BASE[pl1_i(va)])) {
		pa = __md_palloc();
		L1_BASE[pl1_i(va)] = pa | pteflags | pmap_pg_g;
	}
}

/*
 * Map only the current stack. We will map the rest in kasan_init.
 */
static void
kasan_md_early_init(void *stack)
{
	kasan_shadow_map(stack, USPACE);
	__md_early = false;
}

/*
 * Create the shadow mapping. We don't create the 'User' area, because we
 * exclude it from monitoring. The 'Main' area is created dynamically
 * in pmap_growkernel.
 */
static void
kasan_md_init(void)
{
	extern struct bootspace bootspace;
	size_t i;

	CTASSERT((__MD_SHADOW_SIZE / NBPD_L4) == NL4_SLOT_KASAN);

	/* Kernel. */
	for (i = 0; i < BTSPACE_NSEGS; i++) {
		if (bootspace.segs[i].type == BTSEG_NONE) {
			continue;
		}
		kasan_shadow_map((void *)bootspace.segs[i].va,
		    bootspace.segs[i].sz);
	}

	/* Boot region. */
	kasan_shadow_map((void *)bootspace.boot.va, bootspace.boot.sz);

	/* Module map. */
	kasan_shadow_map((void *)bootspace.smodule,
	    (size_t)(bootspace.emodule - bootspace.smodule));

	/* The bootstrap spare va. */
	kasan_shadow_map((void *)bootspace.spareva, PAGE_SIZE);
}

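/*
 * Return true if 'name' is one of the assembly entry points (syscall,
 * trap and interrupt stubs) past which there is no caller frame worth
 * following.
 */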
static inline bool
__md_unwind_end(const char *name)
{
	if (!strcmp(name, "syscall") ||
	    !strcmp(name, "alltraps") ||
	    !strcmp(name, "handle_syscall") ||
	    !strncmp(name, "Xtrap", 5) ||
	    !strncmp(name, "Xintr", 5) ||
	    !strncmp(name, "Xhandle", 7) ||
	    !strncmp(name, "Xresume", 7) ||
	    !strncmp(name, "Xstray", 6) ||
	    !strncmp(name, "Xhold", 5) ||
	    !strncmp(name, "Xrecurse", 8) ||
	    !strcmp(name, "Xdoreti") ||
	    !strncmp(name, "Xsoft", 5)) {
		return true;
	}

	return false;
}

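/*
 * Print a backtrace by chasing the saved %rbp chain, resolving each return
 * address with ksyms, and stopping at an entry point, an invalid frame, or
 * after 15 frames.
 */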
static void
kasan_md_unwind(void)
{
	uint64_t *rbp, rip;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	rbp = (uint64_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/* 8(%rbp) contains the saved %rip. */
		rip = *(rbp + 1);

		if (rip < KERNBASE) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)rip, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)rip, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		rbp = (uint64_t *)*(rbp);
		if (rbp == 0) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}