/*	$NetBSD: asan.h,v 1.9 2020/09/10 14:10:46 maxv Exp $	*/

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the KASAN subsystem of the NetBSD kernel.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <amd64/pmap.h>
#include <amd64/vmparam.h>

#ifdef __HAVE_PCPU_AREA
#error "PCPU area not allowed with KASAN"
#endif
#ifdef __HAVE_DIRECT_MAP
#error "DMAP not allowed with KASAN"
#endif

#define __MD_VIRTUAL_SHIFT	47	/* 48-bit address space, cut in half */
#define __MD_KERNMEM_BASE	0xFFFF800000000000 /* kernel memory base address */

#define __MD_SHADOW_SIZE	(1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START	(VA_SIGN_NEG((L4_SLOT_KASAN * NBPD_L4)))
#define KASAN_MD_SHADOW_END	(KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)
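
/*
 * Illustrative arithmetic (editor's note, assuming the generic KASAN
 * granule of KASAN_SHADOW_SCALE_SHIFT == 3): one shadow byte covers 8
 * bytes of kernel memory, so the shadow for the 2^47-byte kernel half of
 * the address space is 2^(47-3) = 2^44 bytes, i.e. 16TB of virtual space.
 */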

/* -------------------------------------------------------------------------- */

/*
 * Early mapping, used to map just the stack at boot time. We rely on the fact
 * that VA = PA + KERNBASE.
 */

static bool __md_early __read_mostly = true;
static uint8_t __md_earlypages[8 * PAGE_SIZE] __aligned(PAGE_SIZE);
static size_t __md_earlytaken = 0;

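/*
 * Hand out physical pages from a small static buffer, one page at a time.
 * The buffer lives in the kernel image, so its physical address is its
 * virtual address minus KERNBASE (see the comment above).
 */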
static paddr_t
__md_early_palloc(void)
{
	paddr_t ret;

	KASSERT(__md_earlytaken < 8);

	ret = (paddr_t)(&__md_earlypages[0] + __md_earlytaken * PAGE_SIZE);
	__md_earlytaken++;

	ret -= KERNBASE;

	return ret;
}

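/*
 * Walk the four page-table levels by hand for 'va', allocating any missing
 * intermediate page with __md_early_palloc(). Each level is reached through
 * its physical address plus KERNBASE, rather than through the recursive
 * slots used once the pmap is up.
 */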
static void
__md_early_shadow_map_page(vaddr_t va)
{
	extern struct bootspace bootspace;
	const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P;
	pt_entry_t *pdir = (pt_entry_t *)bootspace.pdir;
	paddr_t pa;

	if (!pmap_valid_entry(pdir[pl4_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl4_pi(va)] = pa | pteflags;
	}
	pdir = (pt_entry_t *)((pdir[pl4_pi(va)] & PTE_FRAME) + KERNBASE);

	if (!pmap_valid_entry(pdir[pl3_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl3_pi(va)] = pa | pteflags;
	}
	pdir = (pt_entry_t *)((pdir[pl3_pi(va)] & PTE_FRAME) + KERNBASE);

	if (!pmap_valid_entry(pdir[pl2_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl2_pi(va)] = pa | pteflags;
	}
	pdir = (pt_entry_t *)((pdir[pl2_pi(va)] & PTE_FRAME) + KERNBASE);

	if (!pmap_valid_entry(pdir[pl1_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl1_pi(va)] = pa | pteflags | pmap_pg_g;
	}
}

/* -------------------------------------------------------------------------- */

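/*
 * Worked example (editor's note, illustrative): for
 * addr == __MD_KERNMEM_BASE + 0x4000, the shadow byte is at
 * KASAN_MD_SHADOW_START + (0x4000 >> 3) == KASAN_MD_SHADOW_START + 0x800,
 * since one shadow byte tracks an 8-byte granule.
 */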
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

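/*
 * Addresses inside the recursive page-table window (PTE_BASE, one L4 slot
 * wide) are declared unsupported: that window aliases the page tables
 * themselves, and no shadow is maintained for it.
 */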
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return (addr >= (vaddr_t)PTE_BASE &&
	    addr < ((vaddr_t)PTE_BASE + NBPD_L4));
}

static paddr_t
__md_palloc(void)
{
	/* The page is zeroed. */
	return pmap_get_physpage();
}

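/*
 * Try to grab a physically contiguous, 2MB-aligned run of memory (NBPD_L2
 * bytes), so that the caller can map the shadow with a single large page.
 * Returns 0 if UVM is not initialized yet or no such run is available, in
 * which case the caller falls back to 4KB pages.
 */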
static inline paddr_t
__md_palloc_large(void)
{
	struct pglist pglist;
	int ret;

	if (!uvm.page_init_done)
		return 0;

	ret = uvm_pglistalloc(NBPD_L2, 0, ~0UL, NBPD_L2, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return 0;

	/* The page may not be zeroed. */
	return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}

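/*
 * Map one shadow page for 'va'. Once the bootstrap phase is over, the
 * page-table levels are reached through the recursive slots (L4_BASE,
 * L3_BASE, L2_BASE, L1_BASE) rather than through physical addresses.
 */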
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P;
	paddr_t pa;

	if (__predict_false(__md_early)) {
		__md_early_shadow_map_page(va);
		return;
	}

	if (!pmap_valid_entry(L4_BASE[pl4_i(va)])) {
		pa = __md_palloc();
		L4_BASE[pl4_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L3_BASE[pl3_i(va)])) {
		pa = __md_palloc();
		L3_BASE[pl3_i(va)] = pa | pteflags;
	}
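	/*
	 * At the L2 level, first try to install a 2MB large page (PTE_PS)
	 * for the shadow; uvm_pglistalloc() memory is not guaranteed to be
	 * zeroed, so the freshly mapped region is cleared by hand. If the
	 * shadow here is already a large page, there is nothing left to do.
	 */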
	if (!pmap_valid_entry(L2_BASE[pl2_i(va)])) {
		if ((pa = __md_palloc_large()) != 0) {
			L2_BASE[pl2_i(va)] = pa | pteflags | PTE_PS |
			    pmap_pg_g;
			__insn_barrier();
			__builtin_memset((void *)va, 0, NBPD_L2);
			return;
		}
		pa = __md_palloc();
		L2_BASE[pl2_i(va)] = pa | pteflags;
	} else if (L2_BASE[pl2_i(va)] & PTE_PS) {
		return;
	}
	if (!pmap_valid_entry(L1_BASE[pl1_i(va)])) {
		pa = __md_palloc();
		L1_BASE[pl1_i(va)] = pa | pteflags | pmap_pg_g;
	}
}

/*
 * Map only the current stack. We will map the rest in kasan_init.
 */
static void
kasan_md_early_init(void *stack)
{
	kasan_shadow_map(stack, USPACE);
	__md_early = false;
}

/*
 * Create the shadow mapping. We don't create the 'User' area, because it
 * is excluded from monitoring. The 'Main' area is created dynamically in
 * pmap_growkernel.
 */
static void
kasan_md_init(void)
{
	extern struct bootspace bootspace;
	size_t i;

	CTASSERT((__MD_SHADOW_SIZE / NBPD_L4) == NL4_SLOT_KASAN);

	/* Kernel. */
	for (i = 0; i < BTSPACE_NSEGS; i++) {
		if (bootspace.segs[i].type == BTSEG_NONE) {
			continue;
		}
		kasan_shadow_map((void *)bootspace.segs[i].va,
		    bootspace.segs[i].sz);
	}

	/* Boot region. */
	kasan_shadow_map((void *)bootspace.boot.va, bootspace.boot.sz);

	/* Module map. */
	kasan_shadow_map((void *)bootspace.smodule,
	    (size_t)(bootspace.emodule - bootspace.smodule));

	/* The bootstrap spare va. */
	kasan_shadow_map((void *)bootspace.spareva, PAGE_SIZE);
}

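/*
 * Return true when 'name' is one of the assembly entry points (syscall,
 * trap, and interrupt stubs): past these symbols the frame-pointer chain
 * leaves ordinary C code, so the unwinder stops there.
 */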
static inline bool
__md_unwind_end(const char *name)
{
	if (!strcmp(name, "syscall") ||
	    !strcmp(name, "alltraps") ||
	    !strcmp(name, "handle_syscall") ||
	    !strncmp(name, "Xtrap", 5) ||
	    !strncmp(name, "Xintr", 5) ||
	    !strncmp(name, "Xhandle", 7) ||
	    !strncmp(name, "Xresume", 7) ||
	    !strncmp(name, "Xstray", 6) ||
	    !strncmp(name, "Xhold", 5) ||
	    !strncmp(name, "Xrecurse", 8) ||
	    !strcmp(name, "Xdoreti") ||
	    !strncmp(name, "Xsoft", 5)) {
		return true;
	}

	return false;
}

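/*
 * Print a backtrace by walking the %rbp frame-pointer chain: each frame
 * stores the caller's %rbp at 0(%rbp) and the return %rip at 8(%rbp).
 * The walk stops on a non-kernel %rip, an unresolvable symbol, a NULL
 * frame pointer, an assembly entry point, or after 15 frames.
 */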
static void
kasan_md_unwind(void)
{
	uint64_t *rbp, rip;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	rbp = (uint64_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/* 8(%rbp) contains the saved %rip. */
		rip = *(rbp + 1);

		if (rip < KERNBASE) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)rip, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)rip, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		rbp = (uint64_t *)*(rbp);
		if (rbp == NULL) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}