/*	$NetBSD: msan.h,v 1.6 2020/11/18 16:13:34 hannken Exp $	*/

/*
 * Copyright (c) 2019-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the KMSAN subsystem of the NetBSD kernel.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <amd64/pmap.h>
#include <amd64/vmparam.h>

#ifdef __HAVE_PCPU_AREA
#error "PCPU area not allowed with KMSAN"
#endif
#ifdef __HAVE_DIRECT_MAP
#error "DMAP not allowed with KMSAN"
#endif

/*
 * One big shadow, divided in two sub-shadows (SHAD and ORIG), themselves
 * divided in two regions (MAIN and KERN).
 */

#define __MD_SHADOW_SIZE	0x20000000000ULL	/* 4 * NBPD_L4 */
#define __MD_SHADOW_START	(VA_SIGN_NEG((L4_SLOT_KMSAN * NBPD_L4)))
#define __MD_SHADOW_END		(__MD_SHADOW_START + __MD_SHADOW_SIZE)

#define __MD_SHAD_MAIN_START	(__MD_SHADOW_START)
#define __MD_SHAD_KERN_START	(__MD_SHADOW_START + 0x8000000000ULL)

#define __MD_ORIG_MAIN_START	(__MD_SHAD_KERN_START + 0x8000000000ULL)
#define __MD_ORIG_KERN_START	(__MD_ORIG_MAIN_START + 0x8000000000ULL)

#define __MD_PTR_BASE		0xFFFFFFFF80000000ULL
#define __MD_ORIG_TYPE		__BITS(31,28)

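/*
 * Resulting layout, derived from the macros above.  Offsets are relative
 * to __MD_SHADOW_START, each region spanning 0x8000000000 bytes (one
 * NBPD_L4):
 *
 *	+0x00000000000	SHAD MAIN	shadow of the main kernel VA range
 *	+0x08000000000	SHAD KERN	shadow of the kernel image (KERNBASE)
 *	+0x10000000000	ORIG MAIN	origins of the main kernel VA range
 *	+0x18000000000	ORIG KERN	origins of the kernel image (KERNBASE)
 *
 * kmsan_md_addr_to_shad() and kmsan_md_addr_to_orig() translate a kernel
 * VA into the corresponding byte of the SHAD and ORIG sub-shadows: the
 * MAIN region covers [vm_min_kernel_address, vm_max_kernel_address) and
 * the KERN region covers addresses at and above KERNBASE.
 */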
static inline int8_t *
kmsan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;

	if (va >= vm_min_kernel_address && va < vm_max_kernel_address) {
		return (int8_t *)(__MD_SHAD_MAIN_START + (va - vm_min_kernel_address));
	} else if (va >= KERNBASE) {
		return (int8_t *)(__MD_SHAD_KERN_START + (va - KERNBASE));
	} else {
		panic("%s: impossible, va=%p", __func__, (void *)va);
	}
}

static inline int8_t *
kmsan_md_addr_to_orig(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;

	if (va >= vm_min_kernel_address && va < vm_max_kernel_address) {
		return (int8_t *)(__MD_ORIG_MAIN_START + (va - vm_min_kernel_address));
	} else if (va >= KERNBASE) {
		return (int8_t *)(__MD_ORIG_KERN_START + (va - KERNBASE));
	} else {
		panic("%s: impossible, va=%p", __func__, (void *)va);
	}
}

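/*
 * The recursive page-table mapping at PTE_BASE (one L4 slot) has no
 * shadow; accesses to it are not instrumented.
 */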
static inline bool
kmsan_md_unsupported(vaddr_t addr)
{
	return (addr >= (vaddr_t)PTE_BASE &&
	    addr < ((vaddr_t)PTE_BASE + NBPD_L4));
}

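/*
 * Allocate a single zeroed physical page, used both for page-table
 * pages and for 4KB shadow pages.
 */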
static inline paddr_t
__md_palloc(void)
{
	/* The page is zeroed. */
	return pmap_get_physpage();
}

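/*
 * Try to allocate a physically contiguous, 2MB-aligned chunk (NBPD_L2
 * bytes), so that the shadow can be mapped with a large page.  Returns
 * zero if uvm is not initialized yet or no such chunk is available, in
 * which case the caller falls back to 4KB pages.  kmsan_init_arg()
 * marks the arguments of the uvm_pglistalloc() call as initialized.
 */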
static inline paddr_t
__md_palloc_large(void)
{
	struct pglist pglist;
	int ret;

	if (!uvm.page_init_done)
		return 0;

	kmsan_init_arg(sizeof(psize_t) + 4 * sizeof(paddr_t) +
	    sizeof(struct pglist *) + 2 * sizeof(int));
	ret = uvm_pglistalloc(NBPD_L2, 0, ~0UL, NBPD_L2, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return 0;

	/* The page may not be zeroed. */
	return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}

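/*
 * Map one shadow page at 'va', creating the intermediate page-table
 * levels as needed.  If a contiguous 2MB chunk can be allocated, the L2
 * entry is installed as a large page and the chunk is zeroed; otherwise
 * a regular 4KB page is mapped at L1.
 */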
static void
kmsan_md_shadow_map_page(vaddr_t va)
{
	const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P;
	paddr_t pa;

	KASSERT(va >= __MD_SHADOW_START && va < __MD_SHADOW_END);

	if (!pmap_valid_entry(L4_BASE[pl4_i(va)])) {
		pa = __md_palloc();
		L4_BASE[pl4_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L3_BASE[pl3_i(va)])) {
		pa = __md_palloc();
		L3_BASE[pl3_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L2_BASE[pl2_i(va)])) {
		if ((pa = __md_palloc_large()) != 0) {
			L2_BASE[pl2_i(va)] = pa | pteflags | PTE_PS |
			    pmap_pg_g;
			__insn_barrier();
			__builtin_memset((void *)va, 0, NBPD_L2);
			return;
		}
		pa = __md_palloc();
		L2_BASE[pl2_i(va)] = pa | pteflags;
	} else if (L2_BASE[pl2_i(va)] & PTE_PS) {
		return;
	}
	if (!pmap_valid_entry(L1_BASE[pl1_i(va)])) {
		pa = __md_palloc();
		L1_BASE[pl1_i(va)] = pa | pteflags | pmap_pg_g;
	}
}

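/*
 * Create the shadow of everything mapped by the bootstrap code: the
 * kernel segments from bootspace, the boot region, the module map and
 * the bootstrap spare VA.
 */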
static void
kmsan_md_init(void)
{
	extern struct bootspace bootspace;
	size_t i;

	CTASSERT((__MD_SHADOW_SIZE / NBPD_L4) == NL4_SLOT_KMSAN);

	/* Kernel. */
	for (i = 0; i < BTSPACE_NSEGS; i++) {
		if (bootspace.segs[i].type == BTSEG_NONE) {
			continue;
		}
		kmsan_shadow_map((void *)bootspace.segs[i].va,
		    bootspace.segs[i].sz);
	}

	/* Boot region. */
	kmsan_shadow_map((void *)bootspace.boot.va, bootspace.boot.sz);

	/* Module map. */
	kmsan_shadow_map((void *)bootspace.smodule,
	    (size_t)(bootspace.emodule - bootspace.smodule));

	/* The bootstrap spare va. */
	kmsan_shadow_map((void *)bootspace.spareva, PAGE_SIZE);
}

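/*
 * An origin packs an address inside the kernel image into 32 bits: the
 * top nibble (__MD_ORIG_TYPE) carries the origin type, and decoding
 * restores the upper bits of the pointer by oring __MD_PTR_BASE back in.
 */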
static inline msan_orig_t
kmsan_md_orig_encode(int type, uintptr_t ptr)
{
	msan_orig_t ret;

	ret = (ptr & 0xFFFFFFFF) & ~__MD_ORIG_TYPE;
	ret |= __SHIFTIN(type, __MD_ORIG_TYPE);

	return ret;
}

static inline void
kmsan_md_orig_decode(msan_orig_t orig, int *type, uintptr_t *ptr)
{
	*type = __SHIFTOUT(orig, __MD_ORIG_TYPE);
	*ptr = (uintptr_t)(orig & ~__MD_ORIG_TYPE) | __MD_PTR_BASE;
}

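/*
 * A pointer below __rodata_start lies in the kernel text, and is
 * therefore treated as a program counter.
 */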
static inline bool
kmsan_md_is_pc(uintptr_t ptr)
{
	extern uint8_t __rodata_start;

	return (ptr < (uintptr_t)&__rodata_start);
}

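/*
 * Return true if 'name' is one of the assembly entry points (syscall,
 * trap and interrupt stubs) at which the stack unwind should stop.
 */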
static inline bool
__md_unwind_end(const char *name)
{
	if (!strcmp(name, "syscall") ||
	    !strcmp(name, "alltraps") ||
	    !strcmp(name, "handle_syscall") ||
	    !strncmp(name, "Xtrap", 5) ||
	    !strncmp(name, "Xintr", 5) ||
	    !strncmp(name, "Xhandle", 7) ||
	    !strncmp(name, "Xresume", 7) ||
	    !strncmp(name, "Xstray", 6) ||
	    !strncmp(name, "Xhold", 5) ||
	    !strncmp(name, "Xrecurse", 8) ||
	    !strcmp(name, "Xdoreti") ||
	    !strncmp(name, "Xsoft", 5)) {
		return true;
	}

	return false;
}

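/*
 * Print a stack trace by walking the frame-pointer chain and resolving
 * each saved %rip with ksyms.  Stop at an assembly entry point, when
 * the return address drops below KERNBASE, when the frame pointer is
 * zero, when the symbol cannot be resolved, or after 15 frames.
 */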
static void
kmsan_md_unwind(void)
{
	uint64_t *rbp, rip;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	rbp = (uint64_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/* 8(%rbp) contains the saved %rip. */
		rip = *(rbp + 1);

		if (rip < KERNBASE) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)rip, KSYMS_PROC);
		if (error) {
			break;
		}
		kmsan_printf("#%zu %p in %s <%s>\n", nsym, (void *)rip, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		rbp = (uint64_t *)*(rbp);
		if (rbp == 0) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}