/*	$NetBSD: msan.h,v 1.4 2020/06/07 23:15:51 christos Exp $	*/

/*
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/ksyms.h>

#include <amd64/pmap.h>
#include <amd64/vmparam.h>

#ifdef __HAVE_PCPU_AREA
#error "PCPU area not allowed with KMSAN"
#endif
#ifdef __HAVE_DIRECT_MAP
#error "DMAP not allowed with KMSAN"
#endif

/*
 * One big shadow, divided into two sub-shadows (SHAD and ORIG), themselves
 * divided into two regions (MAIN and KERN).
 */

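/*
 * Each region spans one L4 slot (NBPD_L4 = 0x8000000000 bytes), giving
 * the following layout, with offsets from __MD_SHADOW_START:
 *
 *	+0x00000000000	SHAD, MAIN region
 *	+0x08000000000	SHAD, KERN region
 *	+0x10000000000	ORIG, MAIN region
 *	+0x18000000000	ORIG, KERN region
 *	+0x20000000000	(__MD_SHADOW_END)
 */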
#define __MD_SHADOW_SIZE	0x20000000000ULL	/* 4 * NBPD_L4 */
#define __MD_SHADOW_START	(VA_SIGN_NEG((L4_SLOT_KMSAN * NBPD_L4)))
#define __MD_SHADOW_END		(__MD_SHADOW_START + __MD_SHADOW_SIZE)

#define __MD_SHAD_MAIN_START	(__MD_SHADOW_START)
#define __MD_SHAD_KERN_START	(__MD_SHADOW_START + 0x8000000000ULL)

#define __MD_ORIG_MAIN_START	(__MD_SHAD_KERN_START + 0x8000000000ULL)
#define __MD_ORIG_KERN_START	(__MD_ORIG_MAIN_START + 0x8000000000ULL)

#define __MD_PTR_BASE		0xFFFFFFFF80000000ULL
#define __MD_ORIG_TYPE		__BITS(31,28)

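/*
 * Return the address of the shadow byte that tracks the given kernel
 * VA, at a 1:1 scale: the main kernel map goes to the MAIN region, the
 * kernel image to the KERN region. kmsan_md_addr_to_orig() below does
 * the same for the origin sub-shadow.
 */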
static inline int8_t *
kmsan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;

	if (va >= vm_min_kernel_address && va < vm_max_kernel_address) {
		return (int8_t *)(__MD_SHAD_MAIN_START + (va - vm_min_kernel_address));
	} else if (va >= KERNBASE) {
		return (int8_t *)(__MD_SHAD_KERN_START + (va - KERNBASE));
	} else {
		panic("%s: impossible, va=%p", __func__, (void *)va);
	}
}

static inline int8_t *
kmsan_md_addr_to_orig(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;

	if (va >= vm_min_kernel_address && va < vm_max_kernel_address) {
		return (int8_t *)(__MD_ORIG_MAIN_START + (va - vm_min_kernel_address));
	} else if (va >= KERNBASE) {
		return (int8_t *)(__MD_ORIG_KERN_START + (va - KERNBASE));
	} else {
		panic("%s: impossible, va=%p", __func__, (void *)va);
	}
}

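/*
 * The recursive page-table slot (one L4 slot at PTE_BASE) has no
 * shadow; accesses to it are reported as unsupported.
 */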
static inline bool
kmsan_md_unsupported(vaddr_t addr)
{
	return (addr >= (vaddr_t)PTE_BASE &&
	    addr < ((vaddr_t)PTE_BASE + NBPD_L4));
}

static inline paddr_t
__md_palloc(void)
{
	/* The page is zeroed. */
	return pmap_get_physpage();
}

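/*
 * Try to grab a physically contiguous, 2MB-aligned chunk, so that the
 * shadow can be mapped with a large page. Return 0 if UVM is not up
 * yet or the allocation fails; the caller then falls back to 4KB
 * pages.
 */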
static inline paddr_t
__md_palloc_large(void)
{
	struct pglist pglist;
	int ret;

	if (!uvm.page_init_done)
		return 0;

	/* Mark uvm_pglistalloc()'s eight arguments as initialized. */
	kmsan_init_arg(sizeof(psize_t) + 4 * sizeof(paddr_t) +
	    sizeof(struct pglist *) + 2 * sizeof(int));
	ret = uvm_pglistalloc(NBPD_L2, 0, ~0UL, NBPD_L2, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return 0;

	/* The page may not be zeroed. */
	return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}

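/*
 * Enter the PTEs shadowing one page at va, walking the page-table
 * hierarchy through the recursive mapping (L4_BASE and friends) and
 * allocating intermediate levels as needed. At L2, a 2MB large page
 * is preferred when a contiguous chunk is available.
 */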
static void
kmsan_md_shadow_map_page(vaddr_t va)
{
	const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P;
	paddr_t pa;

	KASSERT(va >= __MD_SHADOW_START && va < __MD_SHADOW_END);

	if (!pmap_valid_entry(L4_BASE[pl4_i(va)])) {
		pa = __md_palloc();
		L4_BASE[pl4_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L3_BASE[pl3_i(va)])) {
		pa = __md_palloc();
		L3_BASE[pl3_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L2_BASE[pl2_i(va)])) {
		if ((pa = __md_palloc_large()) != 0) {
			L2_BASE[pl2_i(va)] = pa | pteflags | PTE_PS |
			    pmap_pg_g;
			__insn_barrier();
			/* The chunk may not be zeroed, zero it now. */
			__builtin_memset((void *)va, 0, NBPD_L2);
			return;
		}
		pa = __md_palloc();
		L2_BASE[pl2_i(va)] = pa | pteflags;
	} else if (L2_BASE[pl2_i(va)] & PTE_PS) {
		return;
	}
	if (!pmap_valid_entry(L1_BASE[pl1_i(va)])) {
		pa = __md_palloc();
		L1_BASE[pl1_i(va)] = pa | pteflags | pmap_pg_g;
	}
}

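/*
 * Create shadow mappings for all the VA ranges mapped at boot time:
 * the kernel segments, the boot region, the module map, and the
 * bootstrap spare page.
 */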
static void
kmsan_md_init(void)
{
	extern struct bootspace bootspace;
	size_t i;

	CTASSERT((__MD_SHADOW_SIZE / NBPD_L4) == NL4_SLOT_KMSAN);

	/* Kernel. */
	for (i = 0; i < BTSPACE_NSEGS; i++) {
		if (bootspace.segs[i].type == BTSEG_NONE) {
			continue;
		}
		kmsan_shadow_map((void *)bootspace.segs[i].va,
		    bootspace.segs[i].sz);
	}

	/* Boot region. */
	kmsan_shadow_map((void *)bootspace.boot.va, bootspace.boot.sz);

	/* Module map. */
	kmsan_shadow_map((void *)bootspace.smodule,
	    (size_t)(bootspace.emodule - bootspace.smodule));

	/* The bootstrap spare va. */
	kmsan_shadow_map((void *)bootspace.spareva, PAGE_SIZE);
}

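/*
 * An origin is 32 bits: the low 32 bits of a pointer with the origin
 * type stored in bits 31:28. Kernel pointers of interest live above
 * __MD_PTR_BASE (the -2GB kernel image base), so the decoder can
 * rebuild an approximate pointer by ORing that base back in.
 */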
static inline msan_orig_t
kmsan_md_orig_encode(int type, uintptr_t ptr)
{
	msan_orig_t ret;

	ret = (ptr & 0xFFFFFFFF) & ~__MD_ORIG_TYPE;
	ret |= __SHIFTIN(type, __MD_ORIG_TYPE);

	return ret;
}

static inline void
kmsan_md_orig_decode(msan_orig_t orig, int *type, uintptr_t *ptr)
{
	*type = __SHIFTOUT(orig, __MD_ORIG_TYPE);
	*ptr = (uintptr_t)(orig & ~__MD_ORIG_TYPE) | __MD_PTR_BASE;
}

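/*
 * A program counter points into the text section, which is linked
 * below __rodata_start; anything under that boundary is taken to be
 * a PC.
 */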
static inline bool
kmsan_md_is_pc(uintptr_t ptr)
{
	extern uint8_t __rodata_start;

	return (ptr < (uintptr_t)&__rodata_start);
}

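/*
 * Return true if the symbol names an assembly entry point (syscall,
 * trap or interrupt stub), past which the frame-pointer chain does
 * not continue into C code.
 */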
static inline bool
__md_unwind_end(const char *name)
{
	if (!strcmp(name, "syscall") ||
	    !strcmp(name, "alltraps") ||
	    !strcmp(name, "handle_syscall") ||
	    !strncmp(name, "Xtrap", 5) ||
	    !strncmp(name, "Xintr", 5) ||
	    !strncmp(name, "Xhandle", 7) ||
	    !strncmp(name, "Xresume", 7) ||
	    !strncmp(name, "Xstray", 6) ||
	    !strncmp(name, "Xhold", 5) ||
	    !strncmp(name, "Xrecurse", 8) ||
	    !strcmp(name, "Xdoreti") ||
	    !strncmp(name, "Xsoft", 5)) {
		return true;
	}

	return false;
}

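/*
 * Print a stack trace, walking the chain of saved %rbp frame pointers
 * and resolving each saved %rip with ksyms. Stop at an assembly entry
 * point, a bad frame, or after 15 frames.
 */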
static void
kmsan_md_unwind(void)
{
	uint64_t *rbp, rip;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	rbp = (uint64_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/* 8(%rbp) contains the saved %rip. */
		rip = *(rbp + 1);

		if (rip < KERNBASE) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)rip, KSYMS_PROC);
		if (error) {
			break;
		}
		kmsan_printf("#%zu %p in %s <%s>\n", nsym, (void *)rip, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		/* (%rbp) contains the caller's saved %rbp. */
		rbp = (uint64_t *)*(rbp);
		if (rbp == NULL) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}
273