/* $NetBSD: asan.h,v 1.12 2022/09/13 09:39:49 riastradh Exp $ */

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the KASAN subsystem of the NetBSD kernel.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _AMD64_ASAN_H_
#define _AMD64_ASAN_H_

#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <amd64/pmap.h>
#include <amd64/vmparam.h>

#include <x86/bootspace.h>

#include <machine/pmap_private.h>

#ifdef __HAVE_PCPU_AREA
#error "PCPU area not allowed with KASAN"
#endif
#ifdef __HAVE_DIRECT_MAP
#error "DMAP not allowed with KASAN"
#endif

#define __MD_VIRTUAL_SHIFT 47 /* 48-bit address space, cut in half */
#define __MD_KERNMEM_BASE 0xFFFF800000000000 /* kernel memory base address */

#define __MD_SHADOW_SIZE (1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START (VA_SIGN_NEG((L4_SLOT_KASAN * NBPD_L4)))
#define KASAN_MD_SHADOW_END (KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)

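/*
 * Illustration, assuming the usual MI value KASAN_SHADOW_SCALE_SHIFT = 3
 * (one shadow byte per 8 bytes of kernel memory): the monitored range is
 * 2^47 bytes, so the shadow is 2^(47-3) = 2^44 bytes = 16TB. With
 * NBPD_L4 = 512GB per slot, that is 16TB / 512GB = 32 L4 slots, which is
 * what the CTASSERT in kasan_md_init() below checks against
 * NL4_SLOT_KASAN.
 */
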
/* -------------------------------------------------------------------------- */

/*
 * Early mapping, used to map just the stack at boot time. We rely on the fact
 * that VA = PA + KERNBASE.
 */

static bool __md_early __read_mostly = true;
static uint8_t __md_earlypages[8 * PAGE_SIZE] __aligned(PAGE_SIZE);
static size_t __md_earlytaken = 0;

static paddr_t
__md_early_palloc(void)
{
        paddr_t ret;

        KASSERT(__md_earlytaken < 8);

        ret = (paddr_t)(&__md_earlypages[0] + __md_earlytaken * PAGE_SIZE);
        __md_earlytaken++;

        ret -= KERNBASE;

        return ret;
}

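/*
 * Walk the bootstrap page tables rooted at bootspace.pdir by hand,
 * allocating any missing intermediate level from the static
 * __md_earlypages pool above (at most 8 pages). Physical addresses read
 * back from the PTEs are converted to virtual ones with the early
 * VA = PA + KERNBASE identity.
 */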
static void
__md_early_shadow_map_page(vaddr_t va)
{
        extern struct bootspace bootspace;
        const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P;
        pt_entry_t *pdir = (pt_entry_t *)bootspace.pdir;
        paddr_t pa;

        if (!pmap_valid_entry(pdir[pl4_pi(va)])) {
                pa = __md_early_palloc();
                pdir[pl4_pi(va)] = pa | pteflags;
        }
        pdir = (pt_entry_t *)((pdir[pl4_pi(va)] & PTE_FRAME) + KERNBASE);

        if (!pmap_valid_entry(pdir[pl3_pi(va)])) {
                pa = __md_early_palloc();
                pdir[pl3_pi(va)] = pa | pteflags;
        }
        pdir = (pt_entry_t *)((pdir[pl3_pi(va)] & PTE_FRAME) + KERNBASE);

        if (!pmap_valid_entry(pdir[pl2_pi(va)])) {
                pa = __md_early_palloc();
                pdir[pl2_pi(va)] = pa | pteflags;
        }
        pdir = (pt_entry_t *)((pdir[pl2_pi(va)] & PTE_FRAME) + KERNBASE);

        if (!pmap_valid_entry(pdir[pl1_pi(va)])) {
                pa = __md_early_palloc();
                pdir[pl1_pi(va)] = pa | pteflags | pmap_pg_g;
        }
}

/* -------------------------------------------------------------------------- */

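/*
 * Worked example, assuming the usual MI scale shift of 3: for
 * va = __MD_KERNMEM_BASE + 0x10000, the shadow byte lives at
 * KASAN_MD_SHADOW_START + (0x10000 >> 3) = KASAN_MD_SHADOW_START + 0x2000.
 */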
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
        vaddr_t va = (vaddr_t)addr;
        return (int8_t *)(KASAN_MD_SHADOW_START +
            ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

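/*
 * The recursive page-table window (one L4 slot starting at PTE_BASE) has
 * no shadow; report it as unsupported so the KASAN core does not try to
 * check accesses falling in that range.
 */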
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
        return (addr >= (vaddr_t)PTE_BASE &&
            addr < ((vaddr_t)PTE_BASE + NBPD_L4));
}

static paddr_t
__md_palloc(void)
{
        /* The page is zeroed. */
        return pmap_get_physpage();
}

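/*
 * Try to grab a physically contiguous, 2MB-aligned chunk of NBPD_L2 (2MB)
 * so the shadow can be mapped with a single L2 superpage; assuming the
 * usual 8-byte granule, one such superpage shadows 16MB of kernel memory.
 * Only possible once uvm_pglistalloc() is usable, i.e. after
 * uvm.page_init_done.
 */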
static inline paddr_t
__md_palloc_large(void)
{
        struct pglist pglist;
        int ret;

        if (!uvm.page_init_done)
                return 0;

        ret = uvm_pglistalloc(NBPD_L2, 0, ~0UL, NBPD_L2, 0,
            &pglist, 1, 0);
        if (ret != 0)
                return 0;

        /* The page may not be zeroed. */
        return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}

static void
kasan_md_shadow_map_page(vaddr_t va)
{
        const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P;
        paddr_t pa;

        if (__predict_false(__md_early)) {
                __md_early_shadow_map_page(va);
                return;
        }

        if (!pmap_valid_entry(L4_BASE[pl4_i(va)])) {
                pa = __md_palloc();
                L4_BASE[pl4_i(va)] = pa | pteflags;
        }
        if (!pmap_valid_entry(L3_BASE[pl3_i(va)])) {
                pa = __md_palloc();
                L3_BASE[pl3_i(va)] = pa | pteflags;
        }
        if (!pmap_valid_entry(L2_BASE[pl2_i(va)])) {
                if ((pa = __md_palloc_large()) != 0) {
                        L2_BASE[pl2_i(va)] = pa | pteflags | PTE_PS |
                            pmap_pg_g;
                        __insn_barrier();
                        __builtin_memset((void *)va, 0, NBPD_L2);
                        return;
                }
                pa = __md_palloc();
                L2_BASE[pl2_i(va)] = pa | pteflags;
        } else if (L2_BASE[pl2_i(va)] & PTE_PS) {
                return;
        }
        if (!pmap_valid_entry(L1_BASE[pl1_i(va)])) {
                pa = __md_palloc();
                L1_BASE[pl1_i(va)] = pa | pteflags | pmap_pg_g;
        }
}

/*
 * Map only the current stack. We will map the rest in kasan_init.
 */
static void
kasan_md_early_init(void *stack)
{
        kasan_shadow_map(stack, USPACE);
        __md_early = false;
}

/*
 * Create the shadow mapping. We don't create the 'User' area, because it
 * is excluded from the monitoring. The 'Main' area is created dynamically
 * in pmap_growkernel.
 */
static void
kasan_md_init(void)
{
        extern struct bootspace bootspace;
        size_t i;

        CTASSERT((__MD_SHADOW_SIZE / NBPD_L4) == NL4_SLOT_KASAN);

        /* Kernel. */
        for (i = 0; i < BTSPACE_NSEGS; i++) {
                if (bootspace.segs[i].type == BTSEG_NONE) {
                        continue;
                }
                kasan_shadow_map((void *)bootspace.segs[i].va,
                    bootspace.segs[i].sz);
        }

        /* Boot region. */
        kasan_shadow_map((void *)bootspace.boot.va, bootspace.boot.sz);

        /* Module map. */
        kasan_shadow_map((void *)bootspace.smodule,
            (size_t)(bootspace.emodule - bootspace.smodule));

        /* The bootstrap spare va. */
        kasan_shadow_map((void *)bootspace.spareva, PAGE_SIZE);
}

static inline bool
__md_unwind_end(const char *name)
{
        if (!strcmp(name, "syscall") ||
            !strcmp(name, "alltraps") ||
            !strcmp(name, "handle_syscall") ||
            !strncmp(name, "Xtrap", 5) ||
            !strncmp(name, "Xintr", 5) ||
            !strncmp(name, "Xhandle", 7) ||
            !strncmp(name, "Xresume", 7) ||
            !strncmp(name, "Xstray", 6) ||
            !strncmp(name, "Xhold", 5) ||
            !strncmp(name, "Xrecurse", 8) ||
            !strcmp(name, "Xdoreti") ||
            !strncmp(name, "Xsoft", 5)) {
                return true;
        }

        return false;
}

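/*
 * Print a best-effort backtrace by walking the frame-pointer chain (the
 * kernel must be built with frame pointers for this to work): (%rbp)
 * holds the caller's saved %rbp and 8(%rbp) the return %rip. Stop at the
 * first trap/interrupt/syscall entry stub (see __md_unwind_end() above),
 * on a %rip below KERNBASE, an unresolvable symbol, a NULL %rbp, or after
 * at most 15 frames.
 */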
static void
kasan_md_unwind(void)
{
        uint64_t *rbp, rip;
        const char *mod;
        const char *sym;
        size_t nsym;
        int error;

        rbp = (uint64_t *)__builtin_frame_address(0);
        nsym = 0;

        while (1) {
                /* 8(%rbp) contains the saved %rip. */
                rip = *(rbp + 1);

                if (rip < KERNBASE) {
                        break;
                }
                error = ksyms_getname(&mod, &sym, (vaddr_t)rip, KSYMS_PROC);
                if (error) {
                        break;
                }
                printf("#%zu %p in %s <%s>\n", nsym, (void *)rip, sym, mod);
                if (__md_unwind_end(sym)) {
                        break;
                }

                rbp = (uint64_t *)*(rbp);
                if (rbp == 0) {
                        break;
                }
                nsym++;

                if (nsym >= 15) {
                        break;
                }
        }
}

#endif /* _AMD64_ASAN_H_ */