/*	$NetBSD: asan.h,v 1.3 2020/07/19 11:47:48 skrll Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/atomic.h>
#include <sys/ksyms.h>

#include <arm/vmparam.h>
#include <arm/arm32/machdep.h>
#include <arm/arm32/pmap.h>

#define KASAN_MD_SHADOW_START	VM_KERNEL_KASAN_BASE
#define KASAN_MD_SHADOW_END	VM_KERNEL_KASAN_END
#define __MD_KERNMEM_BASE	KERNEL_BASE

/* Return the shadow byte that tracks the given kernel address. */
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

/* Addresses outside the kernel VA range have no shadow and are not tracked. */
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return addr < VM_MIN_KERNEL_ADDRESS ||
	    addr >= KASAN_MD_SHADOW_START;
}
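
/*
 * Illustrative note (not part of the original header): with the usual KASAN
 * scale of KASAN_SHADOW_SCALE_SHIFT == 3, one shadow byte covers 8 bytes of
 * kernel memory.  For example, for va = __MD_KERNMEM_BASE + 0x1000, the
 * computation in kasan_md_addr_to_shad() above yields
 * KASAN_MD_SHADOW_START + (0x1000 >> 3), i.e. the shadow byte at offset
 * 0x200 into the shadow region.
 */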

/* -------------------------------------------------------------------------- */

/*
 * Early mapping, used to map just the stack at boot time. We rely on the fact
 * that VA = PA + KERNEL_BASE.
 */

#define KASAN_NEARLYPAGES	3

static bool __md_early __read_mostly;
/* Bootstrap pages and their counter, explicitly placed in .data. */
static size_t __md_nearlypages __attribute__((__section__(".data")));
static uint8_t __md_earlypages[KASAN_NEARLYPAGES * PAGE_SIZE]
    __aligned(PAGE_SIZE) __attribute__((__section__(".data")));

/*
 * Allocate a physical page for the shadow map: from the statically
 * reserved early pages at boot time, and from UVM afterwards.
 */
static paddr_t
__md_palloc(void)
{
	paddr_t pa;

	if (__predict_false(__md_early)) {
		KASSERTMSG(__md_nearlypages < KASAN_NEARLYPAGES,
		    "__md_nearlypages %zu", __md_nearlypages);

		vaddr_t va = (vaddr_t)(&__md_earlypages[0] +
		    __md_nearlypages * PAGE_SIZE);
		__md_nearlypages++;
		__builtin_memset((void *)va, 0, PAGE_SIZE);

		return KERN_VTOPHYS(va);
	}

	if (!uvm.page_init_done) {
		if (uvm_page_physget(&pa) == false)
			panic("KASAN can't get a page");

		return pa;
	}

	struct vm_page *pg;
retry:
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL) {
		uvm_wait(__func__);
		goto retry;
	}
	pa = VM_PAGE_TO_PHYS(pg);

	return pa;
}

/*
 * Map a page of the shadow at the given (page-aligned) VA, allocating
 * an L2 page table first if the VA is not yet covered by one.
 */
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	const uint32_t mask = L1_TABLE_SIZE - 1;
	const paddr_t ttb = (paddr_t)(armreg_ttbr1_read() & ~mask);
	pd_entry_t * const pdep = (pd_entry_t *)KERN_PHYSTOV(ttb);

	const size_t l1slot = l1pte_index(va);
	vaddr_t l2ptva;

	KASSERT((va & PAGE_MASK) == 0);
	KASSERT(__md_early || l1pte_page_p(pdep[l1slot]));

	if (!l1pte_page_p(pdep[l1slot])) {
		KASSERT(__md_early);
		const paddr_t l2ptpa = __md_palloc();
		const vaddr_t segl2va = va & -L2_S_SEGSIZE;
		const size_t segl1slot = l1pte_index(segl2va);

		const pd_entry_t npde =
		    L1_C_PROTO | l2ptpa | L1_C_DOM(PMAP_DOMAIN_KERNEL);

		l1pte_set(pdep + segl1slot, npde);
		PDE_SYNC_RANGE(pdep, PAGE_SIZE / L2_T_SIZE);

		l2ptva = KERN_PHYSTOV(l1pte_pa(pdep[l1slot]));
	} else {
		/*
		 * The shadow map area L2PTs were allocated and mapped
		 * by arm32_kernel_vm_init. Use the array of pv_addr_t
		 * to get the l2ptva.
		 */
		extern pv_addr_t kasan_l2pt[];
		const size_t off = va - KASAN_MD_SHADOW_START;
		const size_t segoff = off & (L2_S_SEGSIZE - 1);
		const size_t idx = off / L2_S_SEGSIZE;
		const vaddr_t segl2ptva = kasan_l2pt[idx].pv_va;
		l2ptva = segl2ptva + l1pte_index(segoff) * L2_TABLE_SIZE_REAL;
	}

	pt_entry_t *l2pt = (pt_entry_t *)l2ptva;
	pt_entry_t * const ptep = &l2pt[l2pte_index(va)];

	if (!l2pte_valid_p(*ptep)) {
		const int prot = VM_PROT_READ | VM_PROT_WRITE;
		const paddr_t pa = __md_palloc();
		pt_entry_t npte =
		    L2_S_PROTO |
		    pa |
		    pte_l2_s_cache_mode_pt |
		    L2_S_PROT(PTE_KERNEL, prot);

		l2pte_set(ptep, npte, 0);
		PTE_SYNC(ptep);
		__builtin_memset((void *)va, 0, PAGE_SIZE);
	}
}

/*
 * Map the init stacks of the BP and APs. We will map the rest in kasan_init.
 */
#define INIT_ARM_STACK_SHIFT	10
#define INIT_ARM_STACK_SIZE	(1 << INIT_ARM_STACK_SHIFT)

static void
kasan_md_early_init(void *stack)
{

	__md_early = true;
	__md_nearlypages = 0;
	kasan_shadow_map(stack, INIT_ARM_STACK_SIZE * MAXCPUS);
	__md_early = false;
}
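
/*
 * Illustrative note (assumption, not in the original file): the MI entry
 * point kasan_shadow_map() in sys/kern/subr_asan.c calls back into
 * kasan_md_shadow_map_page() above for each shadow page it needs, so the
 * early/late split only changes where backing pages come from
 * (__md_earlypages vs. UVM), not how the shadow is populated.
 */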

static void
kasan_md_init(void)
{
	extern vaddr_t kasan_kernelstart;
	extern vaddr_t kasan_kernelsize;

	kasan_shadow_map((void *)kasan_kernelstart, kasan_kernelsize);

	/* Map the shadow for the VAs we have created until now. */
	vaddr_t eva;

	eva = pmap_growkernel(VM_KERNEL_VM_BASE);
	kasan_shadow_map((void *)VM_KERNEL_VM_BASE, eva - VM_KERNEL_VM_BASE);
}

/* Return true if the symbol marks the outermost frame of a kernel entry. */
static inline bool
__md_unwind_end(const char *name)
{
	static const char * const vectors[] = {
		"undefined_entry",
		"swi_entry",
		"prefetch_abort_entry",
		"data_abort_entry",
		"address_exception_entry",
		"irq_entry",
		"fiqvector"
	};

	for (size_t i = 0; i < __arraycount(vectors); i++) {
		if (!strncmp(name, vectors[i], strlen(vectors[i])))
			return true;
	}

	return false;
}

/* Print a stack trace by walking the APCS frame-pointer chain. */
static void
kasan_md_unwind(void)
{
	uint32_t lr, *fp;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	fp = (uint32_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/*
		 * normal frame
		 *  fp[ 0]	saved code pointer
		 *  fp[-1]	saved lr value
		 *  fp[-2]	saved sp value
		 *  fp[-3]	saved fp value
		 */
		lr = fp[-1];

		if (lr < VM_MIN_KERNEL_ADDRESS) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)lr, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)lr, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		fp = (uint32_t *)fp[-3];
		if (fp == NULL) {
			break;
		}
		nsym++;

		/* Bound the trace depth. */
		if (nsym >= 15) {
			break;
		}
	}
}
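
/*
 * Illustrative example (not part of the original header): per the printf
 * format above, a trace from kasan_md_unwind() has one line per frame,
 * with made-up addresses and symbol names, e.g.:
 *
 *	#0 0xc01f2abc in some_callee <netbsd>
 *	#1 0xc01f1234 in some_caller <netbsd>
 *	...
 *	#7 0xc0100d00 in swi_entry <netbsd>
 *
 * where the trace ends at one of the entry-point symbols listed in
 * __md_unwind_end().
 */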