/*-
 * Copyright (c) 2021 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This manages pointer authentication. As it needs to enable the use of
 * pointer authentication and change the keys we must build this with
 * pointer authentication disabled.
 */
#ifdef __ARM_FEATURE_PAC_DEFAULT
#error Must be built with pointer authentication disabled
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/proc.h>
#include <sys/reboot.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/cpu_feat.h>
#include <machine/reg.h>
#include <machine/vmparam.h>

/* SCTLR_EL1 bits enabling the instruction (A/B) and data (A/B) keys */
#define	SCTLR_PTRAUTH	(SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)

static bool __read_mostly enable_ptrauth = false;

/* Functions called from assembly. */
void ptrauth_start(void);
struct thread *ptrauth_switch(struct thread *);
void ptrauth_exit_el0(struct thread *);
void ptrauth_enter_el0(struct thread *);

static bool
ptrauth_disable(void)
{
	const char *family, *maker, *product;

	family = kern_getenv("smbios.system.family");
	maker = kern_getenv("smbios.system.maker");
	product = kern_getenv("smbios.system.product");
	if (family == NULL || maker == NULL || product == NULL)
		return (false);

	/*
	 * The Dev Kit appears to be configured to trap upon access to PAC
	 * registers, but the kernel boots at EL1 and so we have no way to
	 * inspect or change this configuration. As a workaround, simply
	 * disable PAC on this platform.
	 */
	if (strcmp(maker, "Microsoft Corporation") == 0 &&
	    strcmp(family, "Surface") == 0 &&
	    strcmp(product, "Windows Dev Kit 2023") == 0)
		return (true);

	return (false);
}

static bool
ptrauth_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
	uint64_t isar1;
	int pac_enable;

	/*
	 * Allow the sysadmin to disable pointer authentication globally,
	 * e.g. on broken hardware.
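	 * The hw.pac.enable tunable fetched below is the knob: for
	 * example, setting hw.pac.enable=0 in loader.conf(5) disables it.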
	 */
	pac_enable = 1;
	TUNABLE_INT_FETCH("hw.pac.enable", &pac_enable);
	if (!pac_enable) {
		if (boothowto & RB_VERBOSE)
			printf("Pointer authentication is disabled\n");
		return (false);
	}

	if (!get_kernel_reg(ID_AA64ISAR1_EL1, &isar1))
		return (false);

	if (ptrauth_disable())
		return (false);

	/*
	 * This assumes if there is pointer authentication on the boot CPU
	 * it will also be available on any non-boot CPUs. If this is ever
	 * not the case we will have to add a quirk.
	 */
	return (ID_AA64ISAR1_APA_VAL(isar1) > 0 ||
	    ID_AA64ISAR1_API_VAL(isar1) > 0);
}

static void
ptrauth_enable(const struct cpu_feat *feat __unused,
    cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
    u_int errata_count __unused)
{
	enable_ptrauth = true;
	elf64_addr_mask.code |= PAC_ADDR_MASK;
	elf64_addr_mask.data |= PAC_ADDR_MASK;
}

static struct cpu_feat feat_pauth = {
	.feat_name	= "FEAT_PAuth",
	.feat_check	= ptrauth_check,
	.feat_enable	= ptrauth_enable,
	.feat_flags	= CPU_FEAT_EARLY_BOOT | CPU_FEAT_SYSTEM,
};
DATA_SET(cpu_feat_set, feat_pauth);

/* Copy the keys when forking a new process */
void
ptrauth_fork(struct thread *new_td, struct thread *orig_td)
{
	if (!enable_ptrauth)
		return;

	memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
	    sizeof(new_td->td_md.md_ptrauth_user));
}

/* Generate new userspace keys when executing a new process */
void
ptrauth_exec(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	arc4rand(&td->td_md.md_ptrauth_user, sizeof(td->td_md.md_ptrauth_user),
	    0);
}

/*
 * Copy the user keys when creating a new userspace thread until it's clear
 * how the ABI expects the various keys to be assigned.
 */
void
ptrauth_copy_thread(struct thread *new_td, struct thread *orig_td)
{
	if (!enable_ptrauth)
		return;

	memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
	    sizeof(new_td->td_md.md_ptrauth_user));
}

/* Generate new kernel keys when executing a new kernel thread */
void
ptrauth_thread_alloc(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	arc4rand(&td->td_md.md_ptrauth_kern, sizeof(td->td_md.md_ptrauth_kern),
	    0);
}

/*
 * Load the keys. We can't use WRITE_SPECIALREG as we need
 * to set the architecture extension.
 */
#define	LOAD_KEY(space, name, reg)					\
__asm __volatile(							\
	"msr	"__XSTRING(MRS_REG_ALT_NAME(reg ## KeyLo_EL1))", %0	\n" \
	"msr	"__XSTRING(MRS_REG_ALT_NAME(reg ## KeyHi_EL1))", %1	\n" \
	:: "r"(td->td_md.md_ptrauth_##space.name.pa_key_lo),		\
	   "r"(td->td_md.md_ptrauth_##space.name.pa_key_hi))

void
ptrauth_thread0(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	/* TODO: Generate a random number here */
	memset(&td->td_md.md_ptrauth_kern, 0,
	    sizeof(td->td_md.md_ptrauth_kern));
	LOAD_KEY(kern, apia, APIA);
	/*
	 * No isb as this is called before ptrauth_start so can rely on
	 * the instruction barrier there.
	 */
}

/*
 * Enable pointer authentication. After this point userspace and the kernel
 * can sign return addresses, etc. based on their keys.
 *
 * This assumes either all or no CPUs have pointer authentication support,
 * and, if supported, all CPUs have the same algorithm.
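 *
 * The SCTLR_EnIA/EnIB/EnDA/EnDB bits set here enable the instruction
 * (A/B key) and data (A/B key) authentication instructions at EL1 and EL0.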
 */
void
ptrauth_start(void)
{
	uint64_t sctlr;

	if (!enable_ptrauth)
		return;

	/* Enable pointer authentication */
	sctlr = READ_SPECIALREG(sctlr_el1);
	sctlr |= SCTLR_PTRAUTH;
	WRITE_SPECIALREG(sctlr_el1, sctlr);
	isb();
}

#ifdef SMP
void
ptrauth_mp_start(uint64_t cpu)
{
	struct ptrauth_key start_key;
	uint64_t sctlr;

	if (!enable_ptrauth)
		return;

	/*
	 * We need a key until we call sched_throw, however we don't have
	 * a thread until then. Create a key just for use within
	 * init_secondary and whatever it calls. As init_secondary never
	 * returns it is safe to do so from within it.
	 *
	 * As it's only used for a short length of time just use the cpu
	 * as the key.
	 */
	start_key.pa_key_lo = cpu;
	start_key.pa_key_hi = ~cpu;

	__asm __volatile(
	    ".arch_extension pauth			\n"
	    "msr	"__XSTRING(APIAKeyLo_EL1_REG)", %0	\n"
	    "msr	"__XSTRING(APIAKeyHi_EL1_REG)", %1	\n"
	    ".arch_extension nopauth		\n"
	    :: "r"(start_key.pa_key_lo), "r"(start_key.pa_key_hi));

	/* Enable pointer authentication */
	sctlr = READ_SPECIALREG(sctlr_el1);
	sctlr |= SCTLR_PTRAUTH;
	WRITE_SPECIALREG(sctlr_el1, sctlr);
	isb();
}
#endif

/* Load the new thread's kernel key on a context switch */
struct thread *
ptrauth_switch(struct thread *td)
{
	if (enable_ptrauth) {
		LOAD_KEY(kern, apia, APIA);
		isb();
	}

	return (td);
}

/* Called when we are exiting userspace and entering the kernel */
void
ptrauth_exit_el0(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	LOAD_KEY(kern, apia, APIA);
	isb();
}

/* Called when we are about to exit the kernel and enter userspace */
void
ptrauth_enter_el0(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	LOAD_KEY(user, apia, APIA);
	LOAD_KEY(user, apib, APIB);
	LOAD_KEY(user, apda, APDA);
	LOAD_KEY(user, apdb, APDB);
	LOAD_KEY(user, apga, APGA);
	/*
	 * No isb as this is called from the exception handler so can rely
	 * on the eret instruction to be the needed context synchronizing
	 * event.
	 */
}