/*	$OpenBSD: exec_subr.c,v 1.68 2024/11/02 10:02:23 jsg Exp $	*/
/*	$NetBSD: exec_subr.c,v 1.9 1994/12/04 03:10:42 mycroft Exp $	*/

/*
 * Copyright (c) 1993, 1994 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/exec.h>
#include <sys/mman.h>
#include <sys/resourcevar.h>

#include <uvm/uvm_extern.h>

/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
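 *	the vnode reference taken here is dropped again by
 *	kill_vmcmds() once the set has been processed.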
 */

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct proc *, struct exec_vmcmd *), u_long len, u_long addr,
    struct vnode *vp, u_long offset, u_int prot, int flags)
{
	struct exec_vmcmd *vcp;

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
	vcp->ev_flags = flags;
}

void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	ocnt = evsp->evs_cnt;
	KASSERT(ocnt > 0);
	/* figure out number of entries in new set */
	evsp->evs_cnt += ocnt;

	/* reallocate the command set */
	nvcp = mallocarray(evsp->evs_cnt, sizeof(*nvcp), M_EXEC,
	    M_WAITOK);
	memcpy(nvcp, evsp->evs_cmds, ocnt * sizeof(*nvcp));
	if (evsp->evs_cmds != evsp->evs_start)
		free(evsp->evs_cmds, M_EXEC, ocnt * sizeof(*nvcp));
	evsp->evs_cmds = nvcp;
}

/*
 * kill_vmcmds():
 *	release the vnode references held by a vmcmd set and reset
 *	the set to its initial, statically allocated storage.
 */

void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *vcp;
	int i;

	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULLVP)
			vrele(vcp->ev_vp);
	}

	/*
	 * Free old vmcmds and reset the array.
	 */
	evsp->evs_used = 0;
	if (evsp->evs_cmds != evsp->evs_start)
		free(evsp->evs_cmds, M_EXEC,
		    evsp->evs_cnt * sizeof(struct exec_vmcmd));
	evsp->evs_cmds = evsp->evs_start;
	evsp->evs_cnt = EXEC_DEFAULT_VMCMD_SETSIZE;
}

/*
 * exec_process_vmcmds():
 *	run the vmcmds queued in an exec package, stopping at the first
 *	error.  the set is torn down afterwards in either case.
 */

int
exec_process_vmcmds(struct proc *p, struct exec_package *epp)
{
	struct exec_vmcmd *base_vc = NULL;
	int error = 0;
	int i;

	for (i = 0; i < epp->ep_vmcmds.evs_used && !error; i++) {
		struct exec_vmcmd *vcp;

		vcp = &epp->ep_vmcmds.evs_cmds[i];

		if (vcp->ev_flags & VMCMD_RELATIVE) {
#ifdef DIAGNOSTIC
			if (base_vc == NULL)
				panic("exec_process_vmcmds: RELATIVE no base");
#endif
			vcp->ev_addr += base_vc->ev_addr;
		}
		error = (*vcp->ev_proc)(p, vcp);
		if (vcp->ev_flags & VMCMD_BASE) {
			base_vc = vcp;
		}
	}

	kill_vmcmds(&epp->ep_vmcmds);

	return (error);
}

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(struct proc *p, struct exec_vmcmd *cmd)
{
	/*
	 * note that if you're going to map part of a process as being
	 * paged from a vnode, that vnode had damn well better be marked as
	 * VTEXT.  that's handled in the routine which sets up the vmcmd to
	 * call this routine.
	 */
	struct uvm_object *uobj;
	unsigned int flags = UVM_FLAG_COPYONW | UVM_FLAG_FIXED;
	int error;

	/*
	 * map the vnode in using uvm_map.
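	 * the mapping is entered fixed and copy-on-write (see "flags"
	 * above); the offset, address and length must all be
	 * page-aligned, which is checked first.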
	 */

	if (cmd->ev_len == 0)
		return (0);
	if (cmd->ev_offset & PAGE_MASK)
		return (EINVAL);
	if (cmd->ev_addr & PAGE_MASK)
		return (EINVAL);
	if (cmd->ev_len & PAGE_MASK)
		return (EINVAL);

	/*
	 * first, attach to the object
	 */

	uobj = uvn_attach(cmd->ev_vp, PROT_READ | PROT_EXEC);
	if (uobj == NULL)
		return (ENOMEM);

	/*
	 * do the map
	 */
	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
	    uobj, cmd->ev_offset, 0,
	    UVM_MAPFLAG(cmd->ev_prot, PROT_MASK, MAP_INHERIT_COPY,
	    MADV_NORMAL, flags));

	/*
	 * check for error
	 */

	if (error) {
		/*
		 * error: detach from object
		 */
		uobj->pgops->pgo_detach(uobj);
	} else {
		if (cmd->ev_flags & VMCMD_IMMUTABLE)
			uvm_map_immutable(&p->p_vmspace->vm_map, cmd->ev_addr,
			    round_page(cmd->ev_addr + cmd->ev_len), 1);
#ifdef PMAP_CHECK_COPYIN
		if (PMAP_CHECK_COPYIN &&
		    (cmd->ev_flags & VMCMD_IMMUTABLE) &&
		    (cmd->ev_prot & PROT_EXEC))
			uvm_map_check_copyin_add(&p->p_vmspace->vm_map,
			    cmd->ev_addr,
			    round_page(cmd->ev_addr + cmd->ev_len));
#endif
	}

	return (error);
}

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */

int
vmcmd_map_readvn(struct proc *p, struct exec_vmcmd *cmd)
{
	int error;
	vm_prot_t prot;

	if (cmd->ev_len == 0)
		return (0);

	prot = cmd->ev_prot;

	KASSERT((cmd->ev_addr & PAGE_MASK) == 0);
	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(prot | PROT_WRITE, PROT_MASK, MAP_INHERIT_COPY,
	    MADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

	if (error)
		return (error);

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (caddr_t)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    p->p_ucred, NULL, p);
	if (error)
		return (error);

	if ((prot & PROT_WRITE) == 0) {
		/*
		 * we had to map in the area at PROT_WRITE so that vn_rdwr()
		 * could write to it.  however, the caller seems to want
		 * it mapped read-only, so now we are going to have to call
		 * uvm_map_protect() to fix up the protection.  ICK.
		 */
		error = (uvm_map_protect(&p->p_vmspace->vm_map,
		    cmd->ev_addr, round_page(cmd->ev_len),
		    prot, 0, FALSE, TRUE));
	}
	if (error == 0) {
		if (cmd->ev_flags & VMCMD_IMMUTABLE)
			uvm_map_immutable(&p->p_vmspace->vm_map, cmd->ev_addr,
			    round_page(cmd->ev_addr + cmd->ev_len), 1);
	}
	return (error);
}

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.
 */

int
vmcmd_map_zero(struct proc *p, struct exec_vmcmd *cmd)
{
	int error;

	if (cmd->ev_len == 0)
		return (0);

	KASSERT((cmd->ev_addr & PAGE_MASK) == 0);
	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(cmd->ev_prot, PROT_MASK, MAP_INHERIT_COPY,
	    MADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_COPYONW |
	    (cmd->ev_flags & VMCMD_STACK ? UVM_FLAG_STACK : 0)));
	if (cmd->ev_flags & VMCMD_IMMUTABLE)
		uvm_map_immutable(&p->p_vmspace->vm_map, cmd->ev_addr,
		    round_page(cmd->ev_addr + cmd->ev_len), 1);
	return error;
}
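
/*
 * note: vmcmd_map_zero() also maps the stack: exec_setup_stack() below
 * queues it with VMCMD_STACK so the region is entered with
 * UVM_FLAG_STACK set.
 */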

/*
 * vmcmd_mutable():
 *	handle vmcmd which changes an address space region back to mutable
 */

int
vmcmd_mutable(struct proc *p, struct exec_vmcmd *cmd)
{
	if (cmd->ev_len == 0)
		return (0);

	/* ev_addr, ev_len may be misaligned, so maximize the region */
	uvm_map_immutable(&p->p_vmspace->vm_map, trunc_page(cmd->ev_addr),
	    round_page(cmd->ev_addr + cmd->ev_len), 0);
	return 0;
}

/*
 * vmcmd_randomize():
 *	handle vmcmd which specifies a randomized address space region.
 */
#define RANDOMIZE_CTX_THRESHOLD	512

int
vmcmd_randomize(struct proc *p, struct exec_vmcmd *cmd)
{
	int error;
	struct arc4random_ctx *ctx;
	char *buf;
	size_t sublen, off = 0;
	size_t len = cmd->ev_len;

	if (len == 0)
		return (0);
	if (len > ELF_RANDOMIZE_LIMIT)
		return (EINVAL);

	buf = malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
	if (len < RANDOMIZE_CTX_THRESHOLD) {
		arc4random_buf(buf, len);
		error = copyout(buf, (void *)cmd->ev_addr, len);
		explicit_bzero(buf, len);
	} else {
		ctx = arc4random_ctx_new();
		do {
			sublen = MIN(len, PAGE_SIZE);
			arc4random_ctx_buf(ctx, buf, sublen);
			error = copyout(buf, (void *)cmd->ev_addr + off,
			    sublen);
			if (error)
				break;
			off += sublen;
			len -= sublen;
			sched_pause(yield);
		} while (len);
		arc4random_ctx_free(ctx);
		explicit_bzero(buf, PAGE_SIZE);
	}
	free(buf, M_TEMP, PAGE_SIZE);
	return (error);
}

#ifndef MAXSSIZ_GUARD
#define MAXSSIZ_GUARD	(1024 * 1024)
#endif

/*
 * exec_setup_stack(): Set up the stack segment for an executable.
 *
 * Note that the ep_ssize parameter must be set to be the current stack
 * limit; this is adjusted in the body of execve() to yield the
 * appropriate stack segment usage once the argument length is
 * calculated.
 *
 * This function returns an int for uniformity with other (future) formats'
 * stack setup functions.  They might have errors to return.
 */

int
exec_setup_stack(struct proc *p, struct exec_package *epp)
{
	vsize_t dist = 0;

#ifdef MACHINE_STACK_GROWS_UP
	epp->ep_maxsaddr = USRSTACK;
	epp->ep_minsaddr = USRSTACK + MAXSSIZ;
#else
	epp->ep_maxsaddr = USRSTACK - MAXSSIZ - MAXSSIZ_GUARD;
	epp->ep_minsaddr = USRSTACK;
#endif
	epp->ep_ssize = round_page(lim_cur(RLIMIT_STACK));

#ifdef VM_MIN_STACK_ADDRESS
	dist = USRSTACK - MAXSSIZ - MAXSSIZ_GUARD - VM_MIN_STACK_ADDRESS;
	if (dist >> PAGE_SHIFT > 0xffffffff)
		dist = (vsize_t)arc4random() << PAGE_SHIFT;
	else
		dist = (vsize_t)arc4random_uniform(dist >> PAGE_SHIFT) <<
		    PAGE_SHIFT;
#else
	if (stackgap_random != 0) {
		dist = arc4random() & (stackgap_random - 1);
		dist = trunc_page(dist);
	}
#endif

#ifdef MACHINE_STACK_GROWS_UP
	epp->ep_maxsaddr += dist;
	epp->ep_minsaddr += dist;
#else
	epp->ep_maxsaddr -= dist;
	epp->ep_minsaddr -= dist;
#endif

	/*
	 * set up commands for stack.  note that this takes *two*, one to
	 * map the part of the stack which we can access, and one to map
	 * the part which we can't.
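	 * the inaccessible part is mapped PROT_NONE, and both commands
	 * are marked VMCMD_IMMUTABLE.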
	 *
	 * arguably, it could be made into one, but that would require the
	 * addition of another mapping proc, which is unnecessary
	 *
	 * note that in memory, things assumed to be: 0 ....... ep_maxsaddr
	 * <stack> ep_minsaddr
	 */
#ifdef MACHINE_STACK_GROWS_UP
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero,
	    ((epp->ep_minsaddr - epp->ep_ssize) - epp->ep_maxsaddr),
	    epp->ep_maxsaddr + epp->ep_ssize,
	    NULLVP, 0, PROT_NONE, VMCMD_IMMUTABLE);
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, epp->ep_ssize,
	    epp->ep_maxsaddr,
	    NULLVP, 0, PROT_READ | PROT_WRITE, VMCMD_STACK | VMCMD_IMMUTABLE);
#else
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero,
	    ((epp->ep_minsaddr - epp->ep_ssize) - epp->ep_maxsaddr),
	    epp->ep_maxsaddr,
	    NULLVP, 0, PROT_NONE, VMCMD_IMMUTABLE);
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, epp->ep_ssize,
	    (epp->ep_minsaddr - epp->ep_ssize),
	    NULLVP, 0, PROT_READ | PROT_WRITE, VMCMD_STACK | VMCMD_IMMUTABLE);
#endif

	return (0);
}
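
/*
 * Illustrative sketch, not part of this file: an exec format handler
 * (e.g. the ELF loader) queues vmcmds while it parses the on-disk
 * image, and execve() later runs the whole set through
 * exec_process_vmcmds().  with hypothetical segment values it looks
 * roughly like:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, textsize,
 *	    textaddr, vp, textoff, PROT_READ | PROT_EXEC);
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, bsssize, bssaddr,
 *	    NULLVP, 0, PROT_READ | PROT_WRITE);
 *
 * NEW_VMCMD() (sys/exec.h) is the flags == 0 wrapper around
 * new_vmcmd(); NEW_VMCMD2(), used by exec_setup_stack() above, also
 * passes flags such as VMCMD_STACK or VMCMD_IMMUTABLE.
 */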