/*	$OpenBSD: exec_subr.c,v 1.50 2015/03/14 03:38:50 jsg Exp $	*/
/*	$NetBSD: exec_subr.c,v 1.9 1994/12/04 03:10:42 mycroft Exp $	*/

/*
 * Copyright (c) 1993, 1994 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/exec.h>
#include <sys/mman.h>
#include <sys/resourcevar.h>

#include <uvm/uvm_extern.h>

#ifdef DEBUG
/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 *
 * If not debugging, this is a macro, so it's expanded inline.
 */

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct proc *, struct exec_vmcmd *), u_long len, u_long addr,
    struct vnode *vp, u_long offset, u_int prot, int flags)
{
	struct exec_vmcmd *vcp;

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
	vcp->ev_flags = flags;
}
#endif /* DEBUG */

void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	ocnt = evsp->evs_cnt;
	KASSERT(ocnt > 0);
	/* figure out number of entries in new set */
	evsp->evs_cnt += ocnt;

	/* reallocate the command set */
	nvcp = mallocarray(evsp->evs_cnt, sizeof(*nvcp), M_EXEC,
	    M_WAITOK);
	memcpy(nvcp, evsp->evs_cmds, ocnt * sizeof(*nvcp));
	if (evsp->evs_cmds != evsp->evs_start)
		free(evsp->evs_cmds, M_EXEC, ocnt * sizeof(*nvcp));
	evsp->evs_cmds = nvcp;
}

void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *vcp;
	int i;

	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULLVP)
			vrele(vcp->ev_vp);
	}

	/*
	 * Free old vmcmds and reset the array.
	 */
	evsp->evs_used = 0;
	if (evsp->evs_cmds != evsp->evs_start)
		free(evsp->evs_cmds, M_EXEC, 0);
	evsp->evs_cmds = evsp->evs_start;
	evsp->evs_cnt = EXEC_DEFAULT_VMCMD_SETSIZE;
}

int
exec_process_vmcmds(struct proc *p, struct exec_package *epp)
{
	struct exec_vmcmd *base_vc = NULL;
	int error = 0;
	int i;

	for (i = 0; i < epp->ep_vmcmds.evs_used && !error; i++) {
		struct exec_vmcmd *vcp;

		vcp = &epp->ep_vmcmds.evs_cmds[i];

		if (vcp->ev_flags & VMCMD_RELATIVE) {
#ifdef DIAGNOSTIC
			if (base_vc == NULL)
				panic("exec_process_vmcmds: RELATIVE no base");
#endif
			vcp->ev_addr += base_vc->ev_addr;
		}
		error = (*vcp->ev_proc)(p, vcp);
		if (vcp->ev_flags & VMCMD_BASE) {
			base_vc = vcp;
		}
	}

	kill_vmcmds(&epp->ep_vmcmds);

	return (error);
}
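
/*
 * Example (illustration only, hence the #if 0): the intended lifecycle
 * of a vmcmd set.  A format handler queues commands while parsing the
 * on-disk image; nothing touches the address space until execve()
 * later runs the set through exec_process_vmcmds() above.  The handler
 * name and the segment sizes/addresses below are invented; real
 * handlers derive them from the executable's headers.  NEW_VMCMD2() is
 * the flag-taking variant of NEW_VMCMD() from <sys/exec.h>.
 */
#if 0
static int
example_makecmds(struct proc *p, struct exec_package *epp)
{
	/*
	 * A VMCMD_BASE command records its (possibly relocated) address
	 * as the base that exec_process_vmcmds() adds to every later
	 * VMCMD_RELATIVE command before running it.
	 */
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_pagedvn, 0x4000, 0x1000,
	    epp->ep_vp, 0, PROT_READ | PROT_EXEC, VMCMD_BASE);

	/* zero-filled data right after the text, addressed base-relative */
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, 0x2000, 0x4000,
	    NULLVP, 0, PROT_READ | PROT_WRITE, VMCMD_RELATIVE);

	/* queue the usual stack commands; see exec_setup_stack() below */
	return (exec_setup_stack(p, epp));
}
#endif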

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(struct proc *p, struct exec_vmcmd *cmd)
{
	/*
	 * note that if you're going to map part of a process as being
	 * paged from a vnode, that vnode had damn well better be marked as
	 * VTEXT.  that's handled in the routine which sets up the vmcmd to
	 * call this routine.
	 */
	struct uvm_object *uobj;
	int error;

	/*
	 * map the vnode in using uvm_map.
	 */

	if (cmd->ev_len == 0)
		return (0);
	if (cmd->ev_offset & PAGE_MASK)
		return (EINVAL);
	if (cmd->ev_addr & PAGE_MASK)
		return (EINVAL);
	if (cmd->ev_len & PAGE_MASK)
		return (EINVAL);

	/*
	 * first, attach to the object
	 */

	uobj = uvn_attach(cmd->ev_vp, PROT_READ | PROT_EXEC);
	if (uobj == NULL)
		return (ENOMEM);

	/*
	 * do the map
	 */

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
	    uobj, cmd->ev_offset, 0,
	    UVM_MAPFLAG(cmd->ev_prot, PROT_MASK, MAP_INHERIT_COPY,
	    MADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));

	/*
	 * check for error
	 */

	if (error) {
		/*
		 * error: detach from object
		 */
		uobj->pgops->pgo_detach(uobj);
	}

	return (error);
}

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */

int
vmcmd_map_readvn(struct proc *p, struct exec_vmcmd *cmd)
{
	int error;
	vm_prot_t prot;

	if (cmd->ev_len == 0)
		return (0);

	prot = cmd->ev_prot;

	cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(prot | PROT_WRITE, PROT_MASK, MAP_INHERIT_COPY,
	    MADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

	if (error)
		return (error);

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (caddr_t)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    p->p_ucred, NULL, p);
	if (error)
		return (error);

	if ((prot & PROT_WRITE) == 0) {
		/*
		 * we had to map in the area at PROT_WRITE so that vn_rdwr()
		 * could write to it.  however, the caller seems to want
		 * it mapped read-only, so now we are going to have to call
		 * uvm_map_protect() to fix up the protection.  ICK.
		 */
		return (uvm_map_protect(&p->p_vmspace->vm_map,
		    trunc_page(cmd->ev_addr),
		    round_page(cmd->ev_addr + cmd->ev_len),
		    prot, FALSE));
	}
	return (0);
}

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.
 */

int
vmcmd_map_zero(struct proc *p, struct exec_vmcmd *cmd)
{
	if (cmd->ev_len == 0)
		return (0);

	cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
	return (uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(cmd->ev_prot, PROT_MASK, MAP_INHERIT_COPY,
	    MADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_COPYONW)));
}
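
/*
 * Example (illustration only, hence the #if 0): how a loader might
 * combine the three mapping commands above for one file segment.
 * vmcmd_map_pagedvn() rejects unaligned values, so the page-aligned
 * prefix of the segment is demand-paged, the unaligned tail is read
 * with vmcmd_map_readvn(), and memory past the end of the file data
 * (BSS) is zero-filled.  The helper and its argument names are
 * hypothetical; addr and offset are assumed page-aligned, much as the
 * ELF loader arranges when it carves up a program header.
 */
#if 0
static void
example_load_segment(struct exec_package *epp, struct vnode *vp,
    u_long addr, u_long offset, u_long filesz, u_long memsz, u_int prot)
{
	u_long psize = trunc_page(filesz);

	/* demand-page the page-aligned portion straight from the vnode */
	if (psize > 0)
		NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, psize,
		    addr, vp, offset, prot);

	/*
	 * read the unaligned tail into zero-filled anonymous memory;
	 * since vmcmd_map_readvn() maps a whole zeroed page first,
	 * this also zeroes the BSS sharing the last file-backed page.
	 */
	if (psize != filesz)
		NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn,
		    filesz - psize, addr + psize, vp, offset + psize, prot);

	/* zero-fill any whole pages of BSS beyond the file data */
	if (round_page(addr + memsz) > round_page(addr + filesz))
		NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
		    round_page(addr + memsz) - round_page(addr + filesz),
		    round_page(addr + filesz), NULLVP, 0, prot);
}
#endif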

/*
 * vmcmd_randomize():
 *	handle vmcmd which specifies a randomized address space region.
 */

int
vmcmd_randomize(struct proc *p, struct exec_vmcmd *cmd)
{
	char *buf;
	int error;
	size_t off = 0, len;

	if (cmd->ev_len == 0)
		return (0);
	if (cmd->ev_len > ELF_RANDOMIZE_LIMIT)
		return (EINVAL);

	buf = malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
	len = cmd->ev_len;
	do {
		size_t sublen = MIN(len, PAGE_SIZE);

		arc4random_buf(buf, sublen);
		error = copyout(buf, (void *)cmd->ev_addr + off, sublen);
		if (error)
			break;
		off += sublen;
		len -= sublen;
		if (len)
			yield();
	} while (len);
	free(buf, M_TEMP, PAGE_SIZE);

	return (error);
}

/*
 * exec_setup_stack(): Set up the stack segment for an executable.
 *
 * Note that the ep_ssize parameter must be set to be the current stack
 * limit; this is adjusted in the body of execve() to yield the
 * appropriate stack segment usage once the argument length is
 * calculated.
 *
 * This function returns an int for uniformity with other (future) formats'
 * stack setup functions.  They might have errors to return.
 */

int
exec_setup_stack(struct proc *p, struct exec_package *epp)
{
	vaddr_t sgap;

#ifdef MACHINE_STACK_GROWS_UP
	epp->ep_maxsaddr = USRSTACK;
	epp->ep_minsaddr = USRSTACK + MAXSSIZ;
#else
	epp->ep_maxsaddr = USRSTACK - MAXSSIZ;
	epp->ep_minsaddr = USRSTACK;
#endif
	epp->ep_ssize = round_page(p->p_rlimit[RLIMIT_STACK].rlim_cur);

	if (stackgap_random != 0) {
		sgap = arc4random() & (stackgap_random - 1);
		sgap = trunc_page(sgap);

#ifdef MACHINE_STACK_GROWS_UP
		epp->ep_maxsaddr += sgap;
		epp->ep_minsaddr += sgap;
#else
		epp->ep_maxsaddr -= sgap;
		epp->ep_minsaddr -= sgap;
#endif
	}

	/*
	 * set up commands for stack.  note that this takes *two*, one to
	 * map the part of the stack which we can access, and one to map
	 * the part which we can't.
	 *
	 * arguably, it could be made into one, but that would require the
	 * addition of another mapping proc, which is unnecessary
	 *
	 * note that in memory, things assumed to be:
	 *	0 ....... ep_maxsaddr <stack> ep_minsaddr
	 */
#ifdef MACHINE_STACK_GROWS_UP
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
	    ((epp->ep_minsaddr - epp->ep_ssize) - epp->ep_maxsaddr),
	    epp->ep_maxsaddr + epp->ep_ssize, NULLVP, 0, PROT_NONE);
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, epp->ep_ssize,
	    epp->ep_maxsaddr, NULLVP, 0,
	    PROT_READ | PROT_WRITE);
#else
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
	    ((epp->ep_minsaddr - epp->ep_ssize) - epp->ep_maxsaddr),
	    epp->ep_maxsaddr, NULLVP, 0, PROT_NONE);
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, epp->ep_ssize,
	    (epp->ep_minsaddr - epp->ep_ssize), NULLVP, 0,
	    PROT_READ | PROT_WRITE);
#endif

	return (0);
}
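
/*
 * Worked example for exec_setup_stack() (downward-growing stack; the
 * constants are invented for illustration, not any particular
 * platform's values).  With USRSTACK = 0xc0000000, MAXSSIZ = 32MB and
 * RLIMIT_STACK at 8MB, ignoring the random stack gap:
 *
 *	ep_maxsaddr = 0xc0000000 - 32MB = 0xbe000000
 *	ep_minsaddr = 0xc0000000
 *	ep_ssize    = 8MB
 *
 * and the two vmcmds queued above map
 *
 *	[0xbe000000, 0xbf800000)	PROT_NONE	(24MB reserve)
 *	[0xbf800000, 0xc0000000)	read/write	(8MB usable stack)
 *
 * i.e. the full MAXSSIZ range is claimed up front so no other mapping
 * can land inside it, while only the RLIMIT_STACK portion is actually
 * accessible.
 */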