/*	$NetBSD: exec_subr.c,v 1.67 2011/01/17 07:13:31 uebayasi Exp $	*/

/*
 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_subr.c,v 1.67 2011/01/17 07:13:31 uebayasi Exp $");

#include "opt_pax.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/exec.h>
#include <sys/mman.h>
#include <sys/resourcevar.h>
#include <sys/device.h>

#if defined(PAX_MPROTECT) || defined(PAX_ASLR)
#include <sys/pax.h>
#endif /* PAX_MPROTECT || PAX_ASLR */

#include <uvm/uvm_extern.h>

#define	VMCMD_EVCNT_DECL(name)					\
static struct evcnt vmcmd_ev_##name =				\
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "vmcmd", #name);	\
EVCNT_ATTACH_STATIC(vmcmd_ev_##name)

#define	VMCMD_EVCNT_INCR(name)					\
    vmcmd_ev_##name.ev_count++

VMCMD_EVCNT_DECL(calls);
VMCMD_EVCNT_DECL(extends);
VMCMD_EVCNT_DECL(kills);
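
/*
 * The statically attached counters above appear under the "vmcmd"
 * group in the evcnt(9) list, so the call/extend/kill rates can be
 * inspected from userland with "vmstat -e".
 */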

/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 */

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct lwp * l, struct exec_vmcmd *),
    vsize_t len, vaddr_t addr, struct vnode *vp, u_long offset,
    u_int prot, int flags)
{
	struct exec_vmcmd    *vcp;

	VMCMD_EVCNT_INCR(calls);
	KASSERT(proc != vmcmd_map_pagedvn || (vp->v_iflag & VI_TEXT));
	KASSERT(vp == NULL || vp->v_usecount > 0);

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
	vcp->ev_flags = flags;
}
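
/*
 * Typical use (sketch): an exec format parser queues commands through
 * the NEW_VMCMD()/NEW_VMCMD2() wrappers from <sys/exec.h> rather than
 * calling new_vmcmd() directly, e.g. to demand-page a text segment:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, textsize, textaddr,
 *	    epp->ep_vp, textoff, VM_PROT_READ|VM_PROT_EXECUTE);
 *
 * nothing is mapped at this point; the queued set is run later, in
 * order, by the exec machinery.
 */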

void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	/* figure out number of entries in new set */
	if ((ocnt = evsp->evs_cnt) != 0) {
		evsp->evs_cnt += ocnt;
		VMCMD_EVCNT_INCR(extends);
	} else
		evsp->evs_cnt = EXEC_DEFAULT_VMCMD_SETSIZE;
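
	/*
	 * Doubling the set on each extension keeps the amortized cost
	 * of queueing a command constant: n commands trigger at most
	 * about log2(n / EXEC_DEFAULT_VMCMD_SETSIZE) reallocations.
	 */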

	/* allocate it */
	nvcp = kmem_alloc(evsp->evs_cnt * sizeof(struct exec_vmcmd), KM_SLEEP);

	/* free the old struct, if there was one, and record the new one */
	if (ocnt) {
		memcpy(nvcp, evsp->evs_cmds,
		    (ocnt * sizeof(struct exec_vmcmd)));
		kmem_free(evsp->evs_cmds, ocnt * sizeof(struct exec_vmcmd));
	}
	evsp->evs_cmds = nvcp;
}

void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *vcp;
	u_int i;

	VMCMD_EVCNT_INCR(kills);

	if (evsp->evs_cnt == 0)
		return;

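	/*
	 * Drop the vnode references that new_vmcmd() took with vref()
	 * before throwing the command array away.
	 */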
	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULL)
			vrele(vcp->ev_vp);
	}
	kmem_free(evsp->evs_cmds, evsp->evs_cnt * sizeof(struct exec_vmcmd));
	evsp->evs_used = evsp->evs_cnt = 0;
}

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct uvm_object *uobj;
	struct vnode *vp = cmd->ev_vp;
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	KASSERT(vp->v_iflag & VI_TEXT);

	/*
	 * map the vnode in using uvm_map.
	 */

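	/*
	 * Demand paging maps file pages straight into the address
	 * space, so the address, length and file offset must all be
	 * page aligned; formats with misaligned segments are expected
	 * to use vmcmd_map_readvn() instead.
	 */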
	if (cmd->ev_len == 0)
		return 0;
	if (cmd->ev_offset & PAGE_MASK)
		return EINVAL;
	if (cmd->ev_addr & PAGE_MASK)
		return EINVAL;
	if (cmd->ev_len & PAGE_MASK)
		return EINVAL;

	prot = cmd->ev_prot;
	maxprot = UVM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

	/*
	 * check the file system's opinion about mmapping the file
	 */

	error = VOP_MMAP(vp, prot, l->l_cred);
	if (error)
		return error;

	if ((vp->v_vflag & VV_MAPPED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vp->v_vflag |= VV_MAPPED;
		VOP_UNLOCK(vp);
	}

	/*
	 * do the map, reference the object for this map entry
	 */
	uobj = &vp->v_uobj;
	vref(vp);

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
		uobj, cmd->ev_offset, 0,
		UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
			UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
	if (error) {
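		/*
		 * The failed map entry never took ownership of the
		 * reference vref() added above, so drop it by hand
		 * through the pager's detach op.
		 */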
		uobj->pgops->pgo_detach(uobj);
	}
	return error;
}

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;

	if (cmd->ev_len == 0)
		return 0;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_offset -= diff;
	cmd->ev_len += diff;
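	/*
	 * e.g. with 4KB pages, ev_addr 0x10234 truncates to 0x10000 and
	 * diff is 0x234: the mapping below starts on the page boundary
	 * and the read is widened and shifted by the same amount, so
	 * the original [addr, addr + len) range is still filled.
	 */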

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

	if (error)
		return error;

	return vmcmd_readvn(l, cmd);
}

int
vmcmd_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (void *)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    l->l_cred, NULL, l);
	if (error)
		return error;

	prot = cmd->ev_prot;
	maxprot = VM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

#ifdef PMAP_NEED_PROCWR
	/*
	 * we had to write the process, make sure the pages are synched
	 * with the instruction cache.
	 */
	if (prot & VM_PROT_EXECUTE)
		pmap_procwr(p, cmd->ev_addr, cmd->ev_len);
#endif

	/*
	 * we had to map in the area at PROT_ALL so that vn_rdwr()
	 * could write to it.   however, the caller seems to want
	 * it mapped read-only, so now we are going to have to call
	 * uvm_map_protect() to fix up the protection.  ICK.
	 */
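	/*
	 * Two passes through uvm_map_protect(): the final argument
	 * selects whether the entry's maximum or current protection is
	 * being set, so first clamp the maximum down from VM_PROT_ALL,
	 * then lower the current protection to what the caller asked
	 * for.
	 */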
	if (maxprot != VM_PROT_ALL) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				maxprot, true);
		if (error)
			return (error);
	}

	if (prot != maxprot) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				prot, false);
		if (error)
			return (error);
	}

	return 0;
}

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.  The
 *	address range must be first allocated, then protected appropriately.
 */

int
vmcmd_map_zero(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;
	vm_prot_t prot, maxprot;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_len += diff;
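	/*
	 * unlike vmcmd_map_readvn() there is no ev_offset to adjust
	 * here: the mapping is anonymous (no backing vnode).
	 */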

	prot = cmd->ev_prot;
	maxprot = UVM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
	if (cmd->ev_flags & VMCMD_STACK)
		curproc->p_vmspace->vm_issize += atop(round_page(cmd->ev_len));
	return error;
}

/*
 * exec_read_from():
 *
 *	Read from vnode into buffer at offset.
 */
int
exec_read_from(struct lwp *l, struct vnode *vp, u_long off, void *bf,
    size_t size)
{
	int error;
	size_t resid;

	if ((error = vn_rdwr(UIO_READ, vp, bf, size, off, UIO_SYSSPACE,
	    0, l->l_cred, &resid, NULL)) != 0)
		return error;
	/*
	 * See if we got all of it
	 */
	if (resid != 0)
		return ENOEXEC;
	return 0;
}
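
/*
 * Typical use (sketch): format parsers read a candidate header into a
 * local buffer and treat a short read as "not executable", e.g.
 *
 *	Elf_Ehdr eh;
 *	if ((error = exec_read_from(l, epp->ep_vp, 0, &eh, sizeof(eh))) != 0)
 *		return error;
 *
 * a partial header already comes back as ENOEXEC from the resid check.
 */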

/*
 * exec_setup_stack(): Set up the stack segment for an elf
 * executable.
 *
 * Note that the ep_ssize parameter must be set to be the current stack
 * limit; this is adjusted in the body of execve() to yield the
 * appropriate stack segment usage once the argument length is
 * calculated.
 *
 * This function returns an int for uniformity with other (future) formats'
 * stack setup functions.  They might have errors to return.
 */

int
exec_setup_stack(struct lwp *l, struct exec_package *epp)
{
	vsize_t max_stack_size;
	vaddr_t access_linear_min;
	vsize_t access_size;
	vaddr_t noaccess_linear_min;
	vsize_t noaccess_size;

#ifndef	USRSTACK32
#define USRSTACK32	(0x00000000ffffffffL&~PGOFSET)
#endif

	if (epp->ep_flags & EXEC_32) {
		epp->ep_minsaddr = USRSTACK32;
		max_stack_size = MAXSSIZ;
	} else {
		epp->ep_minsaddr = USRSTACK;
		max_stack_size = MAXSSIZ;
	}
	epp->ep_ssize = l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur;

#ifdef PAX_ASLR
	pax_aslr_stack(l, epp, &max_stack_size);
#endif /* PAX_ASLR */

	l->l_proc->p_stackbase = epp->ep_minsaddr;

	epp->ep_maxsaddr = (vaddr_t)STACK_GROW(epp->ep_minsaddr,
		max_stack_size);

	/*
	 * set up commands for stack.  note that this takes *two*, one to
	 * map the part of the stack which we can access, and one to map
	 * the part which we can't.
	 *
	 * arguably, it could be made into one, but that would require the
	 * addition of another mapping proc, which is unnecessary
	 */
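	/*
	 * On a downward-growing stack the resulting layout is (sketch):
	 *
	 *	ep_maxsaddr ->	+------------------------+
	 *			| noaccess, VM_PROT_NONE |
	 *			+------------------------+
	 *			| access, read/write     |
	 *	ep_minsaddr ->	+------------------------+
	 *
	 * the inaccessible part reserves address space out to the hard
	 * limit (MAXSSIZ) so that a later setrlimit(2) raise of
	 * RLIMIT_STACK has room to grow into.
	 */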
	access_size = epp->ep_ssize;
	access_linear_min = (vaddr_t)STACK_ALLOC(epp->ep_minsaddr, access_size);
	noaccess_size = max_stack_size - access_size;
	noaccess_linear_min = (vaddr_t)STACK_ALLOC(STACK_GROW(epp->ep_minsaddr,
	    access_size), noaccess_size);
	if (noaccess_size > 0 && noaccess_size <= MAXSSIZ) {
		NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, noaccess_size,
		    noaccess_linear_min, NULL, 0, VM_PROT_NONE, VMCMD_STACK);
	}
	KASSERT(access_size > 0 && access_size <= MAXSSIZ);
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, access_size,
	    access_linear_min, NULL, 0, VM_PROT_READ | VM_PROT_WRITE,
	    VMCMD_STACK);

	return 0;
}
428