/*	$OpenBSD: exec_elf.c,v 1.4 1996/04/18 15:58:33 niklas Exp $	*/
/*	$NetBSD: exec_elf.c,v 1.6 1996/02/09 18:59:18 christos Exp $	*/

/*
 * Copyright (c) 1994 Christos Zoulas
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/exec.h>
#include <sys/exec_elf.h>

#include <sys/mman.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>

#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/exec.h>

#ifdef COMPAT_LINUX
#include <compat/linux/linux_exec.h>
#endif

#ifdef COMPAT_SVR4
#include <compat/svr4/svr4_exec.h>
#endif

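/*
 * Emulation probe functions; exec_elf_makecmds() tries these in order
 * until one of them accepts the executable.
 */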
int (*elf_probe_funcs[]) __P((struct proc *, struct exec_package *,
			      char *, u_long *)) = {
#ifdef COMPAT_SVR4
	svr4_elf_probe,
#endif
#ifdef COMPAT_LINUX
	linux_elf_probe
#endif
};

int elf_check_header __P((Elf32_Ehdr *, int));
int elf_load_file __P((struct proc *, char *, struct exec_vmcmd_set *,
		       u_long *, struct elf_args *, u_long *));

static int elf_read_from __P((struct proc *, struct vnode *, u_long,
	caddr_t, int));
static void elf_load_psection __P((struct exec_vmcmd_set *,
	struct vnode *, Elf32_Phdr *, u_long *, u_long *, int *));

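/*
 * Round "a" down to a multiple of "b"; "b" is assumed to be a power of
 * two, e.g. ELF_ALIGN(0x10234, 0x1000) == 0x10000.  Rounding up is done
 * by first adding the alignment, as in elf_load_psection().
 */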
#define ELF_ALIGN(a, b) ((a) & ~((b) - 1))

/*
 * Copy arguments onto the stack in the normal way, but add some
 * extra information in case of dynamic binding.
 */
void *
elf_copyargs(pack, arginfo, stack, argp)
	struct exec_package *pack;
	struct ps_strings *arginfo;
	void *stack;
	void *argp;
{
	size_t len;
	AuxInfo ai[ELF_AUX_ENTRIES], *a;
	struct elf_args *ap;

	stack = copyargs(pack, arginfo, stack, argp);
	if (!stack)
		return NULL;

	/*
	 * Push extra arguments on the stack needed by dynamically
	 * linked binaries
	 */
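	/*
	 * The entries below form the ELF auxiliary vector that the dynamic
	 * linker reads off the stack: program header address, entry size
	 * and count, the page size, the interpreter's load base, a flags
	 * word (zero here) and the program entry point, terminated by
	 * AUX_null.
	 */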
	if ((ap = (struct elf_args *) pack->ep_emul_arg)) {
		a = ai;

		a->au_id = AUX_phdr;
		a->au_v = ap->arg_phaddr;
		a++;

		a->au_id = AUX_phent;
		a->au_v = ap->arg_phentsize;
		a++;

		a->au_id = AUX_phnum;
		a->au_v = ap->arg_phnum;
		a++;

		a->au_id = AUX_pagesz;
		a->au_v = NBPG;
		a++;

		a->au_id = AUX_base;
		a->au_v = ap->arg_interp;
		a++;

		a->au_id = AUX_flags;
		a->au_v = 0;
		a++;

		a->au_id = AUX_entry;
		a->au_v = ap->arg_entry;
		a++;

		a->au_id = AUX_null;
		a->au_v = 0;
		a++;

		free((char *) ap, M_TEMP);
		len = ELF_AUX_ENTRIES * sizeof (AuxInfo);
		if (copyout(ai, stack, len))
			return NULL;
		stack += len;
	}
	return stack;
}

/*
 * elf_check_header():
 *
 * Check header for validity; return 0 if ok, ENOEXEC on error.
 *
 * XXX machine type needs to be moved to <machine/param.h> so
 * just one comparison can be done. Unfortunately, there are both
 * em_486 and em_386, so this would not work on the i386.
 */
int
elf_check_header(eh, type)
	Elf32_Ehdr *eh;
	int type;
{

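	/* The ident prefix must match the expected ELF magic bytes. */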
	if (bcmp(eh->e_ident, Elf32_e_ident, Elf32_e_siz) != 0)
		return ENOEXEC;

	switch (eh->e_machine) {
	/* XXX */
#ifdef i386
	case Elf32_em_386:
	case Elf32_em_486:
#endif
#ifdef sparc
	case Elf32_em_sparc:
#endif
		break;

	default:
		return ENOEXEC;
	}

	if (eh->e_type != type)
		return ENOEXEC;

	return 0;
}

/*
 * elf_load_psection():
 *
 * Load a psection at the appropriate address
 */
static void
elf_load_psection(vcset, vp, ph, addr, size, prot)
	struct exec_vmcmd_set *vcset;
	struct vnode *vp;
	Elf32_Phdr *ph;
	u_long *addr;
	u_long *size;
	int *prot;
{
	u_long uaddr, msize, rm, rf;
	long diff, offset;

	/*
	 * If the user specified an address, then we load there.
	 */
	if (*addr != ELF32_NO_ADDR) {
		if (ph->p_align > 1) {
			*addr = ELF_ALIGN(*addr + ph->p_align, ph->p_align);
			uaddr = ELF_ALIGN(ph->p_vaddr, ph->p_align);
		} else
			uaddr = ph->p_vaddr;
		diff = ph->p_vaddr - uaddr;
	} else {
		*addr = uaddr = ph->p_vaddr;
		if (ph->p_align > 1)
			*addr = ELF_ALIGN(uaddr, ph->p_align);
		diff = uaddr - *addr;
	}
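	/*
	 * Either way diff is the offset of p_vaddr within its alignment
	 * unit and *addr is p_align-aligned, so the mapping below starts
	 * diff bytes early and the file offset is backed up to match.
	 * E.g. with no address hint, p_vaddr = 0x10234 and p_align = 0x1000
	 * give *addr = 0x10000 and diff = 0x234.
	 */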

	*prot |= (ph->p_flags & Elf32_pf_r) ? VM_PROT_READ : 0;
	*prot |= (ph->p_flags & Elf32_pf_w) ? VM_PROT_WRITE : 0;
	*prot |= (ph->p_flags & Elf32_pf_x) ? VM_PROT_EXECUTE : 0;

	offset = ph->p_offset - diff;
	*size = ph->p_filesz + diff;
	msize = ph->p_memsz + diff;

	NEW_VMCMD(vcset, vmcmd_map_readvn, *size, *addr, vp, offset, *prot);

	/*
	 * Check if we need to extend the size of the segment
	 */
	rm = round_page(*addr + msize);
	rf = round_page(*addr + *size);

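	/*
	 * rm exceeds rf when p_memsz > p_filesz, i.e. the segment has a
	 * BSS-style tail; give those extra pages a zero-fill mapping.
	 */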
	if (rm != rf) {
		NEW_VMCMD(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP, 0, *prot);
		*size = msize;
	}
}

/*
 * elf_read_from():
 *
 *	Read from vnode into buffer at offset.
 */
static int
elf_read_from(p, vp, off, buf, size)
	struct proc *p;
	struct vnode *vp;
	u_long off;
	caddr_t buf;
	int size;
{
	int error;
	int resid;

	if ((error = vn_rdwr(UIO_READ, vp, buf, size,
			     off, UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred,
			     &resid, p)) != 0)
		return error;
	/*
	 * See if we got all of it
	 */
	if (resid != 0)
		return ENOEXEC;
	return 0;
}

/*
 * elf_load_file():
 *
 * Load a file (interpreter/library) pointed to by path
 * [stolen from coff_load_shlib()]. Made slightly generic
 * so it might be used externally.
 */
int
elf_load_file(p, path, vcset, entry, ap, last)
	struct proc *p;
	char *path;
	struct exec_vmcmd_set *vcset;
	u_long *entry;
	struct elf_args	*ap;
	u_long *last;
{
	int error, i;
	struct nameidata nd;
	Elf32_Ehdr eh;
	Elf32_Phdr *ph = NULL;
	u_long phsize;
	char *bp = NULL;
	u_long addr = *last;

	bp = path;
	/*
	 * 1. open file
	 * 2. read filehdr
	 * 3. map text, data, and bss out of it using VM_*
	 */
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
	if ((error = namei(&nd)) != 0) {
		return error;
	}
	if ((error = elf_read_from(p, nd.ni_vp, 0, (caddr_t) &eh,
				    sizeof(eh))) != 0)
		goto bad;

	if ((error = elf_check_header(&eh, Elf32_et_dyn)) != 0)
		goto bad;

	phsize = eh.e_phnum * sizeof(Elf32_Phdr);
	ph = (Elf32_Phdr *) malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = elf_read_from(p, nd.ni_vp, eh.e_phoff,
				    (caddr_t) ph, phsize)) != 0)
		goto bad;

	/*
	 * Load all the necessary sections
	 */
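	/*
	 * addr starts at the hint passed in through *last and is advanced
	 * past each loaded psection; the final value is handed back via
	 * *last so the caller knows where this object ends.
	 */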
	for (i = 0; i < eh.e_phnum; i++) {
		u_long size = 0;
		int prot = 0;

		switch (ph[i].p_type) {
		case Elf32_pt_load:
			elf_load_psection(vcset, nd.ni_vp, &ph[i], &addr,
						&size, &prot);
			/* If entry is within this section it must be text */
			if (eh.e_entry >= ph[i].p_vaddr &&
			    eh.e_entry < (ph[i].p_vaddr + size)) {
				*entry = addr + eh.e_entry;
				ap->arg_interp = addr;
			}
			addr += size;
			break;

		case Elf32_pt_dynamic:
		case Elf32_pt_phdr:
		case Elf32_pt_note:
			break;

		default:
			break;
		}
	}

bad:
	if (ph != NULL)
		free((char *) ph, M_TEMP);

	*last = addr;
	vrele(nd.ni_vp);
	return error;
}

/*
 * exec_elf_makecmds(): Prepare an ELF binary's exec package
 *
 * First, set up the various offsets/lengths in the exec package.
 *
 * Then, mark the text image busy (so it can be demand paged) or error
 * out if this is not possible.  Finally, set up vmcmds for the
 * text, data, bss, and stack segments.
 *
 * XXX no demand paging (yet?)
 */
int
exec_elf_makecmds(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	Elf32_Ehdr *eh = epp->ep_hdr;
	Elf32_Phdr *ph, *pp;
	Elf32_Addr phdr = 0;
	int error, i, n, nload;
	char interp[MAXPATHLEN];
	u_long pos = 0, phsize;

	if (epp->ep_hdrvalid < sizeof(Elf32_Ehdr))
		return ENOEXEC;

	if (elf_check_header(eh, Elf32_et_exec))
		return ENOEXEC;

	/*
	 * Check if the vnode is open for writing, because we want to
	 * demand-page out of it.  If it is, don't allow the exec, for
	 * various reasons.
	 */
	if (epp->ep_vp->v_writecount != 0) {
#ifdef DIAGNOSTIC
		if (epp->ep_vp->v_flag & VTEXT)
			panic("exec: a VTEXT vnode has writecount != 0\n");
#endif
		return ETXTBSY;
	}
	/*
	 * Allocate space to hold all the program headers, and read them
	 * from the file
	 */
	phsize = eh->e_phnum * sizeof(Elf32_Phdr);
	ph = (Elf32_Phdr *) malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = elf_read_from(p, epp->ep_vp, eh->e_phoff,
				    (caddr_t) ph, phsize)) != 0)
		goto bad;

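	/* Not known yet; the load loop below fills in the text/data sizes. */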
	epp->ep_tsize = ELF32_NO_ADDR;
	epp->ep_dsize = ELF32_NO_ADDR;

	interp[0] = '\0';

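	/*
	 * Look for a PT_INTERP program header and pull in the requested
	 * interpreter path; the emulation probes below may rewrite it.
	 */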
	for (i = 0; i < eh->e_phnum; i++) {
		pp = &ph[i];
		if (pp->p_type == Elf32_pt_interp) {
			if (pp->p_filesz >= sizeof(interp))
				goto bad;
			if ((error = elf_read_from(p, epp->ep_vp, pp->p_offset,
				      (caddr_t) interp, pp->p_filesz)) != 0)
				goto bad;
			break;
		}
	}

	/*
	 * On the same architecture, we may be emulating different systems.
	 * See which one will accept this executable. This currently only
	 * applies to Linux and SVR4 on the i386.
	 *
	 * Probe functions would normally see if the interpreter (if any)
	 * exists. Emulation packages may possibly replace the interpreter in
	 * interp[] with a changed path (/emul/xxx/<path>), and also
	 * set the ep_emul field in the exec package structure.
	 */
	if ((n = sizeof elf_probe_funcs / sizeof elf_probe_funcs[0])) {
		error = ENOEXEC;
		for (i = 0; i < n && error; i++)
			error = elf_probe_funcs[i](p, epp, interp, &pos);

		if (error)
			goto bad;
	}

	/*
	 * Load all the necessary sections
	 */
	for (i = nload = 0; i < eh->e_phnum; i++) {
		u_long  addr = ELF32_NO_ADDR, size = 0;
		int prot = 0;

		pp = &ph[i];

		switch (ph[i].p_type) {
		case Elf32_pt_load:
			/*
			 * XXX
			 * Can handle only 2 sections: text and data
			 */
			if (nload++ == 2)
				goto bad;
			elf_load_psection(&epp->ep_vmcmds, epp->ep_vp,
				&ph[i], &addr, &size, &prot);
			/*
			 * Decide whether it's text or data by looking
			 * at the entry point.
			 */
			if (eh->e_entry >= addr && eh->e_entry < (addr + size)){
				epp->ep_taddr = addr;
				epp->ep_tsize = size;
			} else {
				epp->ep_daddr = addr;
				epp->ep_dsize = size;
			}
			break;

		case Elf32_pt_shlib:
			error = ENOEXEC;
			goto bad;

		case Elf32_pt_interp:
			/* Already did this one */
		case Elf32_pt_dynamic:
		case Elf32_pt_note:
			break;

		case Elf32_pt_phdr:
			/* Note address of program headers (in text segment) */
			phdr = pp->p_vaddr;
			break;

		default:
			/*
			 * Not fatal, we don't need to understand everything
			 * :-)
			 */
			break;
		}
	}

	/*
	 * If no position to load the interpreter was set by a probe
	 * function, pick the same address that a non-fixed mmap(0, ..)
	 * would (i.e. something safely out of the way).
	 */
	if (pos == ELF32_NO_ADDR)
		pos = round_page(epp->ep_daddr + MAXDSIZ);

	/*
	 * Check if we found a dynamically linked binary and arrange to load
	 * its interpreter.
	 */
	if (interp[0]) {
		struct elf_args *ap;

		ap = (struct elf_args *) malloc(sizeof(struct elf_args),
						 M_TEMP, M_WAITOK);
		if ((error = elf_load_file(p, interp, &epp->ep_vmcmds,
				&epp->ep_entry, ap, &pos)) != 0) {
			free((char *) ap, M_TEMP);
			goto bad;
		}
		pos += phsize;
		ap->arg_phaddr = phdr;

		ap->arg_phentsize = eh->e_phentsize;
		ap->arg_phnum = eh->e_phnum;
		ap->arg_entry = eh->e_entry;

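		/*
		 * Stash these for elf_copyargs(), which turns them into the
		 * auxiliary vector on the new process's stack.
		 */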
		epp->ep_emul_arg = ap;
	} else
		epp->ep_entry = eh->e_entry;

#ifdef COMPAT_SVR4_MAP_PAGE_ZERO
	/* Dell SVR4 maps page zero, yeuch! */
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, NBPG, 0, epp->ep_vp, 0,
	    VM_PROT_READ);
#endif

	free((char *) ph, M_TEMP);
	epp->ep_vp->v_flag |= VTEXT;
	return exec_aout_setup_stack(p, epp);

bad:
	free((char *) ph, M_TEMP);
	kill_vmcmds(&epp->ep_vmcmds);
	return ENOEXEC;
}