/*	$OpenBSD: library.c,v 1.96 2024/04/05 13:51:47 deraadt Exp $ */

/*
 * Copyright (c) 2002 Dale Rahn
 * Copyright (c) 1998 Per Fogelstrom, Opsycon AB
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#define _DYN_LOADER

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>

#include "syscall.h"
#include "util.h"
#include "archdep.h"
#include "resolve.h"
#include "sod.h"

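/*
 * Convert ELF segment permission bits (PF_R/PF_W/PF_X) into the
 * equivalent mmap(2) protections, e.g. PFLAGS(PF_R|PF_X) yields
 * PROT_READ|PROT_EXEC.
 */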
#define PFLAGS(X) ((((X) & PF_R) ? PROT_READ : 0) | \
		   (((X) & PF_W) ? PROT_WRITE : 0) | \
		   (((X) & PF_X) ? PROT_EXEC : 0))

void
_dl_load_list_free(struct load_list *load_list)
{
	struct load_list *next;

	while (load_list != NULL) {
		next = load_list->next;
		_dl_free(load_list);
		load_list = next;
	}
}

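/*
 * Unmap an object (and, transitively, its children and group members)
 * once its load group holds no remaining references; objects already
 * marked STAT_UNLOADED are skipped so the recursion terminates.
 */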
void
_dl_unload_shlib(elf_object_t *object)
{
	struct dep_node *n;
	elf_object_t *load_object = object->load_object;

	/*
	 * If our load object has become unreferenced, then we lost the
	 * last group reference to it, so the entire group should be taken
	 * down.  The current object is somewhere below load_object in
	 * the child_vec tree, so it'll get cleaned up by the recursion.
	 * That means we can just switch here to the load object.
	 */
	if (load_object != object && OBJECT_REF_CNT(load_object) == 0 &&
	    (load_object->status & STAT_UNLOADED) == 0) {
		DL_DEB(("unload_shlib switched from %s to %s\n",
		    object->load_name, load_object->load_name));
		object = load_object;
		goto unload;
	}

	DL_DEB(("unload_shlib called on %s\n", object->load_name));
	if (OBJECT_REF_CNT(object) == 0 &&
	    (object->status & STAT_UNLOADED) == 0) {
		struct object_vector vec;
		int i;
unload:
		object->status |= STAT_UNLOADED;
		for (vec = object->child_vec, i = 0; i < vec.len; i++)
			_dl_unload_shlib(vec.vec[i]);
		TAILQ_FOREACH(n, &object->grpref_list, next_sib)
			_dl_unload_shlib(n->data);
		DL_DEB(("unload_shlib unloading on %s\n", object->load_name));
		_dl_load_list_free(object->load_list);
		_dl_munmap((void *)object->load_base, object->load_size);
		_dl_remove_object(object);
	}
}

elf_object_t *
_dl_tryload_shlib(const char *libname, int type, int flags, int nodelete)
{
	struct range_vector imut, mut;
	int	libfile, libc = -1, i;
	struct load_list *next_load, *load_list = NULL;
	Elf_Addr maxva = 0, minva = ELF_NO_ADDR;
	Elf_Addr libaddr, loff, align = _dl_pagesz - 1;
	Elf_Addr relro_addr = 0, relro_size = 0;
	elf_object_t *object;
	char	hbuf[4096], *exec_start = 0;
	size_t exec_size = 0;
	Elf_Dyn *dynp = NULL;
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdp, *ptls = NULL;
	Elf_Phdr *syscall_phdp = NULL;
	struct stat sb;

#define powerof2(x) ((((x) - 1) & (x)) == 0)
#define ROUND_PG(x) (((x) + align) & ~(align))
#define TRUNC_PG(x) ((x) & ~(align))
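/*
 * With a 4096-byte page, align is 0xfff, so e.g. ROUND_PG(0x1234)
 * is 0x2000 and TRUNC_PG(0x1234) is 0x1000.  Note that powerof2(0)
 * evaluates true, which is why the alignment check below only
 * tests p_align values greater than 1.
 */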

	libfile = _dl_open(libname, O_RDONLY | O_CLOEXEC);
	if (libfile < 0) {
		_dl_errno = DL_CANT_OPEN;
		return(0);
	}

	if (_dl_fstat(libfile, &sb) < 0) {
		_dl_close(libfile);	/* don't leak the fd on failure */
		_dl_errno = DL_CANT_OPEN;
		return(0);
	}

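	/*
	 * Objects are identified by device and inode rather than by
	 * pathname, so a library reached through two different paths
	 * is still only mapped once.
	 */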
	for (object = _dl_objects; object != NULL; object = object->next) {
		if (object->dev == sb.st_dev &&
		    object->inode == sb.st_ino) {
			_dl_close(libfile);
			_dl_handle_already_loaded(object, flags);
			return(object);
		}
	}
	if (flags & DF_1_NOOPEN) {
		_dl_close(libfile);
		return NULL;
	}

	_dl_read(libfile, hbuf, sizeof(hbuf));
	ehdr = (Elf_Ehdr *)hbuf;
	if (ehdr->e_ident[0] != ELFMAG0 || ehdr->e_ident[1] != ELFMAG1 ||
	    ehdr->e_ident[2] != ELFMAG2 || ehdr->e_ident[3] != ELFMAG3 ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != MACHID) {
		_dl_close(libfile);
		_dl_errno = DL_NOT_ELF;
		return(0);
	}

	_dl_memset(&mut, 0, sizeof mut);
	_dl_memset(&imut, 0, sizeof imut);

	/*
	 *  Alright, we might have a winner!
	 *  Figure out how much VM space we need.
	 */
	phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff);
	for (i = 0; i < ehdr->e_phnum; i++, phdp++) {
		if (phdp->p_align > 1 && !powerof2(phdp->p_align)) {
			_dl_printf("%s: ld.so invalid ELF input %s.\n",
			    __progname, libname);
			_dl_close(libfile);
			_dl_errno = DL_CANT_MMAP;
			return(0);
		}

		switch (phdp->p_type) {
		case PT_LOAD:
			if (phdp->p_vaddr < minva)
				minva = phdp->p_vaddr;
			if (phdp->p_vaddr + phdp->p_memsz > maxva)
				maxva = phdp->p_vaddr + phdp->p_memsz;
			break;
		case PT_DYNAMIC:
			dynp = (Elf_Dyn *)phdp->p_vaddr;
			break;
		case PT_TLS:
			if (phdp->p_filesz > phdp->p_memsz) {
				_dl_printf("%s: invalid tls data in %s.\n",
				    __progname, libname);
				_dl_close(libfile);
				_dl_errno = DL_CANT_LOAD_OBJ;
				return(0);
			}
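			/*
			 * A PT_TLS segment can only be honoured while
			 * the static TLS area is still being laid out;
			 * once the initial thread's TIB is final, new
			 * static TLS cannot be allocated.
			 */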
			if (!_dl_tib_static_done) {
				ptls = phdp;
				break;
			}
			_dl_printf("%s: unsupported TLS program header in %s\n",
			    __progname, libname);
			_dl_close(libfile);
			_dl_errno = DL_CANT_LOAD_OBJ;
			return(0);
		default:
			break;
		}
	}
	minva = TRUNC_PG(minva);
	maxva = ROUND_PG(maxva);
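	/*
	 * minva/maxva now bound every PT_LOAD segment at its link-time
	 * address, rounded out to page boundaries; maxva - minva is the
	 * size of the single contiguous reservation made below.
	 */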

	/*
	 * We map the entire area to check that we can get the VM
	 * space required.  Map it inaccessible to start with.
	 *
	 * We must map the file we'll map later, otherwise the VM
	 * system won't be able to align the mapping properly
	 * on VAC architectures.
	 */
	libaddr = (Elf_Addr)_dl_mmap(0, maxva - minva, PROT_NONE,
	    MAP_PRIVATE|MAP_FILE, libfile, 0);
	if (_dl_mmap_error(libaddr)) {
		_dl_printf("%s: ld.so mmap failed mapping %s.\n",
		    __progname, libname);
		_dl_close(libfile);
		_dl_errno = DL_CANT_MMAP;
		return(0);
	}

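	/*
	 * loff is the relocation bias: adding it to any link-time
	 * p_vaddr gives the corresponding address inside the
	 * reservation made above.
	 */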
	loff = libaddr - minva;
	phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff);

	/* Entire mapping can become immutable, minus exceptions chosen later */
	_dl_push_range_size(&imut, loff, maxva - minva);

	for (i = 0; i < ehdr->e_phnum; i++, phdp++) {
		switch (phdp->p_type) {
		case PT_LOAD: {
			char *start = (char *)(TRUNC_PG(phdp->p_vaddr)) + loff;
			Elf_Addr off = (phdp->p_vaddr & align);
			Elf_Addr size = off + phdp->p_filesz;
			int flags = PFLAGS(phdp->p_flags);
			void *res;

			/*
			 * Initially map W|X segments without X
			 * permission.  After we're done with the
			 * initial relocation processing, we will make
			 * these segments read-only and add back the X
			 * permission.  This way we maintain W^X at
			 * all times.
			 */
			if ((flags & PROT_WRITE) && (flags & PROT_EXEC))
				flags &= ~PROT_EXEC;

			if (size != 0) {
				res = _dl_mmap(start, ROUND_PG(size), flags,
				    MAP_FIXED|MAP_PRIVATE, libfile,
				    TRUNC_PG(phdp->p_offset));
			} else
				res = NULL;	/* silence gcc */
			next_load = _dl_calloc(1, sizeof(struct load_list));
			if (next_load == NULL)
				_dl_oom();
			next_load->next = load_list;
			load_list = next_load;
			next_load->start = start;
			next_load->size = size;
			next_load->prot = PFLAGS(phdp->p_flags);
			if (size != 0 && _dl_mmap_error(res)) {
				_dl_printf("%s: ld.so mmap failed mapping %s.\n",
				    __progname, libname);
				_dl_close(libfile);
				_dl_errno = DL_CANT_MMAP;
				_dl_munmap((void *)libaddr, maxva - minva);
				_dl_load_list_free(load_list);
				return(0);
			}
			if ((flags & PROT_EXEC) && exec_start == 0) {
				exec_start = start;
				exec_size = ROUND_PG(size);
			}

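			/*
			 * p_memsz larger than p_filesz means BSS: zero
			 * the tail of the last file-backed page, then,
			 * if more pages are needed, back them with
			 * fresh anonymous memory.
			 */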
			if (phdp->p_flags & PF_W) {
				/* Zero out everything past the EOF */
				if ((size & align) != 0)
					_dl_memset(start + size, 0,
					    _dl_pagesz - (size & align));
				if (ROUND_PG(size) ==
				    ROUND_PG(off + phdp->p_memsz))
					continue;
				start = start + ROUND_PG(size);
				size = ROUND_PG(off + phdp->p_memsz) -
				    ROUND_PG(size);
				res = _dl_mmap(start, size, flags,
				    MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);
				if (_dl_mmap_error(res)) {
					_dl_printf("%s: ld.so mmap failed mapping %s.\n",
					    __progname, libname);
					_dl_close(libfile);
					_dl_errno = DL_CANT_MMAP;
					_dl_munmap((void *)libaddr, maxva - minva);
					_dl_load_list_free(load_list);
					return(0);
				}
			}
			break;
		}

		case PT_OPENBSD_RANDOMIZE:
			_dl_arc4randombuf((char *)(phdp->p_vaddr + loff),
			    phdp->p_memsz);
			break;

		case PT_GNU_RELRO:
			relro_addr = phdp->p_vaddr + loff;
			relro_size = phdp->p_memsz;
			_dl_push_range_size(&mut, relro_addr, relro_size);
			break;

		case PT_OPENBSD_MUTABLE:
			_dl_push_range_size(&mut, phdp->p_vaddr + loff,
			    phdp->p_memsz);
			break;
		case PT_OPENBSD_SYSCALLS:
			syscall_phdp = phdp;
			break;
		default:
			break;
		}
	}

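	/*
	 * If this object turns out to be libc, hand its
	 * PT_OPENBSD_SYSCALLS table to the kernel so that system call
	 * entry can be restricted to the addresses recorded there
	 * (the pinsyscalls(2) mechanism).
	 */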
	libc = _dl_islibc(dynp, loff);
	if (libc && syscall_phdp)
		_dl_pin(libfile, syscall_phdp, (void *)libaddr,
		    (size_t)((exec_start + exec_size) - libaddr),
		    exec_start, exec_size);
	_dl_close(libfile);

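	/*
	 * Relocate the PT_DYNAMIC pointer into the new mapping and
	 * register the object; on failure, everything mapped so far
	 * is torn down again.
	 */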
	dynp = (Elf_Dyn *)((unsigned long)dynp + loff);
	object = _dl_finalize_object(libname, dynp,
	    (Elf_Phdr *)((char *)libaddr + ehdr->e_phoff), ehdr->e_phnum, type,
	    libaddr, loff);
	if (object) {
		object->load_size = maxva - minva;	/*XXX*/
		object->load_list = load_list;
		/* set inode, dev from stat info */
		object->dev = sb.st_dev;
		object->inode = sb.st_ino;
		object->obj_flags |= flags;
		object->nodelete = nodelete;
		object->relro_addr = relro_addr;
		object->relro_size = relro_size;
		object->islibc = libc;
		_dl_set_sod(object->load_name, &object->sod);
		if (ptls != NULL && ptls->p_memsz)
			_dl_set_tls(object, ptls, libaddr, libname);
		_dl_bcopy(&mut, &object->mut, sizeof mut);
		_dl_bcopy(&imut, &object->imut, sizeof imut);
	} else {
		_dl_munmap((void *)libaddr, maxva - minva);
		_dl_load_list_free(load_list);
	}
	return(object);
}