/*	$OpenBSD: library_mquery.c,v 1.69 2022/12/04 15:42:07 deraadt Exp $ */

/*
 * Copyright (c) 2002 Dale Rahn
 * Copyright (c) 1998 Per Fogelstrom, Opsycon AB
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#define _DYN_LOADER

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>

#include "syscall.h"
#include "util.h"
#include "archdep.h"
#include "resolve.h"
#include "sod.h"

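/*
 * Convert ELF segment permission bits (PF_R/PF_W/PF_X) into the
 * corresponding mmap protection bits (PROT_READ/PROT_WRITE/PROT_EXEC).
 */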
#define PFLAGS(X) ((((X) & PF_R) ? PROT_READ : 0) | \
		   (((X) & PF_W) ? PROT_WRITE : 0) | \
		   (((X) & PF_X) ? PROT_EXEC : 0))

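/*
 * Unmap every segment recorded in a load list, rounding each size up
 * to a page boundary, then free the list nodes themselves.
 */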
void
_dl_load_list_free(struct load_list *load_list)
{
	struct load_list *next;
	Elf_Addr align = _dl_pagesz - 1;

	while (load_list != NULL) {
		if (load_list->start != NULL)
			_dl_munmap(load_list->start,
			    ((load_list->size) + align) & ~align);
		next = load_list->next;
		_dl_free(load_list);
		load_list = next;
	}
}


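/*
 * Drop an object whose reference count has hit zero: recursively
 * visit its children and group references, then unmap its segments
 * and remove it from the object list.  STAT_UNLOADED guards against
 * tearing down the same object twice.
 */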
void
_dl_unload_shlib(elf_object_t *object)
{
	struct dep_node *n;
	elf_object_t *load_object = object->load_object;

	/*
	 * If our load object has become unreferenced then we lost the
	 * last group reference to it, so the entire group should be taken
	 * down.  The current object is somewhere below load_object in
	 * the child_vec tree, so it'll get cleaned up by the recursion.
	 * That means we can just switch here to the load object.
	 */
	if (load_object != object && OBJECT_REF_CNT(load_object) == 0 &&
	    (load_object->status & STAT_UNLOADED) == 0) {
		DL_DEB(("unload_shlib switched from %s to %s\n",
		    object->load_name, load_object->load_name));
		object = load_object;
		goto unload;
	}

	DL_DEB(("unload_shlib called on %s\n", object->load_name));
	if (OBJECT_REF_CNT(object) == 0 &&
	    (object->status & STAT_UNLOADED) == 0) {
		struct object_vector vec;
		int i;
unload:
		object->status |= STAT_UNLOADED;
		for (vec = object->child_vec, i = 0; i < vec.len; i++)
			_dl_unload_shlib(vec.vec[i]);
		TAILQ_FOREACH(n, &object->grpref_list, next_sib)
			_dl_unload_shlib(n->data);
		DL_DEB(("unload_shlib unloading on %s\n", object->load_name));
		_dl_load_list_free(object->load_list);
		_dl_remove_object(object);
	}
}


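/*
 * Try to load the shared object "libname".  Returns the new (or
 * already loaded) object on success; on failure, sets _dl_errno and
 * returns NULL.  This is the mquery(2) flavour of the loader: rather
 * than mapping the whole image in one piece, it asks the kernel for a
 * suitable address for the lowest segment and then places each
 * remaining segment at its fixed offset from that base.
 */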
elf_object_t *
_dl_tryload_shlib(const char *libname, int type, int flags, int nodelete)
{
	struct mutate imut[MAXMUT], mut[MAXMUT];
	int libfile, i;
	struct load_list *ld, *lowld = NULL;
	elf_object_t *object;
	Elf_Dyn *dynp = NULL;
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdp;
	Elf_Addr load_end = 0;
	Elf_Addr align = _dl_pagesz - 1, off, size;
	Elf_Phdr *ptls = NULL;
	Elf_Addr relro_addr = 0, relro_size = 0;
	struct stat sb;
	char hbuf[4096], *exec_start;
	size_t exec_size;

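/* Round up to / truncate down to a page boundary (align is _dl_pagesz - 1) */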
#define ROUND_PG(x) (((x) + align) & ~(align))
#define TRUNC_PG(x) ((x) & ~(align))

	libfile = _dl_open(libname, O_RDONLY | O_CLOEXEC);
	if (libfile < 0) {
		_dl_errno = DL_CANT_OPEN;
		return(0);
	}

	if (_dl_fstat(libfile, &sb) < 0) {
		_dl_close(libfile);
		_dl_errno = DL_CANT_OPEN;
		return(0);
	}

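	/* If this very file is already loaded, reuse the existing object. */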
	for (object = _dl_objects; object != NULL; object = object->next) {
		if (object->dev == sb.st_dev &&
		    object->inode == sb.st_ino) {
			_dl_close(libfile);
			_dl_handle_already_loaded(object, flags);
			return(object);
		}
	}
	if (flags & DF_1_NOOPEN) {
		_dl_close(libfile);
		return NULL;
	}

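	/*
	 * Read the first page of the file; the ELF header and the
	 * program headers are expected to fit within it.  Reject
	 * anything that is not a shared object built for this machine.
	 */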
	_dl_read(libfile, hbuf, sizeof(hbuf));
	ehdr = (Elf_Ehdr *)hbuf;
	if (ehdr->e_ident[0] != ELFMAG0 || ehdr->e_ident[1] != ELFMAG1 ||
	    ehdr->e_ident[2] != ELFMAG2 || ehdr->e_ident[3] != ELFMAG3 ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != MACHID) {
		_dl_close(libfile);
		_dl_errno = DL_NOT_ELF;
		return(0);
	}

	/* Insertion sort: keep the load list ordered by ascending moff */
#define LDLIST_INSERT(ld) do { \
	struct load_list **_ld; \
	for (_ld = &lowld; *_ld != NULL; _ld = &(*_ld)->next) \
		if ((*_ld)->moff > ld->moff) \
			break; \
	ld->next = *_ld; \
	*_ld = ld; \
} while (0)
	/*
	 *  Alright, we might have a winner!
	 *  Figure out how much VM space we need and set up the load
	 *  list that we'll use to find free VM space.
	 */
	phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff);
	for (i = 0; i < ehdr->e_phnum; i++, phdp++) {
		switch (phdp->p_type) {
		case PT_LOAD:
			off = (phdp->p_vaddr & align);
			size = off + phdp->p_filesz;

			if (size != 0) {
				ld = _dl_malloc(sizeof(struct load_list));
				if (ld == NULL)
					_dl_oom();
				ld->start = NULL;
				ld->size = size;
				ld->moff = TRUNC_PG(phdp->p_vaddr);
				ld->foff = TRUNC_PG(phdp->p_offset);
				ld->prot = PFLAGS(phdp->p_flags);
				LDLIST_INSERT(ld);
			}

			if ((PFLAGS(phdp->p_flags) & PROT_WRITE) == 0 ||
			    ROUND_PG(size) == ROUND_PG(off + phdp->p_memsz))
				break;
			/* This phdr has a zfod (zero fill on demand) region, e.g. .bss */
			ld = _dl_calloc(1, sizeof(struct load_list));
			if (ld == NULL)
				_dl_oom();
			ld->start = NULL;
			ld->size = ROUND_PG(off + phdp->p_memsz) -
			    ROUND_PG(size);
			ld->moff = TRUNC_PG(phdp->p_vaddr) +
			    ROUND_PG(size);
			ld->foff = -1;
			ld->prot = PFLAGS(phdp->p_flags);
			LDLIST_INSERT(ld);
			break;
		case PT_DYNAMIC:
			dynp = (Elf_Dyn *)phdp->p_vaddr;
			break;
		case PT_TLS:
			if (phdp->p_filesz > phdp->p_memsz) {
				_dl_printf("%s: invalid tls data in %s.\n",
				    __progname, libname);
				_dl_close(libfile);
				_dl_errno = DL_CANT_LOAD_OBJ;
				return(0);
			}
			if (!_dl_tib_static_done) {
				ptls = phdp;
				break;
			}
			_dl_printf("%s: unsupported TLS program header in %s\n",
			    __progname, libname);
			_dl_close(libfile);
			_dl_errno = DL_CANT_LOAD_OBJ;
			return(0);
		default:
			break;
		}
	}

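/*
 * LOFF is the load offset (relocation bias): the difference between
 * the address the lowest segment was actually mapped at and its
 * link-time virtual address.
 */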
#define LOFF ((Elf_Addr)lowld->start - lowld->moff)

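	/*
	 * Map the segments.  The lowest one is placed first: _dl_mquery()
	 * asks the kernel where a mapping of that size and protection
	 * could go (biased towards the exec region when the segment is
	 * not writable).  Every other segment must then land at a fixed
	 * distance from it, so it is mapped MAP_FIXED | __MAP_NOREPLACE,
	 * which fails rather than clobbering an existing mapping; on such
	 * a collision we unmap what we have so far, slide the base up and
	 * retry.
	 */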
retry:
	_dl_memset(&mut, 0, sizeof mut);
	_dl_memset(&imut, 0, sizeof imut);
	exec_start = NULL;
	exec_size = 0;
	for (ld = lowld; ld != NULL; ld = ld->next) {
		off_t foff;
		int fd, flags;
		void *res;

		flags = MAP_PRIVATE;

		if (ld->foff < 0) {
			fd = -1;
			foff = 0;
			flags |= MAP_ANON;
		} else {
			fd = libfile;
			foff = ld->foff;
		}

		if (ld == lowld) {
			/*
			 * Add PROT_EXEC to force the first allocation in
			 * EXEC region unless it is writable.
			 */
			int exec = (ld->prot & PROT_WRITE) ? 0 : PROT_EXEC;
			if (exec && lowld->start == NULL)
				lowld->start = _dl_exec_hint;
			res = _dl_mquery((void *)(LOFF + ld->moff),
			    ROUND_PG(ld->size), ld->prot | exec, flags,
			    fd, foff);
			if (_dl_mmap_error(res))
				goto fail;
			lowld->start = res;
		}

		res = _dl_mmap((void *)(LOFF + ld->moff), ROUND_PG(ld->size),
		    ld->prot, flags | MAP_FIXED | __MAP_NOREPLACE, fd, foff);
		if (_dl_mmap_error(res)) {
			struct load_list *ll;

			/* Unmap any mappings that we did get in. */
			for (ll = lowld; ll != NULL && ll != ld;
			     ll = ll->next) {
				_dl_munmap(ll->start, ROUND_PG(ll->size));
			}

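			/* Slide the whole image up and retry the layout. */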
			lowld->start += ROUND_PG(ld->size);
			goto retry;
		}

		if ((ld->prot & PROT_EXEC) && exec_start == NULL) {
			exec_start = (void *)(LOFF + ld->moff);
			exec_size = ROUND_PG(ld->size);
		}

		/* Entire mapping can become immutable, minus exceptions chosen later */
		_dl_defer_immut(imut, LOFF + ld->moff, ROUND_PG(ld->size));

		ld->start = res;
	}

	for (ld = lowld; ld != NULL; ld = ld->next) {
		/* Zero the tail of the last page, past the end of the file data */
		if ((ld->prot & PROT_WRITE) != 0 && (ld->size & align) != 0)
			_dl_memset((char *)ld->start + ld->size, 0,
			    _dl_pagesz - (ld->size & align));
		load_end = (Elf_Addr)ld->start + ROUND_PG(ld->size);
	}

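	/*
	 * Second pass over the program headers now that everything is
	 * mapped: fill PT_OPENBSD_RANDOMIZE regions with random bytes,
	 * and record the PT_GNU_RELRO and PT_OPENBSD_MUTABLE ranges for
	 * the later mprotect/immutability handling.
	 */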
	phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff);
	for (i = 0; i < ehdr->e_phnum; i++, phdp++) {
		switch (phdp->p_type) {
		case PT_OPENBSD_RANDOMIZE:
			_dl_arc4randombuf((char *)(phdp->p_vaddr + LOFF),
			    phdp->p_memsz);
			break;
		case PT_GNU_RELRO:
			relro_addr = phdp->p_vaddr + LOFF;
			relro_size = phdp->p_memsz;
			_dl_defer_mut(mut, phdp->p_vaddr + LOFF, phdp->p_memsz);
			break;
		case PT_OPENBSD_MUTABLE:
			_dl_defer_mut(mut, phdp->p_vaddr + LOFF, phdp->p_memsz);
			break;
		}
	}

	_dl_close(libfile);

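	/*
	 * dynp still holds the link-time vaddr of PT_DYNAMIC; convert it
	 * to its mapped address before finalizing the object.
	 */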
	dynp = (Elf_Dyn *)((unsigned long)dynp + LOFF);
	object = _dl_finalize_object(libname, dynp,
	    (Elf_Phdr *)((char *)lowld->start + ehdr->e_phoff), ehdr->e_phnum,
	    type, (Elf_Addr)lowld->start, LOFF);
	if (object) {
		char *soname = (char *)object->Dyn.info[DT_SONAME];

		object->load_size = (Elf_Addr)load_end - (Elf_Addr)lowld->start;
		object->load_list = lowld;
		/* set inode, dev from stat info */
		object->dev = sb.st_dev;
		object->inode = sb.st_ino;
		object->obj_flags |= flags;
		object->nodelete = nodelete;
		object->relro_addr = relro_addr;
		object->relro_size = relro_size;
		_dl_set_sod(object->load_name, &object->sod);
		if (ptls != NULL && ptls->p_memsz)
			_dl_set_tls(object, ptls, (Elf_Addr)lowld->start,
			    libname);

		/* Request permission for system calls in libc.so's text segment */
		if (soname != NULL &&
		    _dl_strncmp(soname, "libc.so.", 8) == 0) {
			if (_dl_msyscall(exec_start, exec_size) == -1)
				_dl_printf("msyscall %lx %lx error\n",
				    exec_start, exec_size);
		}
		_dl_bcopy(mut, object->mut, sizeof mut);
		_dl_bcopy(imut, object->imut, sizeof imut);
	} else {
		_dl_load_list_free(lowld);
	}
	return(object);
fail:
	_dl_printf("%s: ld.so mmap failed mapping %s.\n", __progname, libname);
	_dl_close(libfile);
	_dl_errno = DL_CANT_MMAP;
	_dl_load_list_free(lowld);
	return(0);
}
366