/*	$OpenBSD: library_mquery.c,v 1.76 2024/04/05 13:51:47 deraadt Exp $ */

/*
 * Copyright (c) 2002 Dale Rahn
 * Copyright (c) 1998 Per Fogelstrom, Opsycon AB
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#define _DYN_LOADER

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>

#include "syscall.h"
#include "util.h"
#include "archdep.h"
#include "resolve.h"
#include "sod.h"

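/* Map ELF segment permission flags (PF_*) to mmap protection bits (PROT_*). */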
#define PFLAGS(X) ((((X) & PF_R) ? PROT_READ : 0) | \
		    (((X) & PF_W) ? PROT_WRITE : 0) | \
		    (((X) & PF_X) ? PROT_EXEC : 0))

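/*
 * Unmap every mapped region on a load list (sizes rounded up to a
 * full page) and free the list entries themselves.
 */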
void
_dl_load_list_free(struct load_list *load_list)
{
	struct load_list *next;
	Elf_Addr align = _dl_pagesz - 1;

	while (load_list != NULL) {
		if (load_list->start != NULL)
			_dl_munmap(load_list->start,
			    ((load_list->size) + align) & ~align);
		next = load_list->next;
		_dl_free(load_list);
		load_list = next;
	}
}

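/*
 * Unload an object once its reference count has dropped to zero,
 * recursing through its children and group references.
 */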
void
_dl_unload_shlib(elf_object_t *object)
{
	struct dep_node *n;
	elf_object_t *load_object = object->load_object;

	/*
	 * If our load object has become unreferenced then we lost the
	 * last group reference to it, so the entire group should be taken
	 * down.  The current object is somewhere below load_object in
	 * the child_vec tree, so it'll get cleaned up by the recursion.
	 * That means we can just switch here to the load object.
	 */
	if (load_object != object && OBJECT_REF_CNT(load_object) == 0 &&
	    (load_object->status & STAT_UNLOADED) == 0) {
		DL_DEB(("unload_shlib switched from %s to %s\n",
		    object->load_name, load_object->load_name));
		object = load_object;
		goto unload;
	}

	DL_DEB(("unload_shlib called on %s\n", object->load_name));
	if (OBJECT_REF_CNT(object) == 0 &&
	    (object->status & STAT_UNLOADED) == 0) {
		struct object_vector vec;
		int i;
unload:
		object->status |= STAT_UNLOADED;
		for (vec = object->child_vec, i = 0; i < vec.len; i++)
			_dl_unload_shlib(vec.vec[i]);
		TAILQ_FOREACH(n, &object->grpref_list, next_sib)
			_dl_unload_shlib(n->data);
		DL_DEB(("unload_shlib unloading on %s\n", object->load_name));
		_dl_load_list_free(object->load_list);
		_dl_remove_object(object);
	}
}

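/*
 * Try to map a shared object into memory: validate the ELF header,
 * build a sorted load list from its PT_LOAD segments, probe for free
 * address space with mquery(2), then map each segment at a fixed
 * offset from the chosen base.
 */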
elf_object_t *
_dl_tryload_shlib(const char *libname, int type, int flags, int nodelete)
{
	struct range_vector imut, mut;
	int libfile, libc = -1, i;
	struct load_list *ld, *lowld = NULL;
	elf_object_t *object;
	Elf_Dyn *dynp = NULL;
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdp, *ptls = NULL;
	Elf_Phdr *syscall_phdp = NULL;
	Elf_Addr load_end = 0;
	Elf_Addr align = _dl_pagesz - 1, off, size;
	Elf_Addr relro_addr = 0, relro_size = 0;
	struct stat sb;
	char hbuf[4096], *exec_start;
	size_t exec_size;

#define powerof2(x) ((((x) - 1) & (x)) == 0)
#define ROUND_PG(x) (((x) + align) & ~(align))
#define TRUNC_PG(x) ((x) & ~(align))

	libfile = _dl_open(libname, O_RDONLY | O_CLOEXEC);
	if (libfile < 0) {
		_dl_errno = DL_CANT_OPEN;
		return(0);
	}

	if (_dl_fstat(libfile, &sb) < 0) {
		_dl_errno = DL_CANT_OPEN;
		return(0);
	}

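	/* Avoid loading the same object twice: match on device and inode. */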
	for (object = _dl_objects; object != NULL; object = object->next) {
		if (object->dev == sb.st_dev &&
		    object->inode == sb.st_ino) {
			_dl_close(libfile);
			_dl_handle_already_loaded(object, flags);
			return(object);
		}
	}
	if (flags & DF_1_NOOPEN) {
		_dl_close(libfile);
		return NULL;
	}

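	/* Read in the first page and sanity-check the ELF header. */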
	_dl_read(libfile, hbuf, sizeof(hbuf));
	ehdr = (Elf_Ehdr *)hbuf;
	if (ehdr->e_ident[0] != ELFMAG0 || ehdr->e_ident[1] != ELFMAG1 ||
	    ehdr->e_ident[2] != ELFMAG2 || ehdr->e_ident[3] != ELFMAG3 ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != MACHID) {
		_dl_close(libfile);
		_dl_errno = DL_NOT_ELF;
		return(0);
	}

	/* Insertion sort */
#define LDLIST_INSERT(ld) do { \
	struct load_list **_ld; \
	for (_ld = &lowld; *_ld != NULL; _ld = &(*_ld)->next) \
		if ((*_ld)->moff > ld->moff) \
			break; \
	ld->next = *_ld; \
	*_ld = ld; \
} while (0)
	/*
	 * Alright, we might have a winner!
	 * Figure out how much VM space we need and set up the load
	 * list that we'll use to find free VM space.
	 */
	phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff);
	for (i = 0; i < ehdr->e_phnum; i++, phdp++) {
		if (phdp->p_align > 1 && !powerof2(phdp->p_align)) {
			_dl_printf("%s: ld.so invalid ELF input %s.\n",
			    __progname, libname);
			_dl_close(libfile);
			_dl_errno = DL_CANT_MMAP;
			return(0);
		}

		switch (phdp->p_type) {
		case PT_LOAD:
			off = (phdp->p_vaddr & align);
			size = off + phdp->p_filesz;

			if (size != 0) {
				ld = _dl_malloc(sizeof(struct load_list));
				if (ld == NULL)
					_dl_oom();
				ld->start = NULL;
				ld->size = size;
				ld->moff = TRUNC_PG(phdp->p_vaddr);
				ld->foff = TRUNC_PG(phdp->p_offset);
				ld->prot = PFLAGS(phdp->p_flags);
				LDLIST_INSERT(ld);
			}

			if ((PFLAGS(phdp->p_flags) & PROT_WRITE) == 0 ||
			    ROUND_PG(size) == ROUND_PG(off + phdp->p_memsz))
				break;
			/* This phdr has a zfod section */
			ld = _dl_calloc(1, sizeof(struct load_list));
			if (ld == NULL)
				_dl_oom();
			ld->start = NULL;
			ld->size = ROUND_PG(off + phdp->p_memsz) -
			    ROUND_PG(size);
			ld->moff = TRUNC_PG(phdp->p_vaddr) +
			    ROUND_PG(size);
			ld->foff = -1;
			ld->prot = PFLAGS(phdp->p_flags);
			LDLIST_INSERT(ld);
			break;
		case PT_DYNAMIC:
			dynp = (Elf_Dyn *)phdp->p_vaddr;
			break;
		case PT_TLS:
			if (phdp->p_filesz > phdp->p_memsz) {
				_dl_printf("%s: invalid tls data in %s.\n",
				    __progname, libname);
				_dl_close(libfile);
				_dl_errno = DL_CANT_LOAD_OBJ;
				return(0);
			}
			if (!_dl_tib_static_done) {
				ptls = phdp;
				break;
			}
			_dl_printf("%s: unsupported TLS program header in %s\n",
			    __progname, libname);
			_dl_close(libfile);
			_dl_errno = DL_CANT_LOAD_OBJ;
			return(0);
		default:
			break;
		}
	}

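/* Offset between the chosen base address and the link-time addresses. */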
#define LOFF ((Elf_Addr)lowld->start - lowld->moff)

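	/*
	 * Map each segment at its fixed offset from the base found by
	 * mquery().  If a mapping collides with something already present
	 * (__MAP_NOREPLACE fails), undo the mappings made so far, slide
	 * the base upward and retry.
	 */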
retry:
	_dl_memset(&mut, 0, sizeof mut);
	_dl_memset(&imut, 0, sizeof imut);
	exec_start = NULL;
	exec_size = 0;
	for (ld = lowld; ld != NULL; ld = ld->next) {
		off_t foff;
		int fd, flags;
		void *res;

		flags = MAP_PRIVATE;

		if (ld->foff < 0) {
			fd = -1;
			foff = 0;
			flags |= MAP_ANON;
		} else {
			fd = libfile;
			foff = ld->foff;
		}

		if (ld == lowld) {
			/*
			 * Add PROT_EXEC to force the first allocation in
			 * EXEC region unless it is writable.
			 */
			int exec = (ld->prot & PROT_WRITE) ? 0 : PROT_EXEC;
			if (exec && lowld->start == NULL)
				lowld->start = _dl_exec_hint;
			res = _dl_mquery((void *)(LOFF + ld->moff),
			    ROUND_PG(ld->size), ld->prot | exec, flags,
			    fd, foff);
			if (_dl_mmap_error(res))
				goto fail;
			lowld->start = res;
		}

		res = _dl_mmap((void *)(LOFF + ld->moff), ROUND_PG(ld->size),
		    ld->prot, flags | MAP_FIXED | __MAP_NOREPLACE, fd, foff);
		if (_dl_mmap_error(res)) {
			struct load_list *ll;

			/* Unmap any mappings that we did get in. */
			for (ll = lowld; ll != NULL && ll != ld;
			    ll = ll->next) {
				_dl_munmap(ll->start, ROUND_PG(ll->size));
			}

			lowld->start += ROUND_PG(ld->size);
			goto retry;
		}

		if ((ld->prot & PROT_EXEC) && exec_start == NULL) {
			exec_start = (void *)(LOFF + ld->moff);
			exec_size = ROUND_PG(ld->size);
		}

		/* Entire mapping can become immutable, minus exceptions chosen later */
		_dl_push_range_size(&imut, LOFF + ld->moff, ROUND_PG(ld->size));

		ld->start = res;
	}

	for (ld = lowld; ld != NULL; ld = ld->next) {
		/* Zero out everything past the EOF */
		if ((ld->prot & PROT_WRITE) != 0 && (ld->size & align) != 0)
			_dl_memset((char *)ld->start + ld->size, 0,
			    _dl_pagesz - (ld->size & align));
		load_end = (Elf_Addr)ld->start + ROUND_PG(ld->size);
	}

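	/* Second pass over the program headers, now that the base is known. */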
	phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff);
	for (i = 0; i < ehdr->e_phnum; i++, phdp++) {
		switch (phdp->p_type) {
		case PT_OPENBSD_RANDOMIZE:
			_dl_arc4randombuf((char *)(phdp->p_vaddr + LOFF),
			    phdp->p_memsz);
			break;
		case PT_GNU_RELRO:
			relro_addr = phdp->p_vaddr + LOFF;
			relro_size = phdp->p_memsz;
			_dl_push_range_size(&mut, relro_addr, relro_size);
			break;
		case PT_OPENBSD_MUTABLE:
			_dl_push_range_size(&mut, phdp->p_vaddr + LOFF,
			    phdp->p_memsz);
			break;
		case PT_OPENBSD_SYSCALLS:
			syscall_phdp = phdp;
			break;
		}
	}

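	/* For libc only: pin the legal syscall entry ranges (PT_OPENBSD_SYSCALLS). */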
	libc = _dl_islibc(dynp, LOFF);
	if (libc && syscall_phdp)
		_dl_pin(libfile, syscall_phdp, lowld->start,
		    (size_t)((exec_start + exec_size) - LOFF),
		    exec_start, exec_size);
	_dl_close(libfile);

	dynp = (Elf_Dyn *)((unsigned long)dynp + LOFF);
	object = _dl_finalize_object(libname, dynp,
	    (Elf_Phdr *)((char *)lowld->start + ehdr->e_phoff), ehdr->e_phnum,
	    type, (Elf_Addr)lowld->start, LOFF);
	if (object) {
		object->load_size = (Elf_Addr)load_end - (Elf_Addr)lowld->start;
		object->load_list = lowld;
		/* set inode, dev from stat info */
		object->dev = sb.st_dev;
		object->inode = sb.st_ino;
		object->obj_flags |= flags;
		object->nodelete = nodelete;
		object->relro_addr = relro_addr;
		object->relro_size = relro_size;
		object->islibc = libc;
		_dl_set_sod(object->load_name, &object->sod);
		if (ptls != NULL && ptls->p_memsz)
			_dl_set_tls(object, ptls, (Elf_Addr)lowld->start,
			    libname);
		_dl_bcopy(&mut, &object->mut, sizeof mut);
		_dl_bcopy(&imut, &object->imut, sizeof imut);
	} else {
		_dl_load_list_free(lowld);
	}
	return(object);
fail:
	_dl_printf("%s: ld.so mmap failed mapping %s.\n", __progname, libname);
	_dl_close(libfile);
	_dl_errno = DL_CANT_MMAP;
	_dl_load_list_free(lowld);
	return(0);
}