/*	$OpenBSD: library_mquery.c,v 1.47 2014/07/10 09:03:01 otto Exp $ */

/*
 * Copyright (c) 2002 Dale Rahn
 * Copyright (c) 1998 Per Fogelstrom, Opsycon AB
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#define _DYN_LOADER

#include <sys/types.h>
#include <sys/param.h>
#include <fcntl.h>
#include <sys/mman.h>
#include "dl_prebind.h"

#include "syscall.h"
#include "archdep.h"
#include "resolve.h"
#include "sod.h"

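/* Convert ELF segment permission flags (PF_[RWX]) into mmap protections. */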
#define PFLAGS(X) ((((X) & PF_R) ? PROT_READ : 0) | \
		   (((X) & PF_W) ? PROT_WRITE : 0) | \
		   (((X) & PF_X) ? PROT_EXEC : 0))

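/*
 * Unmap every region recorded in a load_list and free the list nodes.
 * Sizes are rounded up to a whole page to match the original mappings.
 */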
void
_dl_load_list_free(struct load_list *load_list)
{
	struct load_list *next;
	Elf_Addr align = _dl_pagesz - 1;

	while (load_list != NULL) {
		if (load_list->start != NULL)
			_dl_munmap(load_list->start,
			    ((load_list->size) + align) & ~align);
		next = load_list->next;
		_dl_free(load_list);
		load_list = next;
	}
}

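/*
 * Unload a shared object once its reference count has dropped to zero:
 * recurse over its children and group references, then unmap its
 * segments and remove it from the object list.
 */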
void
_dl_unload_shlib(elf_object_t *object)
{
	struct dep_node *n;

	DL_DEB(("unload_shlib called on %s\n", object->load_name));
	if (OBJECT_REF_CNT(object) == 0 &&
	    (object->status & STAT_UNLOADED) == 0) {
		object->status |= STAT_UNLOADED;
		TAILQ_FOREACH(n, &object->child_list, next_sib)
			_dl_unload_shlib(n->data);
		TAILQ_FOREACH(n, &object->grpref_list, next_sib)
			_dl_unload_shlib(n->data);
		DL_DEB(("unload_shlib unloading on %s\n", object->load_name));
		_dl_load_list_free(object->load_list);
		_dl_remove_object(object);
	}
}

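/*
 * Attempt to map a shared library.  The PT_LOAD headers are collected
 * into a load_list sorted by virtual address; the list is then mapped as
 * a group, using mquery() to find a free region if the preferred
 * addresses are taken.  Returns the (possibly already loaded) object,
 * or NULL with _dl_errno set on failure.
 */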
elf_object_t *
_dl_tryload_shlib(const char *libname, int type, int flags)
{
	int libfile, i;
	struct load_list *ld, *lowld = NULL;
	elf_object_t *object;
	Elf_Dyn *dynp = 0;
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdp;
	Elf_Addr load_end = 0;
	Elf_Addr align = _dl_pagesz - 1, off, size;
	struct stat sb;
	void *prebind_data;
	char hbuf[4096];
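	/* Round up/truncate to a page boundary; align is _dl_pagesz - 1. */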
#define ROUND_PG(x) (((x) + align) & ~(align))
#define TRUNC_PG(x) ((x) & ~(align))

	libfile = _dl_open(libname, O_RDONLY | O_CLOEXEC);
	if (libfile < 0) {
		_dl_errno = DL_CANT_OPEN;
		return(0);
	}

	if (_dl_fstat(libfile, &sb) < 0) {
		_dl_errno = DL_CANT_OPEN;
		_dl_close(libfile);	/* don't leak the open descriptor */
		return(0);
	}

	for (object = _dl_objects; object != NULL; object = object->next) {
		if (object->dev == sb.st_dev &&
		    object->inode == sb.st_ino) {
			object->obj_flags |= flags & DF_1_GLOBAL;
			_dl_close(libfile);
			if (_dl_loading_object == NULL)
				_dl_loading_object = object;
			if (object->load_object != _dl_objects &&
			    object->load_object != _dl_loading_object) {
				_dl_link_grpref(object->load_object,
				    _dl_loading_object);
			}
			return(object);
		}
	}

	_dl_read(libfile, hbuf, sizeof(hbuf));
	ehdr = (Elf_Ehdr *)hbuf;
	if (ehdr->e_ident[0] != ELFMAG0 || ehdr->e_ident[1] != ELFMAG1 ||
	    ehdr->e_ident[2] != ELFMAG2 || ehdr->e_ident[3] != ELFMAG3 ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != MACHID) {
		_dl_close(libfile);
		_dl_errno = DL_NOT_ELF;
		return(0);
	}

	/* Insertion sort */
#define LDLIST_INSERT(ld) do { \
	struct load_list **_ld; \
	for (_ld = &lowld; *_ld != NULL; _ld = &(*_ld)->next) \
		if ((*_ld)->moff > ld->moff) \
			break; \
	ld->next = *_ld; \
	*_ld = ld; \
} while (0)
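	/*
	 * The list is kept sorted by ascending moff, so lowld always
	 * refers to the lowest-addressed mapping of the object.
	 */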
	/*
	 *  Alright, we might have a winner!
	 *  Figure out how much VM space we need and set up the load
	 *  list that we'll use to find free VM space.
	 */
	phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff);
	for (i = 0; i < ehdr->e_phnum; i++, phdp++) {
		switch (phdp->p_type) {
		case PT_LOAD:
			off = (phdp->p_vaddr & align);
			size = off + phdp->p_filesz;

			if (size != 0) {
				ld = _dl_malloc(sizeof(struct load_list));
				if (ld == NULL)
					_dl_exit(7);
				ld->start = NULL;
				ld->size = size;
				ld->moff = TRUNC_PG(phdp->p_vaddr);
				ld->foff = TRUNC_PG(phdp->p_offset);
				ld->prot = PFLAGS(phdp->p_flags);
				LDLIST_INSERT(ld);
			}

			if ((PFLAGS(phdp->p_flags) & PROT_WRITE) == 0 ||
			    ROUND_PG(size) == ROUND_PG(off + phdp->p_memsz))
				break;
			/* This phdr has a zfod section */
			ld = _dl_calloc(1, sizeof(struct load_list));
			if (ld == NULL)
				_dl_exit(7);
			ld->start = NULL;
			ld->size = ROUND_PG(off + phdp->p_memsz) -
			    ROUND_PG(size);
			ld->moff = TRUNC_PG(phdp->p_vaddr) +
			    ROUND_PG(size);
			ld->foff = -1;
			ld->prot = PFLAGS(phdp->p_flags);
			LDLIST_INSERT(ld);
			break;
		case PT_DYNAMIC:
			dynp = (Elf_Dyn *)phdp->p_vaddr;
			break;
		case PT_TLS:
			_dl_printf("%s: unsupported TLS program header in %s\n",
			    _dl_progname, libname);
			_dl_close(libfile);
			_dl_errno = DL_CANT_LOAD_OBJ;
			return(0);
		default:
			break;
		}
	}

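	/*
	 * LOFF is the relocation bias: the difference between where the
	 * lowest segment actually landed and its link-time virtual address.
	 */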
#define LOFF ((Elf_Addr)lowld->start - lowld->moff)

retry:
	for (ld = lowld; ld != NULL; ld = ld->next) {
		off_t foff;
		int fd, flags;
		void *res;

		flags = MAP_PRIVATE;
		if (LOFF + ld->moff != 0)
			flags |= MAP_FIXED | __MAP_NOREPLACE;

		if (ld->foff < 0) {
			fd = -1;
			foff = 0;
			flags |= MAP_ANON;
		} else {
			fd = libfile;
			foff = ld->foff;
		}

		res = _dl_mmap((void *)(LOFF + ld->moff), ROUND_PG(ld->size),
		    ld->prot, flags, fd, foff);
		if (_dl_mmap_error(res)) {
			/*
			 * The mapping we wanted isn't free, so we do an
			 * mquery without MAP_FIXED to get the next free
			 * mapping, adjust the base mapping address to match
			 * this free mapping and restart the process again.
			 *
			 * XXX - we need some kind of boundary condition
			 * here, or fix mquery to not run into the stack
			 */
			res = _dl_mquery((void *)(LOFF + ld->moff),
			    ROUND_PG(ld->size), ld->prot,
			    flags & ~(MAP_FIXED | __MAP_NOREPLACE), fd, foff);

			/*
			 * If ld == lowld, then ld->start is just a hint and
			 * thus shouldn't be unmapped.
			 */
			ld->start = NULL;

			/* Unmap any mappings that we did get in. */
			for (ld = lowld; ld != NULL; ld = ld->next) {
				if (ld->start == NULL)
					break;
				_dl_munmap(ld->start, ROUND_PG(ld->size));
				ld->start = NULL;
			}

			/* if the mquery failed, give up */
			if (_dl_mmap_error(res))
				goto fail;

			/* otherwise, reset the start of the base mapping */
			lowld->start = res - ld->moff + lowld->moff;
			goto retry;
		}

		ld->start = res;
	}

	for (ld = lowld; ld != NULL; ld = ld->next) {
		/* Zero out everything past the EOF */
		if ((ld->prot & PROT_WRITE) != 0 && (ld->size & align) != 0)
			_dl_memset((char *)ld->start + ld->size, 0,
			    _dl_pagesz - (ld->size & align));
		load_end = (Elf_Addr)ld->start + ROUND_PG(ld->size);
	}

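	/*
	 * Now that the object is mapped, fill any PT_OPENBSD_RANDOMIZE
	 * segments with random data.
	 */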
	phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff);
	for (i = 0; i < ehdr->e_phnum; i++, phdp++)
		if (phdp->p_type == PT_OPENBSD_RANDOMIZE)
			_dl_randombuf((char *)(phdp->p_vaddr + LOFF),
			    phdp->p_memsz);

	prebind_data = prebind_load_fd(libfile, libname);

	_dl_close(libfile);

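	/*
	 * Relocate the PT_DYNAMIC address by the load offset and let
	 * _dl_finalize_object() build the elf_object_t for this library.
	 */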
	dynp = (Elf_Dyn *)((unsigned long)dynp + LOFF);
	object = _dl_finalize_object(libname, dynp,
	    (Elf_Phdr *)((char *)lowld->start + ehdr->e_phoff), ehdr->e_phnum,
	    type, (Elf_Addr)lowld->start, LOFF);
	if (object) {
		object->prebind_data = prebind_data;
		object->load_size = (Elf_Addr)load_end - (Elf_Addr)lowld->start;
		object->load_list = lowld;
		/* set inode, dev from stat info */
		object->dev = sb.st_dev;
		object->inode = sb.st_ino;
		object->obj_flags |= flags;
		_dl_set_sod(object->load_name, &object->sod);
	} else {
		_dl_load_list_free(lowld);
	}
	return(object);
fail:
	_dl_printf("%s: rtld mmap failed mapping %s.\n",
	    _dl_progname, libname);
	_dl_close(libfile);
	_dl_errno = DL_CANT_MMAP;
	_dl_load_list_free(lowld);
	return(0);
}