/* $OpenBSD: library_mquery.c,v 1.36 2008/10/02 20:12:08 kurt Exp $ */

/*
 * Copyright (c) 2002 Dale Rahn
 * Copyright (c) 1998 Per Fogelstrom, Opsycon AB
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#define _DYN_LOADER

#include <sys/types.h>
#include <sys/param.h>
#include <fcntl.h>
#include <sys/mman.h>
#include "dl_prebind.h"

#include "syscall.h"
#include "archdep.h"
#include "resolve.h"

/*
 * Translate ELF segment permission bits (PF_R/PF_W/PF_X from a
 * program header's p_flags) into the corresponding mmap protection
 * bits (PROT_READ/PROT_WRITE/PROT_EXEC).
 */
#define PFLAGS(X) ((((X) & PF_R) ? PROT_READ : 0) | \
		   (((X) & PF_W) ? PROT_WRITE : 0) | \
		   (((X) & PF_X) ? PROT_EXEC : 0))

/*
 * Tear down a load list built by _dl_tryload_shlib(): unmap every
 * region that was actually mapped (sizes are rounded back up to a
 * page boundary, matching how they were mapped) and free the list
 * nodes themselves.
 */
void
_dl_load_list_free(struct load_list *load_list)
{
	struct load_list *next;
	Elf_Addr align = _dl_pagesz - 1;

	while (load_list != NULL) {
		if (load_list->start != NULL)
			_dl_munmap(load_list->start,
			    ((load_list->size) + align) & ~align);
		next = load_list->next;
		_dl_free(load_list);
		load_list = next;
	}
}


/*
 * Unload a shared object once its reference count has dropped to
 * zero.  Recurses over the object's children and group references
 * first; STAT_UNLOADED guards against revisiting an object while
 * the recursion is still in progress.
 */
void
_dl_unload_shlib(elf_object_t *object)
{
	struct dep_node *n;

	DL_DEB(("unload_shlib called on %s\n", object->load_name));
	if (OBJECT_REF_CNT(object) == 0 &&
	    (object->status & STAT_UNLOADED) == 0) {
		object->status |= STAT_UNLOADED;
		TAILQ_FOREACH(n, &object->child_list, next_sib)
			_dl_unload_shlib(n->data);
		TAILQ_FOREACH(n, &object->grpref_list, next_sib)
			_dl_unload_shlib(n->data);
		DL_DEB(("unload_shlib unloading on %s\n", object->load_name));
		_dl_load_list_free(object->load_list);
		_dl_remove_object(object);
	}
}


/*
 * Try to load the shared library 'libname'.  Returns the (possibly
 * pre-existing) elf_object_t on success, or NULL with _dl_errno set
 * on failure.  Uses mquery(2) to probe for a contiguous stretch of
 * free address space large enough for all PT_LOAD segments before
 * committing to any mmap.
 */
elf_object_t *
_dl_tryload_shlib(const char *libname, int type, int flags)
{
	int libfile, i;
	struct load_list *ld, *lowld = NULL;
	elf_object_t *object;
	Elf_Dyn *dynp = 0;
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdp;
	Elf_Addr load_end = 0;
	Elf_Addr align = _dl_pagesz - 1, off, size;
	struct stat sb;
	void *prebind_data;
	char hbuf[4096];

#define ROUND_PG(x) (((x) + align) & ~(align))
#define TRUNC_PG(x) ((x) & ~(align))

	/* Already loaded under this name?  Just bump flags/grouping. */
	object = _dl_lookup_object(libname);
	if (object) {
		object->obj_flags |= flags & RTLD_GLOBAL;
		if (_dl_loading_object == NULL)
			_dl_loading_object = object;
		if (object->load_object != _dl_objects &&
		    object->load_object != _dl_loading_object) {
			_dl_link_grpref(object->load_object, _dl_loading_object);
		}
		return(object);		/* Already loaded */
	}

	libfile = _dl_open(libname, O_RDONLY);
	if (libfile < 0) {
		_dl_errno = DL_CANT_OPEN;
		return(0);
	}

	if ( _dl_fstat(libfile, &sb) < 0) {
		_dl_errno = DL_CANT_OPEN;
		return(0);
	}

	/*
	 * Same file reached under a different name (symlink, hard
	 * link)?  Match on dev/inode and reuse the existing object.
	 */
	for (object = _dl_objects; object != NULL; object = object->next) {
		if (object->dev == sb.st_dev &&
		    object->inode == sb.st_ino) {
			object->obj_flags |= flags & RTLD_GLOBAL;
			_dl_close(libfile);
			if (_dl_loading_object == NULL)
				_dl_loading_object = object;
			if (object->load_object != _dl_objects &&
			    object->load_object != _dl_loading_object) {
				_dl_link_grpref(object->load_object,
				    _dl_loading_object);
			}
			return(object);
		}
	}

	/*
	 * Read the ELF header (and, we assume, the program headers
	 * with it) into hbuf and sanity-check the magic, type and
	 * machine.  XXX the _dl_read return value is not checked; a
	 * short read on a truncated file would be caught only by the
	 * magic check below.
	 */
	_dl_read(libfile, hbuf, sizeof(hbuf));
	ehdr = (Elf_Ehdr *)hbuf;
	if (ehdr->e_ident[0] != ELFMAG0 || ehdr->e_ident[1] != ELFMAG1 ||
	    ehdr->e_ident[2] != ELFMAG2 || ehdr->e_ident[3] != ELFMAG3 ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != MACHID) {
		_dl_close(libfile);
		_dl_errno = DL_NOT_ELF;
		return(0);
	}

	/* Insertion sort, keeping the load list ordered by moff. */
#define LDLIST_INSERT(ld) do { \
	struct load_list **_ld; \
	for (_ld = &lowld; *_ld != NULL; _ld = &(*_ld)->next) \
		if ((*_ld)->moff > ld->moff) \
			break; \
	ld->next = *_ld; \
	*_ld = ld; \
} while (0)
	/*
	 * Alright, we might have a winner!
	 * Figure out how much VM space we need and set up the load
	 * list that we'll use to find free VM space.
	 */
	phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff);
	for (i = 0; i < ehdr->e_phnum; i++, phdp++) {
		switch (phdp->p_type) {
		case PT_LOAD:
			/* page-offset of the segment within its first page */
			off = (phdp->p_vaddr & align);
			size = off + phdp->p_filesz;

			ld = _dl_malloc(sizeof(struct load_list));
			ld->start = NULL;
			ld->size = size;
			ld->moff = TRUNC_PG(phdp->p_vaddr);
			ld->foff = TRUNC_PG(phdp->p_offset);
			ld->prot = PFLAGS(phdp->p_flags);
			LDLIST_INSERT(ld);

			/*
			 * If p_memsz extends beyond p_filesz on a
			 * writable segment, the excess is zero-fill
			 * (bss); give it its own anonymous entry.
			 */
			if ((ld->prot & PROT_WRITE) == 0 ||
			    ROUND_PG(size) == ROUND_PG(off + phdp->p_memsz))
				break;
			/* This phdr has a zfod section */
			ld = _dl_malloc(sizeof(struct load_list));
			ld->start = NULL;
			ld->size = ROUND_PG(off + phdp->p_memsz) -
			    ROUND_PG(size);
			ld->moff = TRUNC_PG(phdp->p_vaddr) +
			    ROUND_PG(size);
			ld->foff = -1;	/* marks an anonymous (zfod) mapping */
			ld->prot = PFLAGS(phdp->p_flags);
			LDLIST_INSERT(ld);
			break;
		case PT_DYNAMIC:
			/* unrelocated vaddr; LOFF is applied after mapping */
			dynp = (Elf_Dyn *)phdp->p_vaddr;
			break;
		default:
			break;
		}
	}

/* Relocation offset: delta between chosen base and the link-time vaddrs. */
#define LOFF ((Elf_Addr)lowld->start - lowld->moff)

	/*
	 * Phase 1: probe with mquery(2) until every entry of the load
	 * list fits at a single relocation offset, without mapping
	 * anything yet.
	 */
retry:
	for (ld = lowld; ld != NULL; ld = ld->next) {
		off_t foff;
		int fd, flags;	/* XXX shadows the 'flags' parameter */

		/*
		 * We don't want to provide the fd/off hint for anything
		 * but the first mapping, all other might have
		 * cache-incoherent aliases and will cause this code to
		 * loop forever.
		 */
		if (ld == lowld) {
			fd = libfile;
			foff = ld->foff;
			flags = 0;
		} else {
			fd = -1;
			foff = 0;
			flags = MAP_FIXED;
		}

		ld->start = (void *)(LOFF + ld->moff);

		/*
		 * Magic here.
		 * The first mquery is done with MAP_FIXED to see if
		 * the mapping we want is free. If it's not, we redo the
		 * mquery without MAP_FIXED to get the next free mapping,
		 * adjust the base mapping address to match this free mapping
		 * and restart the process again.
		 */
		ld->start = _dl_mquery(ld->start, ROUND_PG(ld->size), ld->prot,
		    flags, fd, foff);
		if (_dl_mmap_error(ld->start)) {
			ld->start = (void *)(LOFF + ld->moff);
			ld->start = _dl_mquery(ld->start, ROUND_PG(ld->size),
			    ld->prot, flags & ~MAP_FIXED, fd, foff);
			if (_dl_mmap_error(ld->start))
				goto fail;
		}

		if (ld->start != (void *)(LOFF + ld->moff)) {
			/* slide the whole library and re-probe from the top */
			lowld->start = ld->start - ld->moff + lowld->moff;
			goto retry;
		}
		/*
		 * XXX - we need some kind of boundary condition here,
		 * or fix mquery to not run into the stack
		 */
	}

	/*
	 * Phase 2: the address range is settled — perform the real
	 * mappings.  File-backed entries map from libfile; zfod
	 * entries (foff == -1) map anonymous zeroed pages.
	 */
	for (ld = lowld; ld != NULL; ld = ld->next) {
		int fd, flags;	/* XXX shadows the 'flags' parameter */
		off_t foff;
		void *res;

		if (ld->foff < 0) {
			fd = -1;
			foff = 0;
			flags = MAP_FIXED|MAP_PRIVATE|MAP_ANON;
		} else {
			fd = libfile;
			foff = ld->foff;
			flags = MAP_FIXED|MAP_PRIVATE;
		}
		res = _dl_mmap(ld->start, ROUND_PG(ld->size), ld->prot, flags,
		    fd, foff);
		if (_dl_mmap_error(res))
			goto fail;
		/* Zero out everything past the EOF */
		if ((ld->prot & PROT_WRITE) != 0 && (ld->size & align) != 0)
			_dl_memset((char *)ld->start + ld->size, 0,
			    _dl_pagesz - (ld->size & align));
		load_end = (Elf_Addr)ld->start + ROUND_PG(ld->size);
	}

	prebind_data = prebind_load_fd(libfile, libname);

	_dl_close(libfile);

	/* Relocate the PT_DYNAMIC pointer and register the object. */
	dynp = (Elf_Dyn *)((unsigned long)dynp + LOFF);
	object = _dl_finalize_object(libname, dynp,
	    (Elf_Phdr *)((char *)lowld->start + ehdr->e_phoff), ehdr->e_phnum,
	    type, (Elf_Addr)lowld->start, LOFF);
	if (object) {
		object->prebind_data = prebind_data;
		object->load_size = (Elf_Addr)load_end - (Elf_Addr)lowld->start;
		object->load_list = lowld;
		/* set inode, dev from stat info */
		object->dev = sb.st_dev;
		object->inode = sb.st_ino;
		object->obj_flags |= flags;

	} else {
		/* XXX no point. object is never returned NULL */
		_dl_load_list_free(lowld);
	}
	return(object);
fail:
	_dl_printf("%s: rtld mmap failed mapping %s.\n",
	    _dl_progname, libname);
	_dl_close(libfile);
	_dl_errno = DL_CANT_MMAP;
	_dl_load_list_free(lowld);
	return(0);
}