/*	$OpenBSD: library.c,v 1.79 2016/08/12 20:39:01 deraadt Exp $ */

/*
 * Copyright (c) 2002 Dale Rahn
 * Copyright (c) 1998 Per Fogelstrom, Opsycon AB
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#define _DYN_LOADER

#include <sys/types.h>
#include <fcntl.h>
#include <sys/mman.h>

#include "syscall.h"
#include "archdep.h"
#include "resolve.h"
#include "sod.h"

#define PFLAGS(X) ((((X) & PF_R) ? PROT_READ : 0) | \
		   (((X) & PF_W) ? PROT_WRITE : 0) | \
		   (((X) & PF_X) ? PROT_EXEC : 0))
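
/*
 * For example, PFLAGS(PF_R|PF_X) evaluates to (PROT_READ|PROT_EXEC) and
 * PFLAGS(PF_R|PF_W) to (PROT_READ|PROT_WRITE): a segment's p_flags bits
 * translate directly into mmap(2) protection bits.
 */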

void
_dl_load_list_free(struct load_list *load_list)
{
	struct load_list *next;

	while (load_list != NULL) {
		next = load_list->next;
		_dl_free(load_list);
		load_list = next;
	}
}

void
_dl_unload_shlib(elf_object_t *object)
{
	struct dep_node *n;
	elf_object_t *load_object = object->load_object;

	/*
	 * If our load object has become unreferenced then we lost the
	 * last group reference to it, so the entire group should be taken
	 * down.  The current object is somewhere below load_object in
	 * the child_list tree, so it'll get cleaned up by the recursion.
	 * That means we can just switch here to the load object.
	 */
	if (load_object != object && OBJECT_REF_CNT(load_object) == 0 &&
	    (load_object->status & STAT_UNLOADED) == 0) {
		DL_DEB(("unload_shlib switched from %s to %s\n",
		    object->load_name, load_object->load_name));
		object = load_object;
		goto unload;
	}

	DL_DEB(("unload_shlib called on %s\n", object->load_name));
	if (OBJECT_REF_CNT(object) == 0 &&
	    (object->status & STAT_UNLOADED) == 0) {
unload:
		object->status |= STAT_UNLOADED;
		TAILQ_FOREACH(n, &object->child_list, next_sib)
			_dl_unload_shlib(n->data);
		TAILQ_FOREACH(n, &object->grpref_list, next_sib)
			_dl_unload_shlib(n->data);
		DL_DEB(("unload_shlib unloading on %s\n", object->load_name));
		_dl_load_list_free(object->load_list);
		_dl_munmap((void *)object->load_base, object->load_size);
		_dl_remove_object(object);
	}
}

elf_object_t *
_dl_tryload_shlib(const char *libname, int type, int flags)
{
	int libfile, i;
	struct load_list *next_load, *load_list = NULL;
	Elf_Addr maxva = 0, minva = ELFDEFNNAME(NO_ADDR);
	Elf_Addr libaddr, loff, align = _dl_pagesz - 1;
	Elf_Addr relro_addr = 0, relro_size = 0;
	elf_object_t *object;
	char hbuf[4096];
	Elf_Dyn *dynp = NULL;
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdp;
	Elf_Phdr *ptls = NULL;
	struct stat sb;

#define ROUND_PG(x) (((x) + align) & ~(align))
#define TRUNC_PG(x) ((x) & ~(align))
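
	/*
	 * With 4 KB pages (the common case; _dl_pagesz is set at startup),
	 * align == 0xfff, so e.g. TRUNC_PG(0x12345) == 0x12000 and
	 * ROUND_PG(0x12345) == 0x13000: addresses are truncated down and
	 * sizes rounded up to whole pages before mapping.
	 */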

	libfile = _dl_open(libname, O_RDONLY | O_CLOEXEC);
	if (libfile < 0) {
		_dl_errno = DL_CANT_OPEN;
		return(0);
	}

	if (_dl_fstat(libfile, &sb) < 0) {
		_dl_errno = DL_CANT_OPEN;
		_dl_close(libfile);
		return(0);
	}

	for (object = _dl_objects; object != NULL; object = object->next) {
		if (object->dev == sb.st_dev &&
		    object->inode == sb.st_ino) {
			object->obj_flags |= flags & DF_1_GLOBAL;
			_dl_close(libfile);
			if (_dl_loading_object == NULL)
				_dl_loading_object = object;
			if (object->load_object != _dl_objects &&
			    object->load_object != _dl_loading_object) {
				_dl_link_grpref(object->load_object,
				    _dl_loading_object);
			}
			return(object);
		}
	}

	_dl_read(libfile, hbuf, sizeof(hbuf));
	ehdr = (Elf_Ehdr *)hbuf;
	if (ehdr->e_ident[0] != ELFMAG0 || ehdr->e_ident[1] != ELFMAG1 ||
	    ehdr->e_ident[2] != ELFMAG2 || ehdr->e_ident[3] != ELFMAG3 ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != MACHID) {
		_dl_close(libfile);
		_dl_errno = DL_NOT_ELF;
		return(0);
	}

	/*
	 * Alright, we might have a winner!
	 * Figure out how much VM space we need.
	 */
	phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff);
	for (i = 0; i < ehdr->e_phnum; i++, phdp++) {
		switch (phdp->p_type) {
		case PT_LOAD:
			if (phdp->p_vaddr < minva)
				minva = phdp->p_vaddr;
			if (phdp->p_vaddr + phdp->p_memsz > maxva)
				maxva = phdp->p_vaddr + phdp->p_memsz;
			break;
		case PT_DYNAMIC:
			dynp = (Elf_Dyn *)phdp->p_vaddr;
			break;
		case PT_TLS:
			if (phdp->p_filesz > phdp->p_memsz) {
				_dl_printf("%s: invalid tls data in %s.\n",
				    __progname, libname);
				_dl_close(libfile);
				_dl_errno = DL_CANT_LOAD_OBJ;
				return(0);
			}
			if (!_dl_tib_static_done) {
				ptls = phdp;
				break;
			}
			_dl_printf("%s: unsupported TLS program header in %s\n",
			    __progname, libname);
			_dl_close(libfile);
			_dl_errno = DL_CANT_LOAD_OBJ;
			return(0);
		default:
			break;
		}
	}
	minva = TRUNC_PG(minva);
	maxva = ROUND_PG(maxva);

	/*
	 * We map the entire area to see that we can get the VM
	 * space required.  Map it inaccessible to start with.
	 *
	 * We must map the same file we'll map later, otherwise the VM
	 * system won't be able to align the mapping properly
	 * on VAC architectures.
	 */
	libaddr = (Elf_Addr)_dl_mmap(0, maxva - minva, PROT_NONE,
	    MAP_PRIVATE|MAP_FILE, libfile, 0);
	if (_dl_mmap_error(libaddr)) {
		_dl_printf("%s: rtld mmap failed mapping %s.\n",
		    __progname, libname);
		_dl_close(libfile);
		_dl_errno = DL_CANT_MMAP;
		return(0);
	}

	loff = libaddr - minva;
	phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff);

	for (i = 0; i < ehdr->e_phnum; i++, phdp++) {
		switch (phdp->p_type) {
		case PT_LOAD: {
			char *start = (char *)(TRUNC_PG(phdp->p_vaddr)) + loff;
			Elf_Addr off = (phdp->p_vaddr & align);
			Elf_Addr size = off + phdp->p_filesz;
			int flags = PFLAGS(phdp->p_flags);
			void *res;

			/*
			 * Initially map W|X segments without X
			 * permission.  After we're done with the
			 * initial relocation processing, we will make
			 * these segments read-only and add back the X
			 * permission.  This way we maintain W^X at
			 * all times.
			 */
			if ((flags & PROT_WRITE) && (flags & PROT_EXEC))
				flags &= ~PROT_EXEC;

			if (size != 0) {
				res = _dl_mmap(start, ROUND_PG(size), flags,
				    MAP_FIXED|MAP_PRIVATE, libfile,
				    TRUNC_PG(phdp->p_offset));
			} else
				res = NULL;	/* silence gcc */
			next_load = _dl_calloc(1, sizeof(struct load_list));
			if (next_load == NULL)
				_dl_exit(7);
			next_load->next = load_list;
			load_list = next_load;
			next_load->start = start;
			next_load->size = size;
			next_load->prot = PFLAGS(phdp->p_flags);
			if (size != 0 && _dl_mmap_error(res)) {
				_dl_printf("%s: rtld mmap failed mapping %s.\n",
				    __progname, libname);
				_dl_close(libfile);
				_dl_errno = DL_CANT_MMAP;
				_dl_munmap((void *)libaddr, maxva - minva);
				_dl_load_list_free(load_list);
				return(0);
			}
			if (phdp->p_flags & PF_W) {
				/* Zero out everything past the EOF */
				if ((size & align) != 0)
					_dl_memset(start + size, 0,
					    _dl_pagesz - (size & align));
				if (ROUND_PG(size) ==
				    ROUND_PG(off + phdp->p_memsz))
					continue;
				start = start + ROUND_PG(size);
				size = ROUND_PG(off + phdp->p_memsz) -
				    ROUND_PG(size);
				res = _dl_mmap(start, size, flags,
				    MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);
				if (_dl_mmap_error(res)) {
					_dl_printf("%s: rtld mmap failed mapping %s.\n",
					    __progname, libname);
					_dl_close(libfile);
					_dl_errno = DL_CANT_MMAP;
					_dl_munmap((void *)libaddr, maxva - minva);
					_dl_load_list_free(load_list);
					return(0);
				}
			}
			break;
		}

		case PT_OPENBSD_RANDOMIZE:
			_dl_arc4randombuf((char *)(phdp->p_vaddr + loff),
			    phdp->p_memsz);
			break;

		case PT_GNU_RELRO:
			relro_addr = phdp->p_vaddr + loff;
			relro_size = phdp->p_memsz;
			break;

		default:
			break;
		}
	}
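
	/*
	 * At this point every PT_LOAD segment is mapped at loff + p_vaddr:
	 * file-backed pages carry the segment contents, the tail of a
	 * writable segment's last file-backed page has been zeroed, and
	 * any remaining .bss pages are anonymous zero-fill.  Gaps between
	 * segments keep the PROT_NONE protection of the original
	 * reservation.
	 */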

	_dl_close(libfile);

	dynp = (Elf_Dyn *)((unsigned long)dynp + loff);
	object = _dl_finalize_object(libname, dynp,
	    (Elf_Phdr *)((char *)libaddr + ehdr->e_phoff), ehdr->e_phnum, type,
	    libaddr, loff);
	if (object) {
		object->load_size = maxva - minva;	/*XXX*/
		object->load_list = load_list;
		/* set inode, dev from stat info */
		object->dev = sb.st_dev;
		object->inode = sb.st_ino;
		object->obj_flags |= flags;
		object->relro_addr = relro_addr;
		object->relro_size = relro_size;
		_dl_set_sod(object->load_name, &object->sod);
		if (ptls != NULL && ptls->p_memsz)
			_dl_set_tls(object, ptls, libaddr, libname);
	} else {
		_dl_munmap((void *)libaddr, maxva - minva);
		_dl_load_list_free(load_list);
	}
	return(object);
}
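
/*
 * Sketch of the expected call pattern (illustrative only; the search-path
 * logic that chooses candidate libnames lives elsewhere in ld.so):
 *
 *	object = _dl_tryload_shlib(libname, type, flags);
 *	if (object == NULL && _dl_errno == DL_CANT_OPEN)
 *		... try the next candidate path ...
 */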