/*-
 * Copyright 1996-1998 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"

static Elf_Ehdr *get_elf_header(int, const char *);
static int convert_flags(int);	/* Elf flags -> mmap flags */

/*
 * Map a shared object into memory.  The "fd" argument is a file descriptor,
 * which must be open on the object and positioned at its beginning.
 * The "path" argument is a pathname that is used only for error messages.
 *
 * The return value is a pointer to a newly-allocated Obj_Entry structure
 * for the shared object.  Returns NULL on failure.
 */
Obj_Entry *
map_object(int fd, const char *path, const struct stat *sb)
{
    Obj_Entry *obj;
    Elf_Ehdr *hdr;
    int i;
    Elf_Phdr *phdr;
    Elf_Phdr *phlimit;
    Elf_Phdr **segs;
    int nsegs;
    Elf_Phdr *phdyn;
    Elf_Phdr *phinterp;
    Elf_Phdr *phtls;
    caddr_t mapbase;
    caddr_t shlib_base;
    size_t mapsize;
    Elf_Addr base_vaddr;
    Elf_Addr base_vlimit;
    caddr_t base_addr;
    Elf_Off data_offset;
    Elf_Addr data_vaddr;
    Elf_Addr data_vlimit;
    caddr_t data_addr;
    int data_prot;
    int data_flags;
    Elf_Addr clear_vaddr;
    caddr_t clear_addr;
    caddr_t clear_page;
    Elf_Addr phdr_vaddr;
    size_t nclear, phsize;
    Elf_Addr bss_vaddr;
    Elf_Addr bss_vlimit;
    caddr_t bss_addr;
    Elf_Word stack_flags;
    Elf_Addr relro_page;
    size_t relro_size;
    Elf_Addr note_start;
    Elf_Addr note_end;

    hdr = get_elf_header(fd, path);
    if (hdr == NULL)
	return (NULL);

    if (__ld_sharedlib_base) {
	shlib_base = (void *)(intptr_t)strtoul(__ld_sharedlib_base, NULL, 0);
    } else {
	shlib_base = NULL;
    }

    /*
     * Scan the program header entries, and save key information.
     *
     * We expect that the loadable segments are ordered by load address.
     */
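    /*
     * Purely for illustration (not a requirement enforced here): a typical
     * dynamically linked object carries something like
     *
     *   PT_PHDR, PT_INTERP (normally main executables only),
     *   PT_LOAD (r-x text), PT_LOAD (rw- data/bss), PT_DYNAMIC,
     *   PT_TLS (if it uses thread-local storage), PT_GNU_STACK,
     *   PT_GNU_RELRO, and one or more PT_NOTE entries,
     *
     * so the loop below usually collects the PT_LOAD entries in segs[]
     * and records pointers/flags for whichever optional entries appear.
     */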
    phdr = (Elf_Phdr *) ((char *)hdr + hdr->e_phoff);
    phsize = hdr->e_phnum * sizeof (phdr[0]);
    phlimit = phdr + hdr->e_phnum;
    nsegs = -1;
    phdyn = phinterp = phtls = NULL;
    phdr_vaddr = 0;
    relro_page = 0;
    relro_size = 0;
    note_start = 0;
    note_end = 0;
    segs = alloca(sizeof(segs[0]) * hdr->e_phnum);
    stack_flags = RTLD_DEFAULT_STACK_PF_EXEC | PF_R | PF_W;
    while (phdr < phlimit) {
	switch (phdr->p_type) {

	case PT_INTERP:
	    phinterp = phdr;
	    break;

	case PT_LOAD:
	    segs[++nsegs] = phdr;
	    if ((segs[nsegs]->p_align & (PAGE_SIZE - 1)) != 0) {
		_rtld_error("%s: PT_LOAD segment %d not page-aligned",
		    path, nsegs);
		goto error;
	    }
	    break;

	case PT_PHDR:
	    phdr_vaddr = phdr->p_vaddr;
	    phsize = phdr->p_memsz;
	    break;

	case PT_DYNAMIC:
	    phdyn = phdr;
	    break;

	case PT_TLS:
	    phtls = phdr;
	    break;

	case PT_GNU_STACK:
	    stack_flags = phdr->p_flags;
	    break;

	case PT_GNU_RELRO:
	    relro_page = phdr->p_vaddr;
	    relro_size = phdr->p_memsz;
	    break;

	case PT_NOTE:
	    if (phdr->p_offset > PAGE_SIZE ||
		phdr->p_offset + phdr->p_filesz > PAGE_SIZE)
		break;
	    note_start = (Elf_Addr)(char *)hdr + phdr->p_offset;
	    note_end = note_start + phdr->p_filesz;
	    break;
	}

	++phdr;
    }
    if (phdyn == NULL) {
	_rtld_error("%s: object is not dynamically-linked", path);
	goto error;
    }

    if (nsegs < 0) {
	_rtld_error("%s: too few PT_LOAD segments", path);
	goto error;
    }

    /*
     * Map the entire address space of the object, to stake out our
     * contiguous region, and to establish the base address for relocation.
     */
    base_vaddr = trunc_page(segs[0]->p_vaddr);
    base_vlimit = round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz);
    mapsize = base_vlimit - base_vaddr;
    base_addr = (caddr_t) base_vaddr;

    if (base_addr == NULL && shlib_base) {
	size_t limit = 1024 * 256 * 1024;
	size_t offset;

	for (offset = 0; offset < limit; offset += 256 * 1024) {
	    mapbase = mmap(shlib_base + offset, mapsize,
			   PROT_NONE,
			   MAP_ANON | MAP_PRIVATE | MAP_NOCORE |
			   MAP_TRYFIXED,
			   -1, 0);
	    if (mapbase != MAP_FAILED)
		break;
	}
    } else {
	mapbase = mmap(base_addr, mapsize,
		       PROT_NONE,
		       MAP_ANON | MAP_PRIVATE | MAP_NOCORE,
		       -1, 0);
    }
    if (mapbase == (caddr_t) -1) {
	_rtld_error("%s: mmap of entire address space failed: %s",
	    path, rtld_strerror(errno));
	goto error;
    }
    if (base_addr != NULL && mapbase != base_addr) {
	_rtld_error("%s: mmap returned wrong address: wanted %p, got %p",
	    path, base_addr, mapbase);
	goto error1;
    }

    for (i = 0; i <= nsegs; i++) {
	/* Overlay the segment onto the proper region. */
	data_offset = trunc_page(segs[i]->p_offset);
	data_vaddr = trunc_page(segs[i]->p_vaddr);
	data_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_filesz);
	data_addr = mapbase + (data_vaddr - base_vaddr);
	data_prot = convert_prot(segs[i]->p_flags);
	data_flags = convert_flags(segs[i]->p_flags) | MAP_FIXED;
	if (mmap(data_addr, data_vlimit - data_vaddr, data_prot,
	  data_flags, fd, data_offset) == (caddr_t) -1) {
	    _rtld_error("%s: mmap of data failed: %s", path,
		rtld_strerror(errno));
	    goto error1;
	}

	/* Do BSS setup */
	if (segs[i]->p_filesz != segs[i]->p_memsz) {

	    /* Clear any BSS in the last page of the segment. */
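	    /*
	     * Worked example with hypothetical values (4K pages, vaddrs
	     * shown before relocation): for a segment with p_vaddr = 0x1000,
	     * p_filesz = 0x1234 and p_memsz = 0x3000, data_vlimit is
	     * round_page(0x2234) = 0x3000, so clear_vaddr = 0x2234 and
	     * nclear = 0xdcc bytes are zeroed in the last file-backed page,
	     * and the anonymous bss mapping below covers 0x3000..0x4000.
	     */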
	    clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
	    clear_addr = mapbase + (clear_vaddr - base_vaddr);
	    clear_page = mapbase + (trunc_page(clear_vaddr) - base_vaddr);

	    if ((nclear = data_vlimit - clear_vaddr) > 0) {
		/* Make sure the end of the segment is writable */
		if ((data_prot & PROT_WRITE) == 0 && -1 ==
		  mprotect(clear_page, PAGE_SIZE, data_prot|PROT_WRITE)) {
		    _rtld_error("%s: mprotect failed: %s", path,
			rtld_strerror(errno));
		    goto error1;
		}

		memset(clear_addr, 0, nclear);

		/*
		 * Reset the data protection and re-enable the segment for
		 * core dumps, since we modified it.
		 */
		if ((data_prot & PROT_WRITE) == 0) {
		    madvise(clear_page, PAGE_SIZE, MADV_CORE);
		    mprotect(clear_page, PAGE_SIZE, data_prot);
		}
	    }

	    /* Overlay the BSS segment onto the proper region. */
	    bss_vaddr = data_vlimit;
	    bss_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_memsz);
	    bss_addr = mapbase + (bss_vaddr - base_vaddr);
	    if (bss_vlimit > bss_vaddr) {	/* There is something to do */
		if (mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot,
		  data_flags | MAP_ANON, -1, 0) == (caddr_t)-1) {
		    _rtld_error("%s: mmap of bss failed: %s", path,
			rtld_strerror(errno));
		    goto error1;
		}
	    }
	}

	if (phdr_vaddr == 0 && data_offset <= hdr->e_phoff &&
	  (data_vlimit - data_vaddr + data_offset) >=
	  (hdr->e_phoff + hdr->e_phnum * sizeof (Elf_Phdr))) {
	    phdr_vaddr = data_vaddr + hdr->e_phoff - data_offset;
	}
    }

    obj = obj_new();
    if (sb != NULL) {
	obj->dev = sb->st_dev;
	obj->ino = sb->st_ino;
    }
    obj->mapbase = mapbase;
    obj->mapsize = mapsize;
    obj->textsize = round_page(segs[0]->p_vaddr + segs[0]->p_memsz) -
      base_vaddr;
    obj->vaddrbase = base_vaddr;
    obj->relocbase = mapbase - base_vaddr;
    obj->dynamic = (const Elf_Dyn *) (obj->relocbase + phdyn->p_vaddr);
    if (hdr->e_entry != 0)
	obj->entry = (caddr_t) (obj->relocbase + hdr->e_entry);
    if (phdr_vaddr != 0) {
	obj->phdr = (const Elf_Phdr *) (obj->relocbase + phdr_vaddr);
    } else {
	obj->phdr = malloc(phsize);
	if (obj->phdr == NULL) {
	    obj_free(obj);
	    _rtld_error("%s: cannot allocate program header", path);
	    goto error1;
	}
	memcpy((char *)obj->phdr, (char *)hdr + hdr->e_phoff, phsize);
	obj->phdr_alloc = true;
    }
    obj->phsize = phsize;
    if (phinterp != NULL)
	obj->interp = (const char *) (obj->relocbase + phinterp->p_vaddr);
    if (phtls != NULL) {
	tls_dtv_generation++;
	obj->tlsindex = ++tls_max_index;
	obj->tlssize = phtls->p_memsz;
	obj->tlsalign = phtls->p_align;
	obj->tlsinitsize = phtls->p_filesz;
	obj->tlsinit = mapbase + phtls->p_vaddr;
    }
    obj->stack_flags = stack_flags;
    if (relro_size) {
	obj->relro_page = obj->relocbase + trunc_page(relro_page);
	obj->relro_size = round_page(relro_size);
    }
    if (note_start < note_end)
	digest_notes(obj, note_start, note_end);
    munmap(hdr, PAGE_SIZE);
    return (obj);

error1:
    munmap(mapbase, mapsize);
error:
    munmap(hdr, PAGE_SIZE);
    return (NULL);
}

static Elf_Ehdr *
get_elf_header(int fd, const char *path)
{
    Elf_Ehdr *hdr;

    /* DragonFly mmap does not have MAP_PREFAULT_READ */
    hdr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
    if (hdr == (Elf_Ehdr *)MAP_FAILED) {
	_rtld_error("%s: read error: %s", path, rtld_strerror(errno));
	return (NULL);
    }

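    /*
     * Note that only the first PAGE_SIZE bytes of the object are mapped
     * here.  Besides the ELF header itself, map_object() reads the program
     * header table and early PT_NOTE contents through this mapping, which
     * is why both are required (below, and in map_object()'s PT_NOTE case)
     * to lie entirely within the first page.
     */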
    /* Make sure the file is valid */
    if (!IS_ELF(*hdr)) {
	_rtld_error("%s: invalid file format", path);
	goto error;
    }
    if (hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
      hdr->e_ident[EI_DATA] != ELF_TARG_DATA) {
	_rtld_error("%s: unsupported file layout", path);
	goto error;
    }
    if (hdr->e_ident[EI_VERSION] != EV_CURRENT ||
      hdr->e_version != EV_CURRENT) {
	_rtld_error("%s: unsupported file version", path);
	goto error;
    }
    if (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN) {
	_rtld_error("%s: unsupported file type", path);
	goto error;
    }
    if (hdr->e_machine != ELF_TARG_MACH) {
	_rtld_error("%s: unsupported machine", path);
	goto error;
    }

    /*
     * We rely on the program header being in the first page.  This is
     * not strictly required by the ABI specification, but it seems to
     * always be true in practice.  And, it simplifies things considerably.
     */
    if (hdr->e_phentsize != sizeof(Elf_Phdr)) {
	_rtld_error(
	    "%s: invalid shared object: e_phentsize != sizeof(Elf_Phdr)", path);
	goto error;
    }
    if (hdr->e_phoff + hdr->e_phnum * sizeof(Elf_Phdr) >
      (size_t)PAGE_SIZE) {
	_rtld_error("%s: program header too large", path);
	goto error;
    }
    return (hdr);

error:
    munmap(hdr, PAGE_SIZE);
    return (NULL);
}

void
obj_free(Obj_Entry *obj)
{
    Objlist_Entry *elm;

    if (obj->tls_static)
	free_tls_offset(obj);
    while (obj->needed != NULL) {
	Needed_Entry *needed = obj->needed;
	obj->needed = needed->next;
	free(needed);
    }
    while (!STAILQ_EMPTY(&obj->names)) {
	Name_Entry *entry = STAILQ_FIRST(&obj->names);
	STAILQ_REMOVE_HEAD(&obj->names, link);
	free(entry);
    }
    while (!STAILQ_EMPTY(&obj->dldags)) {
	elm = STAILQ_FIRST(&obj->dldags);
	STAILQ_REMOVE_HEAD(&obj->dldags, link);
	free(elm);
    }
    while (!STAILQ_EMPTY(&obj->dagmembers)) {
	elm = STAILQ_FIRST(&obj->dagmembers);
	STAILQ_REMOVE_HEAD(&obj->dagmembers, link);
	free(elm);
    }
    if (obj->vertab)
	free(obj->vertab);
    if (obj->origin_path)
	free(obj->origin_path);
    if (obj->z_origin)
	free(obj->rpath);
    if (obj->priv)
	free(obj->priv);
    if (obj->path)
	free(obj->path);
    if (obj->phdr_alloc)
	free((void *)obj->phdr);
    free(obj);
}

Obj_Entry *
obj_new(void)
{
    Obj_Entry *obj;

    obj = CNEW(Obj_Entry);
    STAILQ_INIT(&obj->dldags);
    STAILQ_INIT(&obj->dagmembers);
    STAILQ_INIT(&obj->names);
    return obj;
}

/*
 * Given a set of ELF protection flags, return the corresponding protection
 * flags for MMAP.
 */
int
convert_prot(int elfflags)
{
    int prot = 0;
    if (elfflags & PF_R)
	prot |= PROT_READ;
    if (elfflags & PF_W)
	prot |= PROT_WRITE;
    if (elfflags & PF_X)
	prot |= PROT_EXEC;
    return prot;
}

static int
convert_flags(int elfflags)
{
    int flags = MAP_PRIVATE;	/* All mappings are private */

    /*
     * Read-only mappings are marked "MAP_NOCORE", because they can be
     * reconstructed by a debugger.
     */
    if (!(elfflags & PF_W))
	flags |= MAP_NOCORE;
    return flags;
}
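/*
 * Illustration only (not used by the code above): the common segment flag
 * combinations convert as
 *
 *   PF_R | PF_X  ->  PROT_READ | PROT_EXEC,   MAP_PRIVATE | MAP_NOCORE  (text)
 *   PF_R | PF_W  ->  PROT_READ | PROT_WRITE,  MAP_PRIVATE               (data)
 *   PF_R         ->  PROT_READ,               MAP_PRIVATE | MAP_NOCORE  (rodata)
 */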