1 /* $OpenBSD: loader.c,v 1.212 2023/02/20 00:51:57 gnezdo Exp $ */ 2 3 /* 4 * Copyright (c) 1998 Per Fogelstrom, Opsycon AB 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS 16 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY 19 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 */ 28 29 #define _DYN_LOADER 30 31 #include <sys/types.h> 32 #include <sys/mman.h> 33 #include <sys/syscall.h> 34 #include <sys/exec.h> 35 #ifdef __i386__ 36 # include <machine/vmparam.h> 37 #endif 38 #include <string.h> 39 #include <link.h> 40 #include <limits.h> /* NAME_MAX */ 41 #include <dlfcn.h> 42 #include <tib.h> 43 44 #include "syscall.h" 45 #include "util.h" 46 #include "resolve.h" 47 #include "path.h" 48 #include "sod.h" 49 50 /* 51 * Local decls. 
 */

/* Local declarations. */
unsigned long _dl_boot(const char **, char **, const long, long *) __boot;
void _dl_debug_state(void);
void _dl_setup_env(const char *_argv0, char **_envp) __boot;
void _dl_dtors(void);
void _dl_dopreload(char *_paths) __boot;
void _dl_fixup_user_env(void) __boot;
void _dl_call_preinit(elf_object_t *) __boot;
void _dl_call_init_recurse(elf_object_t *object, int initfirst);
void _dl_clean_boot(void);
static inline void unprotect_if_textrel(elf_object_t *_object);
static inline void reprotect_if_textrel(elf_object_t *_object);
static void _dl_rreloc(elf_object_t *_object);

/*
 * Loader state that is written only during startup; the __relro
 * attribute places it in a segment made read-only once setup is done.
 */
int _dl_pagesz __relro = 4096;
int _dl_bindnow __relro = 0;
int _dl_debug __relro = 0;
int _dl_trust __relro = 0;
char **_dl_libpath __relro = NULL;
const char **_dl_argv __relro = NULL;
int _dl_argc __relro = 0;

/* Boot-time-only data; presumably unmapped by _dl_clean_boot — see below. */
char *_dl_preload __boot_data = NULL;
char *_dl_tracefmt1 __boot_data = NULL;
char *_dl_tracefmt2 __boot_data = NULL;
char *_dl_traceprog __boot_data = NULL;
void *_dl_exec_hint __boot_data = NULL;

char **environ = NULL;
char *__progname = NULL;

int _dl_traceld;
struct r_debug *_dl_debug_map;

/*
 * Callback table handed to libc (via the init_array _dl_cb_cb cookie)
 * so libc can reach into ld.so without exported symbols.
 */
static dl_cb_cb _dl_cb_cb;
const struct dl_cb_0 callbacks_0 = {
	.dl_allocate_tib = &_dl_allocate_tib,
	.dl_free_tib = &_dl_free_tib,
#if DO_CLEAN_BOOT
	.dl_clean_boot = &_dl_clean_boot,
#endif
	.dlopen = &dlopen,
	.dlclose = &dlclose,
	.dlsym = &dlsym,
	.dladdr = &dladdr,
	.dlctl = &dlctl,
	.dlerror = &dlerror,
	.dl_iterate_phdr = &dl_iterate_phdr,
};
/*
 * Run dtors for a single object: DT_FINI_ARRAY entries (in reverse
 * order of registration, per the ELF spec) and then the old-style
 * DT_FINI function, if any.
 */
void
_dl_run_dtors(elf_object_t *obj)
{
	if (obj->dyn.fini_array) {
		int num = obj->dyn.fini_arraysz / sizeof(Elf_Addr);
		int i;

		DL_DEB(("doing finiarray obj %p @%p: [%s]\n",
		    obj, obj->dyn.fini_array, obj->load_name));
		/* fini_array runs back-to-front */
		for (i = num; i > 0; i--)
			(*obj->dyn.fini_array[i-1])();
	}

	if (obj->dyn.fini) {
		DL_DEB(("doing dtors obj %p @%p: [%s]\n",
		    obj, obj->dyn.fini, obj->load_name));
		(*obj->dyn.fini)();
	}
}

/*
 * Run dtors for all objects that are eligible.
 *
 * Repeated passes: each pass marks candidates FINI_READY, clears the
 * mark on every object still reachable as some other candidate's child
 * (so parents run before their dependencies), then runs the survivors.
 * DF_1_INITFIRST objects are skipped until everything else is done,
 * mirroring their init-first / fini-last contract.
 */
void
_dl_run_all_dtors(void)
{
	elf_object_t *node;
	int fini_complete;
	int skip_initfirst;
	int initfirst_skipped;

	fini_complete = 0;
	skip_initfirst = 1;
	initfirst_skipped = 0;

	while (fini_complete == 0) {
		fini_complete = 1;
		/* pass 1: mark every unloaded, init-done, not-yet-fini object */
		for (node = _dl_objects;
		    node != NULL;
		    node = node->next) {
			if ((node->dyn.fini || node->dyn.fini_array) &&
			    (OBJECT_REF_CNT(node) == 0) &&
			    (node->status & STAT_INIT_DONE) &&
			    ((node->status & STAT_FINI_DONE) == 0)) {
				if (skip_initfirst &&
				    (node->obj_flags & DF_1_INITFIRST))
					initfirst_skipped = 1;
				else
					node->status |= STAT_FINI_READY;
			}
		}
		/* pass 2: children of a candidate must wait for a later round */
		for (node = _dl_objects;
		    node != NULL;
		    node = node->next) {
			if ((node->dyn.fini || node->dyn.fini_array) &&
			    (OBJECT_REF_CNT(node) == 0) &&
			    (node->status & STAT_INIT_DONE) &&
			    ((node->status & STAT_FINI_DONE) == 0) &&
			    (!skip_initfirst ||
			    (node->obj_flags & DF_1_INITFIRST) == 0)) {
				struct object_vector vec = node->child_vec;
				int i;

				for (i = 0; i < vec.len; i++)
					vec.vec[i]->status &= ~STAT_FINI_READY;
			}
		}

		/* pass 3: run dtors for whatever stayed ready */
		for (node = _dl_objects;
		    node != NULL;
		    node = node->next) {
			if (node->status & STAT_FINI_READY) {
				fini_complete = 0;
				node->status |= STAT_FINI_DONE;
				node->status &= ~STAT_FINI_READY;
				_dl_run_dtors(node);
			}
		}

		/* all ordinary objects done: one final round for INITFIRST */
		if (fini_complete && initfirst_skipped)
			fini_complete = initfirst_skipped =
			    skip_initfirst = 0;
	}
}

/*
 * Routine to walk through all of the objects except the first
 * (main executable).
 *
 * Big question, should dlopen()ed objects be unloaded before or after
 * the destructor for the main application runs?
 */
void
_dl_dtors(void)
{
	_dl_thread_kern_stop();

	/* ORDER? */
	_dl_unload_dlopen();

	DL_DEB(("doing dtors\n"));

	/* drop the exe's own reference so its dtors become eligible */
	_dl_objects->opencount--;
	_dl_notify_unload_shlib(_dl_objects);

	_dl_run_all_dtors();
}

#if DO_CLEAN_BOOT
/*
 * Unmap and make immutable the boot-only text once startup is over,
 * shrinking the post-startup attack surface.
 */
void
_dl_clean_boot(void)
{
	extern char boot_text_start[], boot_text_end[];
#if 0 /* XXX breaks boehm-gc?!? */
	extern char boot_data_start[], boot_data_end[];
#endif

	_dl_mmap(boot_text_start, boot_text_end - boot_text_start,
	    PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
	_dl_mimmutable(boot_text_start, boot_text_end - boot_text_start);
#if 0 /* XXX breaks boehm-gc?!? */
	_dl_mmap(boot_data_start, boot_data_end - boot_data_start,
	    PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
	_dl_mimmutable(boot_data_start, boot_data_end - boot_data_start);
#endif
}
#endif /* DO_CLEAN_BOOT */
/*
 * Load each colon-separated LD_PRELOAD entry as a direct child of the
 * main executable, before its regular DT_NEEDED dependencies.
 */
void
_dl_dopreload(char *paths)
{
	char *cp, *dp;
	elf_object_t *shlib;
	int count;

	dp = paths = _dl_strdup(paths);
	if (dp == NULL)
		_dl_oom();

	/* preallocate child_vec for the LD_PRELOAD objects */
	count = 1;
	while (*dp++ != '\0')
		if (*dp == ':')
			count++;
	object_vec_grow(&_dl_objects->child_vec, count);

	dp = paths;
	while ((cp = _dl_strsep(&dp, ":")) != NULL) {
		shlib = _dl_load_shlib(cp, _dl_objects, OBJTYPE_LIB,
		    _dl_objects->obj_flags, 1);
		if (shlib == NULL)
			_dl_die("can't preload library '%s'", cp);
		_dl_add_object(shlib);
		_dl_link_child(shlib, _dl_objects);
	}
	_dl_free(paths);
	return;
}

/*
 * grab interesting environment variables, zap bad env vars if
 * issetugid, and set the exported environ and __progname variables
 */
void
_dl_setup_env(const char *argv0, char **envp)
{
	static char progname_storage[NAME_MAX+1] = "";

	/*
	 * Don't allow someone to change the search paths if he runs
	 * a suid program without credentials high enough.
	 */
	_dl_trust = !_dl_issetugid();
	if (!_dl_trust) {	/* Zap paths if s[ug]id... */
		_dl_unsetenv("LD_DEBUG", envp);
		_dl_unsetenv("LD_LIBRARY_PATH", envp);
		_dl_unsetenv("LD_PRELOAD", envp);
		_dl_unsetenv("LD_BIND_NOW", envp);
	} else {
		/*
		 * Get paths to various things we are going to use.
		 */
		_dl_debug = _dl_getenv("LD_DEBUG", envp) != NULL;
		_dl_libpath = _dl_split_path(_dl_getenv("LD_LIBRARY_PATH",
		    envp));
		_dl_preload = _dl_getenv("LD_PRELOAD", envp);
		_dl_bindnow = _dl_getenv("LD_BIND_NOW", envp) != NULL;
	}

	/* these are usable even in setugid processes */
	_dl_traceld = _dl_getenv("LD_TRACE_LOADED_OBJECTS", envp) != NULL;
	_dl_tracefmt1 = _dl_getenv("LD_TRACE_LOADED_OBJECTS_FMT1", envp);
	_dl_tracefmt2 = _dl_getenv("LD_TRACE_LOADED_OBJECTS_FMT2", envp);
	_dl_traceprog = _dl_getenv("LD_TRACE_LOADED_OBJECTS_PROGNAME", envp);

	environ = envp;

	_dl_trace_setup(envp);

	/* basename(argv0) into static storage; survives env changes */
	if (argv0 != NULL) {		/* NULL ptr if argc = 0 */
		const char *p = _dl_strrchr(argv0, '/');

		if (p == NULL)
			p = argv0;
		else
			p++;
		_dl_strlcpy(progname_storage, p, sizeof(progname_storage));
	}
	__progname = progname_storage;
}

/*
 * Walk the object list and load every DT_NEEDED dependency of every
 * object, in randomized order (to randomize layout).  Returns 0 on
 * success; 1 if a load failed while not booting (booting failures die).
 * While booting, also registers libc's execve with the kernel's
 * pinsyscall mechanism.
 */
int
_dl_load_dep_libs(elf_object_t *object, int flags, int booting)
{
	elf_object_t *dynobj, *obj;
	Elf_Dyn *dynp;
	unsigned int loop;
	int libcount;
	int depflags, nodelete = 0;

	dynobj = object;
	while (dynobj) {
		DL_DEB(("examining: '%s'\n", dynobj->load_name));
		libcount = 0;

		/* propagate DF_1_NOW to deplibs (can be set by dynamic tags) */
		depflags = flags | (dynobj->obj_flags & DF_1_NOW);
		if (booting || object->nodelete)
			nodelete = 1;

		for (dynp = dynobj->load_dyn; dynp->d_tag; dynp++) {
			if (dynp->d_tag == DT_NEEDED) {
				libcount++;
			}
		}

		if ( libcount != 0) {
			struct listent {
				Elf_Dyn *dynp;
				elf_object_t *depobj;
			} *liblist;
			int *randomlist;

			liblist = _dl_reallocarray(NULL, libcount,
			    sizeof(struct listent));
			randomlist = _dl_reallocarray(NULL, libcount,
			    sizeof(int));

			if (liblist == NULL || randomlist == NULL)
				_dl_oom();

			for (dynp = dynobj->load_dyn, loop = 0; dynp->d_tag;
			    dynp++)
				if (dynp->d_tag == DT_NEEDED)
					liblist[loop++].dynp = dynp;

			/* Randomize these (Fisher-Yates shuffle) */
			for (loop = 0; loop < libcount; loop++)
				randomlist[loop] = loop;

			for (loop = 1; loop < libcount; loop++) {
				unsigned int rnd;
				int cur;
				rnd = _dl_arc4random();
				rnd = rnd % (loop+1);
				cur = randomlist[rnd];
				randomlist[rnd] = randomlist[loop];
				randomlist[loop] = cur;
			}

			/* load in shuffled order ... */
			for (loop = 0; loop < libcount; loop++) {
				elf_object_t *depobj;
				const char *libname;
				libname = dynobj->dyn.strtab;
				libname +=
				    liblist[randomlist[loop]].dynp->d_un.d_val;
				DL_DEB(("loading: %s required by %s\n", libname,
				    dynobj->load_name));
				depobj = _dl_load_shlib(libname, dynobj,
				    OBJTYPE_LIB, depflags, nodelete);
				if (depobj == 0) {
					if (booting) {
						_dl_die(
						    "can't load library '%s'",
						    libname);
					}
					DL_DEB(("dlopen: failed to open %s\n",
					    libname));
					_dl_free(liblist);
					_dl_free(randomlist);
					return (1);
				}
				liblist[randomlist[loop]].depobj = depobj;
			}

			/* ... but link children in DT_NEEDED order */
			object_vec_grow(&dynobj->child_vec, libcount);
			for (loop = 0; loop < libcount; loop++) {
				_dl_add_object(liblist[loop].depobj);
				_dl_link_child(liblist[loop].depobj, dynobj);
			}
			_dl_free(liblist);
			_dl_free(randomlist);
		}
		dynobj = dynobj->next;
	}

	_dl_cache_grpsym_list_setup(object);

	/* while booting, pin the execve syscall to libc's stub */
	for (obj = _dl_objects; booting && obj != NULL; obj = obj->next) {
		char *soname = (char *)obj->Dyn.info[DT_SONAME];
		struct sym_res sr;

		if (!soname || _dl_strncmp(soname, "libc.so.", 8))
			continue;
		sr = _dl_find_symbol("execve",
		    SYM_SEARCH_SELF|SYM_PLT|SYM_WARNNOTFOUND, NULL, obj);
		if (sr.sym)
			_dl_pinsyscall(SYS_execve,
			    (void *)sr.obj->obj_base + sr.sym->st_value,
			    sr.sym->st_size);
		_dl_memset(&sr, 0, sizeof sr);
		break;
	}
	return(0);
}
/* do any RWX -> RX fixups for executable PLTs and apply GNU_RELRO */
static inline void
_dl_self_relro(long loff)
{
	Elf_Ehdr *ehdp;
	Elf_Phdr *phdp;
	int i;

	/* walk ld.so's own program headers at load offset loff */
	ehdp = (Elf_Ehdr *)loff;
	phdp = (Elf_Phdr *)(loff + ehdp->e_phoff);
	for (i = 0; i < ehdp->e_phnum; i++, phdp++) {
		switch (phdp->p_type) {
#if defined(__alpha__) || defined(__hppa__) || defined(__powerpc__) || \
    defined(__sparc64__)
		/* these platforms have W|X PLT segments: drop to read-only */
		case PT_LOAD:
			if ((phdp->p_flags & (PF_X | PF_W)) != (PF_X | PF_W))
				break;
			_dl_mprotect((void *)(phdp->p_vaddr + loff),
			    phdp->p_memsz, PROT_READ);
			break;
#endif
		case PT_GNU_RELRO:
			_dl_mprotect((void *)(phdp->p_vaddr + loff),
			    phdp->p_memsz, PROT_READ);
			_dl_mimmutable((void *)(phdp->p_vaddr + loff),
			    phdp->p_memsz);
			break;
		}
	}
}

/* map ELF segment flags to mmap/mprotect protection bits */
#define PFLAGS(X) ((((X) & PF_R) ? PROT_READ : 0) | \
		   (((X) & PF_W) ? PROT_WRITE : 0) | \
		   (((X) & PF_X) ? PROT_EXEC : 0))

/*
 * This is the dynamic loader entrypoint. When entering here, depending
 * on architecture type, the stack and registers are set up according
 * to the architectures ABI specification. The first thing required
 * to do is to dig out all information we need to accomplish our task.
 */
unsigned long
_dl_boot(const char **argv, char **envp, const long dyn_loff, long *dl_data)
{
	struct elf_object *exe_obj;	/* Pointer to executable object */
	struct elf_object *dyn_obj;	/* Pointer to ld.so object */
	struct r_debug **map_link;	/* Where to put pointer for gdb */
	struct r_debug *debug_map;
	struct load_list *next_load, *load_list = NULL;
	Elf_Dyn *dynp;
	Elf_Phdr *phdp;
	Elf_Ehdr *ehdr;
	char *us = NULL;
	unsigned int loop;
	int failed;
	struct dep_node *n;
	Elf_Addr minva, maxva, exe_loff, exec_end, cur_exec_end;
	Elf_Addr relro_addr = 0, relro_size = 0;
	Elf_Phdr *ptls = NULL;
	int align;

	if (dl_data[AUX_pagesz] != 0)
		_dl_pagesz = dl_data[AUX_pagesz];
	_dl_malloc_init();

	_dl_argv = argv;
	while (_dl_argv[_dl_argc] != NULL)
		_dl_argc++;
	_dl_setup_env(argv[0], envp);

	/*
	 * Make read-only the GOT and PLT and variables initialized
	 * during the ld.so setup above.
	 */
	_dl_self_relro(dyn_loff);

	align = _dl_pagesz - 1;

#define ROUND_PG(x) (((x) + align) & ~(align))
#define TRUNC_PG(x) ((x) & ~(align))

	if (_dl_bindnow) {
		/* Lazy binding disabled, so disable kbind */
		_dl_kbind(NULL, 0, 0);
	}

	DL_DEB(("ld.so loading: '%s'\n", __progname));

	/* init this in runtime, not statically */
	TAILQ_INIT(&_dlopened_child_list);

	exe_obj = NULL;
	_dl_loading_object = NULL;

	minva = ELF_NO_ADDR;
	maxva = exe_loff = exec_end = 0;

	/*
	 * Examine the user application and set up object information.
	 */
	phdp = (Elf_Phdr *)dl_data[AUX_phdr];
	for (loop = 0; loop < dl_data[AUX_phnum]; loop++) {
		switch (phdp->p_type) {
		case PT_PHDR:
			/* recover the exe's PIE load offset */
			exe_loff = (Elf_Addr)dl_data[AUX_phdr] - phdp->p_vaddr;
			us += exe_loff;
			DL_DEB(("exe load offset:  0x%lx\n", exe_loff));
			break;
		case PT_DYNAMIC:
			minva = TRUNC_PG(minva);
			maxva = ROUND_PG(maxva);
			exe_obj = _dl_finalize_object(argv[0] ? argv[0] : "",
			    (Elf_Dyn *)(phdp->p_vaddr + exe_loff),
			    (Elf_Phdr *)dl_data[AUX_phdr],
			    dl_data[AUX_phnum], OBJTYPE_EXE, minva + exe_loff,
			    exe_loff);
			_dl_add_object(exe_obj);
			break;
		case PT_INTERP:
			/* 'us' becomes the path ld.so was invoked by */
			us += phdp->p_vaddr;
			break;
		case PT_LOAD:
			if (phdp->p_vaddr < minva)
				minva = phdp->p_vaddr;
			if (phdp->p_vaddr > maxva)
				maxva = phdp->p_vaddr + phdp->p_memsz;

			next_load = _dl_calloc(1, sizeof(struct load_list));
			if (next_load == NULL)
				_dl_oom();
			next_load->next = load_list;
			load_list = next_load;
			next_load->start = (char *)TRUNC_PG(phdp->p_vaddr) + exe_loff;
			next_load->size = (phdp->p_vaddr & align) + phdp->p_filesz;
			next_load->prot = PFLAGS(phdp->p_flags);
			cur_exec_end = (Elf_Addr)next_load->start + next_load->size;
			if ((next_load->prot & PROT_EXEC) != 0 &&
			    cur_exec_end > exec_end)
				exec_end = cur_exec_end;
			break;
		case PT_TLS:
			if (phdp->p_filesz > phdp->p_memsz)
				_dl_die("invalid tls data");
			ptls = phdp;
			break;
		case PT_GNU_RELRO:
			relro_addr = phdp->p_vaddr + exe_loff;
			relro_size = phdp->p_memsz;
			break;
		}
		phdp++;
	}
	exe_obj->load_list = load_list;
	exe_obj->obj_flags |= DF_1_GLOBAL;
	exe_obj->nodelete = 1;
	exe_obj->load_size = maxva - minva;
	exe_obj->relro_addr = relro_addr;
	exe_obj->relro_size = relro_size;
	_dl_set_sod(exe_obj->load_name, &exe_obj->sod);

#ifdef __i386__
	/* hint library mappings above the exe's executable region */
	if (exec_end > I386_MAX_EXE_ADDR)
		_dl_exec_hint = (void *)ROUND_PG(exec_end-I386_MAX_EXE_ADDR);
	DL_DEB(("_dl_exec_hint:  0x%lx\n", _dl_exec_hint));
#endif

	/* TLS bits in the base executable */
	if (ptls != NULL && ptls->p_memsz)
		_dl_set_tls(exe_obj, ptls, exe_loff, NULL);

	n = _dl_malloc(sizeof *n);
	if (n == NULL)
		_dl_oom();
	n->data = exe_obj;
	TAILQ_INSERT_TAIL(&_dlopened_child_list, n, next_sib);
	exe_obj->opencount++;

	if (_dl_preload != NULL)
		_dl_dopreload(_dl_preload);

	_dl_load_dep_libs(exe_obj, exe_obj->obj_flags, 1);

	/*
	 * Now add the dynamic loader itself last in the object list
	 * so we can use the _dl_ code when serving dl.... calls.
	 * Intentionally left off the exe child_vec.
	 */
	dynp = (Elf_Dyn *)((void *)_DYNAMIC);
	ehdr = (Elf_Ehdr *)dl_data[AUX_base];
	dyn_obj = _dl_finalize_object(us, dynp,
	    (Elf_Phdr *)((char *)dl_data[AUX_base] + ehdr->e_phoff),
	    ehdr->e_phnum, OBJTYPE_LDR, dl_data[AUX_base], dyn_loff);
	_dl_add_object(dyn_obj);

	dyn_obj->refcount++;
	_dl_link_grpsym(dyn_obj);

	/* ld.so relocated itself already, in _dl_self_relro/startup */
	dyn_obj->status |= STAT_RELOC_DONE;
	_dl_set_sod(dyn_obj->load_name, &dyn_obj->sod);

	/* calculate the offsets for static TLS allocations */
	_dl_allocate_tls_offsets();

	/*
	 * Make something to help gdb when poking around in the code.
	 * Do this poking at the .dynamic section now, before relocation
	 * renders it read-only
	 */
	map_link = NULL;
#ifdef __mips__
	for (dynp = exe_obj->load_dyn; dynp->d_tag; dynp++) {
		if (dynp->d_tag == DT_MIPS_RLD_MAP_REL) {
			map_link = (struct r_debug **)
			    (dynp->d_un.d_ptr + (Elf_Addr)dynp);
			break;
		} else if (dynp->d_tag == DT_MIPS_RLD_MAP) {
			map_link = (struct r_debug **)
			    (dynp->d_un.d_ptr + exe_loff);
			break;
		}
	}
#endif
	if (map_link == NULL) {
		for (dynp = exe_obj->load_dyn; dynp->d_tag; dynp++) {
			if (dynp->d_tag == DT_DEBUG) {
				map_link = (struct r_debug **)&dynp->d_un.d_ptr;
				break;
			}
		}
		if (dynp->d_tag != DT_DEBUG)
			DL_DEB(("failed to mark DTDEBUG\n"));
	}
	if (map_link) {
		debug_map = _dl_malloc(sizeof(*debug_map));
		if (debug_map == NULL)
			_dl_oom();
		debug_map->r_version = 1;
		debug_map->r_map = (struct link_map *)_dl_objects;
		debug_map->r_brk = (Elf_Addr)_dl_debug_state;
		debug_map->r_state = RT_CONSISTENT;
		debug_map->r_ldbase = dyn_loff;
		_dl_debug_map = debug_map;
#ifdef __mips__
		/* the slot may live inside RELRO; only poke it if not */
		relro_addr = exe_obj->relro_addr;
		if (dynp->d_tag == DT_DEBUG &&
		    ((Elf_Addr)map_link + sizeof(*map_link) <= relro_addr ||
		     (Elf_Addr)map_link >= relro_addr + exe_obj->relro_size)) {
			_dl_mprotect(map_link, sizeof(*map_link),
			    PROT_READ|PROT_WRITE);
			*map_link = _dl_debug_map;
			_dl_mprotect(map_link, sizeof(*map_link),
			    PROT_READ|PROT_EXEC);
		} else
#endif
			*map_link = _dl_debug_map;
	}


	/*
	 * Everything should be in place now for doing the relocation
	 * and binding. Call _dl_rtld to do the job. Fingers crossed.
	 */

	failed = 0;
	if (!_dl_traceld)
		failed = _dl_rtld(_dl_objects);

	if (_dl_debug || _dl_traceld) {
		if (_dl_traceld)
			_dl_pledge("stdio rpath", NULL);
		_dl_show_objects();
	}

	DL_DEB(("dynamic loading done, %s.\n",
	    (failed == 0) ? "success":"failed"));

	if (failed != 0)
		_dl_die("relocation failed");

	if (_dl_traceld)
		_dl_exit(0);

	_dl_loading_object = NULL;

	/* set up the TIB for the initial thread */
	_dl_allocate_first_tib();

	_dl_fixup_user_env();

	_dl_debug_state();

	/*
	 * Do not run init code if run from ldd.
	 */
	if (_dl_objects->next != NULL) {
		_dl_call_preinit(_dl_objects);
		_dl_call_init(_dl_objects);
	}

	DL_DEB(("entry point: 0x%lx\n", dl_data[AUX_entry]));

	/*
	 * Return the entry point.
	 */
	return(dl_data[AUX_entry]);
}

/*
 * Relocate the whole object list, depth-first (dependencies are
 * relocated before the objects that need them).  Returns the number
 * of failures.
 */
int
_dl_rtld(elf_object_t *object)
{
	struct load_list *llist;
	int fails = 0;

	if (object->next)
		fails += _dl_rtld(object->next);

	if (object->status & STAT_RELOC_DONE)
		return 0;

	/*
	 * Do relocation information first, then GOT.
	 */
	unprotect_if_textrel(object);
	_dl_rreloc(object);
	fails =_dl_md_reloc(object, DT_REL, DT_RELSZ);
	fails += _dl_md_reloc(object, DT_RELA, DT_RELASZ);
	reprotect_if_textrel(object);

	/*
	 * We do lazy resolution by default, doing eager resolution if
	 *  - the object requests it with -znow, OR
	 *  - LD_BIND_NOW is set and this object isn't being ltraced
	 *
	 * Note that -znow disables ltrace for the object: on at least
	 * amd64 'ld' doesn't generate the trampoline for lazy relocation
	 * when -znow is used.
	 */
	fails += _dl_md_reloc_got(object, !(object->obj_flags & DF_1_NOW) &&
	    !(_dl_bindnow && !object->traced));

	/*
	 * Look for W&X segments and make them read-only.
	 */
	for (llist = object->load_list; llist != NULL; llist = llist->next) {
		if ((llist->prot & PROT_WRITE) && (llist->prot & PROT_EXEC)) {
			_dl_mprotect(llist->start, llist->size,
			    llist->prot & ~PROT_WRITE);
		}
	}

	/*
	 * TEXTREL binaries are loaded without immutable on un-writeable sections.
	 * After text relocations are finished, these regions can become
	 * immutable. OPENBSD_MUTABLE section always overlaps writeable LOADs,
	 * so don't be afraid.
	 */
	if (object->dyn.textrel) {
		for (llist = object->load_list; llist != NULL; llist = llist->next)
			if ((llist->prot & PROT_WRITE) == 0)
				_dl_mimmutable(llist->start, llist->size);
	}

	if (fails == 0)
		object->status |= STAT_RELOC_DONE;

	return (fails);
}
/*
 * Run the exe's DT_PREINIT_ARRAY functions, passing the dl callback
 * cookie so libc can fetch the dl_cb_0 table from ld.so.
 */
void
_dl_call_preinit(elf_object_t *object)
{
	if (object->dyn.preinit_array) {
		int num = object->dyn.preinit_arraysz / sizeof(Elf_Addr);
		int i;

		DL_DEB(("doing preinitarray obj %p @%p: [%s]\n",
		    object, object->dyn.preinit_array, object->load_name));
		for (i = 0; i < num; i++)
			(*object->dyn.preinit_array[i])(_dl_argc, _dl_argv,
			    environ, &_dl_cb_cb);
	}
}

/*
 * Run constructors: first pass handles DF_1_INITFIRST objects, the
 * second pass handles everything else.
 */
void
_dl_call_init(elf_object_t *object)
{
	_dl_call_init_recurse(object, 1);
	_dl_call_init_recurse(object, 0);
}

static void
_dl_relro(elf_object_t *object)
{
	/*
	 * Handle GNU_RELRO
	 */
	if (object->relro_addr != 0 && object->relro_size != 0) {
		Elf_Addr addr = object->relro_addr;

		DL_DEB(("protect RELRO [0x%lx,0x%lx) in %s\n",
		    addr, addr + object->relro_size, object->load_name));
		_dl_mprotect((void *)addr, object->relro_size, PROT_READ);

		/* if library will never be unloaded, RELRO can be immutable */
		if (object->nodelete)
			_dl_mimmutable((void *)addr, object->relro_size);
	}
}

/*
 * Depth-first constructor run: children first, then this object.
 * RELRO/immutability is applied just before (non-initfirst) or just
 * after (initfirst) the ctors, since ctors may still write data that
 * RELRO will seal.
 */
void
_dl_call_init_recurse(elf_object_t *object, int initfirst)
{
	struct object_vector vec;
	int visited_flag = initfirst ? STAT_VISIT_INITFIRST : STAT_VISIT_INIT;
	int i;

	object->status |= visited_flag;

	for (vec = object->child_vec, i = 0; i < vec.len; i++) {
		if (vec.vec[i]->status & visited_flag)
			continue;
		_dl_call_init_recurse(vec.vec[i], initfirst);
	}

	if (object->status & STAT_INIT_DONE)
		return;

	/* initfirst pass only runs DF_1_INITFIRST objects */
	if (initfirst && (object->obj_flags & DF_1_INITFIRST) == 0)
		return;

	if (!initfirst) {
		_dl_relro(object);
		_dl_apply_immutable(object);
	}

	if (object->dyn.init) {
		DL_DEB(("doing ctors obj %p @%p: [%s]\n",
		    object, object->dyn.init, object->load_name));
		(*object->dyn.init)();
	}

	if (object->dyn.init_array) {
		int num = object->dyn.init_arraysz / sizeof(Elf_Addr);
		int i;

		DL_DEB(("doing initarray obj %p @%p: [%s]\n",
		    object, object->dyn.init_array, object->load_name));
		for (i = 0; i < num; i++)
			(*object->dyn.init_array[i])(_dl_argc, _dl_argv,
			    environ, &_dl_cb_cb);
	}

	if (initfirst) {
		_dl_relro(object);
		_dl_apply_immutable(object);
	}

	object->status |= STAT_INIT_DONE;
}
STAT_VISIT_INITFIRST : STAT_VISIT_INIT; 848 int i; 849 850 object->status |= visited_flag; 851 852 for (vec = object->child_vec, i = 0; i < vec.len; i++) { 853 if (vec.vec[i]->status & visited_flag) 854 continue; 855 _dl_call_init_recurse(vec.vec[i], initfirst); 856 } 857 858 if (object->status & STAT_INIT_DONE) 859 return; 860 861 if (initfirst && (object->obj_flags & DF_1_INITFIRST) == 0) 862 return; 863 864 if (!initfirst) { 865 _dl_relro(object); 866 _dl_apply_immutable(object); 867 } 868 869 if (object->dyn.init) { 870 DL_DEB(("doing ctors obj %p @%p: [%s]\n", 871 object, object->dyn.init, object->load_name)); 872 (*object->dyn.init)(); 873 } 874 875 if (object->dyn.init_array) { 876 int num = object->dyn.init_arraysz / sizeof(Elf_Addr); 877 int i; 878 879 DL_DEB(("doing initarray obj %p @%p: [%s]\n", 880 object, object->dyn.init_array, object->load_name)); 881 for (i = 0; i < num; i++) 882 (*object->dyn.init_array[i])(_dl_argc, _dl_argv, 883 environ, &_dl_cb_cb); 884 } 885 886 if (initfirst) { 887 _dl_relro(object); 888 _dl_apply_immutable(object); 889 } 890 891 object->status |= STAT_INIT_DONE; 892 } 893 894 char * 895 _dl_getenv(const char *var, char **env) 896 { 897 const char *ep; 898 899 while ((ep = *env++)) { 900 const char *vp = var; 901 902 while (*vp && *vp == *ep) { 903 vp++; 904 ep++; 905 } 906 if (*vp == '\0' && *ep++ == '=') 907 return((char *)ep); 908 } 909 return(NULL); 910 } 911 912 void 913 _dl_unsetenv(const char *var, char **env) 914 { 915 char *ep; 916 917 while ((ep = *env)) { 918 const char *vp = var; 919 920 while (*vp && *vp == *ep) { 921 vp++; 922 ep++; 923 } 924 if (*vp == '\0' && *ep++ == '=') { 925 char **P; 926 927 for (P = env;; ++P) 928 if (!(*P = *(P + 1))) 929 break; 930 } else 931 env++; 932 } 933 } 934 935 static inline void 936 fixup_sym(struct elf_object *dummy_obj, const char *name, void *addr) 937 { 938 struct sym_res sr; 939 940 sr = _dl_find_symbol(name, SYM_SEARCH_ALL|SYM_NOWARNNOTFOUND|SYM_PLT, 941 NULL, dummy_obj); 
/*
 * _dl_fixup_user_env()
 *
 * Set the user environment so that programs can use the environment
 * while running constructors. Specifically, MALLOC_OPTIONS= for malloc()
 */
void
_dl_fixup_user_env(void)
{
	struct elf_object dummy_obj;

	/* minimal fake object: only the fields _dl_find_symbol reads */
	dummy_obj.dyn.symbolic = 0;
	dummy_obj.load_name = "ld.so";
	fixup_sym(&dummy_obj, "environ", &environ);
	fixup_sym(&dummy_obj, "__progname", &__progname);
}

/*
 * Versioned callback-table dispenser handed to libc; version 0 is the
 * only one defined so far.
 */
const void *
_dl_cb_cb(int version)
{
	DL_DEB(("version %d callbacks requested\n", version));
	if (version == 0)
		return &callbacks_0;
	return NULL;
}

/* make read-only segments writable while text relocations are applied */
static inline void
unprotect_if_textrel(elf_object_t *object)
{
	struct load_list *ll;

	if (__predict_false(object->dyn.textrel == 1)) {
		for (ll = object->load_list; ll != NULL; ll = ll->next) {
			if ((ll->prot & PROT_WRITE) == 0)
				_dl_mprotect(ll->start, ll->size,
				    PROT_READ | PROT_WRITE);
		}
	}
}

/* restore the original protections after text relocations are done */
static inline void
reprotect_if_textrel(elf_object_t *object)
{
	struct load_list *ll;

	if (__predict_false(object->dyn.textrel == 1)) {
		for (ll = object->load_list; ll != NULL; ll = ll->next) {
			if ((ll->prot & PROT_WRITE) == 0)
				_dl_mprotect(ll->start, ll->size, ll->prot);
		}
	}
}

/*
 * Apply DT_RELR relative relocations: each even entry is the address
 * of one slot to relocate; subsequent odd entries are bitmaps, each
 * bit selecting one of the next (8 * sizeof(Elf_Relr) - 1) slots.
 */
static void
_dl_rreloc(elf_object_t *object)
{
	const Elf_Relr *reloc, *rend;
	Elf_Addr loff = object->obj_base;

	reloc = object->dyn.relr;
	rend = (const Elf_Relr *)((char *)reloc + object->dyn.relrsz);

	while (reloc < rend) {
		Elf_Addr *where;

		/* address entry: relocate it and move past */
		where = (Elf_Addr *)(*reloc + loff);
		*where++ += loff;

		/* bitmap entries follow while the low bit is set */
		for (reloc++; reloc < rend && (*reloc & 1); reloc++) {
			Elf_Addr bits = *reloc >> 1;

			Elf_Addr *here = where;
			while (bits != 0) {
				if (bits & 1) {
					*here += loff;
				}
				bits >>= 1;
				here++;
			}
			/* each bitmap word covers 63 (or 31) slots */
			where += (8 * sizeof *reloc) - 1;
		}
	}
}
/*
 * Append [s, e) to a range vector; empty ranges are dropped, inverted
 * ranges are fatal.  Dies if the fixed-size slice array is full.
 */
void
_dl_push_range(struct range_vector *v, vaddr_t s, vaddr_t e)
{
	int i = v->count;

	if (i == nitems(v->slice)) {
		_dl_die("too many ranges");
	}
	/* Skips the empty ranges (s == e). */
	if (s < e) {
		v->slice[i].start = s;
		v->slice[i].end = e;
		v->count++;
	} else if (s > e) {
		_dl_die("invalid range");
	}
}

void
_dl_push_range_size(struct range_vector *v, vaddr_t s, vsize_t size)
{
	_dl_push_range(v, s, s + size);
}

/*
 * Finds the truly immutable ranges by taking mutable ones out. Implements
 * interval difference of imut and mut. Interval splitting necessitates
 * intermediate storage and complex double buffering.
 */
void
_dl_apply_immutable(elf_object_t *object)
{
	struct range_vector acc[2];	/* flips out to avoid copying */
	struct addr_range *m, *im;
	int i, j, imut, in, out;

	if (object->obj_type != OBJTYPE_LIB)
		return;

	for (imut = 0; imut < object->imut.count; imut++) {
		im = &object->imut.slice[imut];
		/* start with the whole immutable range as one interval */
		out = 0;
		acc[out].count = 0;
		_dl_push_range(&acc[out], im->start, im->end);

		/* subtract each mutable range from every accumulated piece */
		for (i = 0; i < object->mut.count; i++) {
			m = &object->mut.slice[i];
			in = out;
			out = 1 - in;
			acc[out].count = 0;
			for (j = 0; j < acc[in].count; j++) {
				const vaddr_t ms = m->start, me = m->end;
				const vaddr_t is = acc[in].slice[j].start,
				    ie = acc[in].slice[j].end;
				if (ie <= ms || me <= is) {
					/* is .. ie .. ms .. me -> is .. ie */
					/* ms .. me .. is .. ie -> is .. ie */
					_dl_push_range(&acc[out], is, ie);
				} else if (ms <= is && ie <= me) {
					/* PROVIDED: ms < ie && is < me */
					/* ms .. is .. ie .. me -> [] */
					;
				} else if (ie <= me) {
					/* is .. ms .. ie .. me -> is .. ms */
					_dl_push_range(&acc[out], is, ms);
				} else if (is < ms) {
					/* is .. ms .. me .. ie -> is .. ms */
					_dl_push_range(&acc[out], is, ms);
					_dl_push_range(&acc[out], me, ie);
				} else {
					/* ms .. is .. me .. ie -> me .. ie */
					_dl_push_range(&acc[out], me, ie);
				}
			}
		}

		/* and now, install immutability for objects */
		for (i = 0; i < acc[out].count; i++) {
			const struct addr_range *ar = &acc[out].slice[i];
			_dl_mimmutable((void *)ar->start, ar->end - ar->start);
		}

	}

}