/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E%	SMI"

/*
 * amd64 machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
310Sstevel@tonic-gate */ 320Sstevel@tonic-gate #include "_synonyms.h" 330Sstevel@tonic-gate 340Sstevel@tonic-gate #include <stdio.h> 350Sstevel@tonic-gate #include <sys/elf.h> 360Sstevel@tonic-gate #include <sys/elf_amd64.h> 370Sstevel@tonic-gate #include <sys/mman.h> 380Sstevel@tonic-gate #include <dlfcn.h> 390Sstevel@tonic-gate #include <synch.h> 400Sstevel@tonic-gate #include <string.h> 411618Srie #include <debug.h> 421618Srie #include <reloc.h> 431618Srie #include <conv.h> 440Sstevel@tonic-gate #include "_rtld.h" 450Sstevel@tonic-gate #include "_audit.h" 460Sstevel@tonic-gate #include "_elf.h" 470Sstevel@tonic-gate #include "msg.h" 480Sstevel@tonic-gate 490Sstevel@tonic-gate 500Sstevel@tonic-gate extern void elf_rtbndr(Rt_map *, ulong_t, caddr_t); 510Sstevel@tonic-gate 520Sstevel@tonic-gate int 530Sstevel@tonic-gate elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr) 540Sstevel@tonic-gate { 550Sstevel@tonic-gate /* 560Sstevel@tonic-gate * Check machine type and flags. 570Sstevel@tonic-gate */ 580Sstevel@tonic-gate if (ehdr->e_flags != 0) { 590Sstevel@tonic-gate rej->rej_type = SGS_REJ_BADFLAG; 600Sstevel@tonic-gate rej->rej_info = (uint_t)ehdr->e_flags; 610Sstevel@tonic-gate return (0); 620Sstevel@tonic-gate } 630Sstevel@tonic-gate return (1); 640Sstevel@tonic-gate } 650Sstevel@tonic-gate 660Sstevel@tonic-gate void 670Sstevel@tonic-gate ldso_plt_init(Rt_map * lmp) 680Sstevel@tonic-gate { 690Sstevel@tonic-gate /* 700Sstevel@tonic-gate * There is no need to analyze ld.so because we don't map in any of 710Sstevel@tonic-gate * its dependencies. However we may map these dependencies in later 720Sstevel@tonic-gate * (as if ld.so had dlopened them), so initialize the plt and the 730Sstevel@tonic-gate * permission information. 
740Sstevel@tonic-gate */ 750Sstevel@tonic-gate if (PLTGOT(lmp)) 760Sstevel@tonic-gate elf_plt_init((void *)(PLTGOT(lmp)), (caddr_t)lmp); 770Sstevel@tonic-gate } 780Sstevel@tonic-gate 790Sstevel@tonic-gate static const uchar_t dyn_plt_template[] = { 800Sstevel@tonic-gate /* 0x00 */ 0x55, /* pushq %rbp */ 810Sstevel@tonic-gate /* 0x01 */ 0x48, 0x89, 0xe5, /* movq %rsp, %rbp */ 820Sstevel@tonic-gate /* 0x04 */ 0x48, 0x83, 0xec, 0x10, /* subq $0x10, %rsp */ 830Sstevel@tonic-gate /* 0x08 */ 0x4c, 0x8d, 0x1d, 0x00, /* leaq trace_fields(%rip), %r11 */ 840Sstevel@tonic-gate 0x00, 0x00, 0x00, 850Sstevel@tonic-gate /* 0x0f */ 0x4c, 0x89, 0x5d, 0xf8, /* movq %r11, -0x8(%rbp) */ 860Sstevel@tonic-gate /* 0x13 */ 0x49, 0xbb, 0x00, 0x00, /* movq $elf_plt_trace, %r11 */ 870Sstevel@tonic-gate 0x00, 0x00, 0x00, 880Sstevel@tonic-gate 0x00, 0x00, 0x00, 890Sstevel@tonic-gate /* 0x1d */ 0x41, 0xff, 0xe3 /* jmp *%r11 */ 900Sstevel@tonic-gate /* 0x20 */ 910Sstevel@tonic-gate }; 920Sstevel@tonic-gate 930Sstevel@tonic-gate /* 940Sstevel@tonic-gate * And the virutal outstanding relocations against the 950Sstevel@tonic-gate * above block are: 960Sstevel@tonic-gate * 970Sstevel@tonic-gate * reloc offset Addend symbol 980Sstevel@tonic-gate * R_AMD64_PC32 0x0b -4 trace_fields 990Sstevel@tonic-gate * R_AMD64_64 0x15 0 elf_plt_trace 1000Sstevel@tonic-gate */ 1010Sstevel@tonic-gate 1020Sstevel@tonic-gate #define TRCREL1OFF 0x0b 1030Sstevel@tonic-gate #define TRCREL2OFF 0x15 1040Sstevel@tonic-gate 1050Sstevel@tonic-gate int dyn_plt_ent_size = sizeof (dyn_plt_template); 1060Sstevel@tonic-gate 1070Sstevel@tonic-gate /* 1080Sstevel@tonic-gate * the dynamic plt entry is: 1090Sstevel@tonic-gate * 1100Sstevel@tonic-gate * pushq %rbp 1110Sstevel@tonic-gate * movq %rsp, %rbp 1120Sstevel@tonic-gate * subq $0x10, %rsp 1130Sstevel@tonic-gate * leaq trace_fields(%rip), %r11 1140Sstevel@tonic-gate * movq %r11, -0x8(%rbp) 1150Sstevel@tonic-gate * movq $elf_plt_trace, %r11 1160Sstevel@tonic-gate * jmp *%r11 
1170Sstevel@tonic-gate * dyn_data: 1180Sstevel@tonic-gate * .align 8 1190Sstevel@tonic-gate * uintptr_t reflmp 1200Sstevel@tonic-gate * uintptr_t deflmp 1210Sstevel@tonic-gate * uint_t symndx 1220Sstevel@tonic-gate * uint_t sb_flags 1230Sstevel@tonic-gate * Sym symdef 1240Sstevel@tonic-gate */ 1250Sstevel@tonic-gate static caddr_t 1260Sstevel@tonic-gate elf_plt_trace_write(ulong_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym, 1270Sstevel@tonic-gate uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail) 1280Sstevel@tonic-gate { 1290Sstevel@tonic-gate extern int elf_plt_trace(); 1300Sstevel@tonic-gate ulong_t got_entry; 1310Sstevel@tonic-gate uchar_t *dyn_plt; 1320Sstevel@tonic-gate uintptr_t *dyndata; 1330Sstevel@tonic-gate 1340Sstevel@tonic-gate 1350Sstevel@tonic-gate /* 1360Sstevel@tonic-gate * We only need to add the glue code if there is an auditing 1370Sstevel@tonic-gate * library that is interested in this binding. 1380Sstevel@tonic-gate */ 1390Sstevel@tonic-gate dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts + 1404679Srie (pltndx * dyn_plt_ent_size)); 1410Sstevel@tonic-gate 1420Sstevel@tonic-gate /* 1430Sstevel@tonic-gate * Have we initialized this dynamic plt entry yet? If we haven't do it 1440Sstevel@tonic-gate * now. Otherwise this function has been called before, but from a 1450Sstevel@tonic-gate * different plt (ie. from another shared object). In that case 1460Sstevel@tonic-gate * we just set the plt to point to the new dyn_plt. 
1470Sstevel@tonic-gate */ 1480Sstevel@tonic-gate if (*dyn_plt == 0) { 1490Sstevel@tonic-gate Sym * symp; 1500Sstevel@tonic-gate Xword symvalue; 1511618Srie Lm_list *lml = LIST(rlmp); 1520Sstevel@tonic-gate 1530Sstevel@tonic-gate (void) memcpy((void *)dyn_plt, dyn_plt_template, 1540Sstevel@tonic-gate sizeof (dyn_plt_template)); 1550Sstevel@tonic-gate dyndata = (uintptr_t *)((uintptr_t)dyn_plt + 1560Sstevel@tonic-gate ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN)); 1570Sstevel@tonic-gate 1580Sstevel@tonic-gate /* 1590Sstevel@tonic-gate * relocate: 1600Sstevel@tonic-gate * leaq trace_fields(%rip), %r11 1610Sstevel@tonic-gate * R_AMD64_PC32 0x0b -4 trace_fields 1620Sstevel@tonic-gate */ 1630Sstevel@tonic-gate symvalue = (Xword)((uintptr_t)dyndata - 1640Sstevel@tonic-gate (uintptr_t)(&dyn_plt[TRCREL1OFF]) - 4); 1655189Sab196087 if (do_reloc_rtld(R_AMD64_PC32, &dyn_plt[TRCREL1OFF], 1661618Srie &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA), 1671618Srie MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) { 1680Sstevel@tonic-gate *fail = 1; 1690Sstevel@tonic-gate return (0); 1700Sstevel@tonic-gate } 1710Sstevel@tonic-gate 1720Sstevel@tonic-gate /* 1730Sstevel@tonic-gate * relocating: 1740Sstevel@tonic-gate * movq $elf_plt_trace, %r11 1750Sstevel@tonic-gate * R_AMD64_64 0x15 0 elf_plt_trace 1760Sstevel@tonic-gate */ 1770Sstevel@tonic-gate symvalue = (Xword)elf_plt_trace; 1785189Sab196087 if (do_reloc_rtld(R_AMD64_64, &dyn_plt[TRCREL2OFF], 1791618Srie &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE), 1801618Srie MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) { 1810Sstevel@tonic-gate *fail = 1; 1820Sstevel@tonic-gate return (0); 1830Sstevel@tonic-gate } 1840Sstevel@tonic-gate 1850Sstevel@tonic-gate *dyndata++ = (uintptr_t)rlmp; 1860Sstevel@tonic-gate *dyndata++ = (uintptr_t)dlmp; 1870Sstevel@tonic-gate *dyndata = (uintptr_t)(((uint64_t)sb_flags << 32) | symndx); 1880Sstevel@tonic-gate dyndata++; 1890Sstevel@tonic-gate symp = (Sym *)dyndata; 1900Sstevel@tonic-gate *symp = *sym; 1910Sstevel@tonic-gate 
symp->st_value = (Addr)to; 1920Sstevel@tonic-gate } 1930Sstevel@tonic-gate 1940Sstevel@tonic-gate got_entry = (ulong_t)roffset; 1950Sstevel@tonic-gate *(ulong_t *)got_entry = (ulong_t)dyn_plt; 1960Sstevel@tonic-gate return ((caddr_t)dyn_plt); 1970Sstevel@tonic-gate } 1980Sstevel@tonic-gate 1990Sstevel@tonic-gate 2000Sstevel@tonic-gate /* 2010Sstevel@tonic-gate * Function binding routine - invoked on the first call to a function through 2020Sstevel@tonic-gate * the procedure linkage table; 2030Sstevel@tonic-gate * passes first through an assembly language interface. 2040Sstevel@tonic-gate * 2050Sstevel@tonic-gate * Takes the offset into the relocation table of the associated 2060Sstevel@tonic-gate * relocation entry and the address of the link map (rt_private_map struct) 2070Sstevel@tonic-gate * for the entry. 2080Sstevel@tonic-gate * 2090Sstevel@tonic-gate * Returns the address of the function referenced after re-writing the PLT 2100Sstevel@tonic-gate * entry to invoke the function directly. 2110Sstevel@tonic-gate * 2120Sstevel@tonic-gate * On error, causes process to terminate with a signal. 2130Sstevel@tonic-gate */ 2140Sstevel@tonic-gate ulong_t 2150Sstevel@tonic-gate elf_bndr(Rt_map *lmp, ulong_t pltndx, caddr_t from) 2160Sstevel@tonic-gate { 2170Sstevel@tonic-gate Rt_map *nlmp, * llmp; 2180Sstevel@tonic-gate ulong_t addr, reloff, symval, rsymndx; 2190Sstevel@tonic-gate char *name; 2200Sstevel@tonic-gate Rela *rptr; 2215220Srie Sym *rsym, *nsym; 2221618Srie uint_t binfo, sb_flags = 0, dbg_class; 2230Sstevel@tonic-gate Slookup sl; 2241618Srie int entry, lmflags; 2251618Srie Lm_list *lml; 2260Sstevel@tonic-gate 2270Sstevel@tonic-gate /* 2280Sstevel@tonic-gate * For compatibility with libthread (TI_VERSION 1) we track the entry 2290Sstevel@tonic-gate * value. A zero value indicates we have recursed into ld.so.1 to 2300Sstevel@tonic-gate * further process a locking request. Under this recursion we disable 2310Sstevel@tonic-gate * tsort and cleanup activities. 
2320Sstevel@tonic-gate */ 2330Sstevel@tonic-gate entry = enter(); 2340Sstevel@tonic-gate 2351618Srie lml = LIST(lmp); 2361618Srie if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) { 2371618Srie dbg_class = dbg_desc->d_class; 2381618Srie dbg_desc->d_class = 0; 2390Sstevel@tonic-gate } 2400Sstevel@tonic-gate 2410Sstevel@tonic-gate /* 2420Sstevel@tonic-gate * Perform some basic sanity checks. If we didn't get a load map or 2430Sstevel@tonic-gate * the relocation offset is invalid then its possible someone has walked 2440Sstevel@tonic-gate * over the .got entries or jumped to plt0 out of the blue. 2450Sstevel@tonic-gate */ 2460Sstevel@tonic-gate if ((!lmp) && (pltndx <= 2470Sstevel@tonic-gate (ulong_t)PLTRELSZ(lmp) / (ulong_t)RELENT(lmp))) { 2484734Sab196087 Conv_inv_buf_t inv_buf; 2494734Sab196087 2501618Srie eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF), 2514734Sab196087 conv_reloc_amd64_type(R_AMD64_JUMP_SLOT, 0, &inv_buf), 2521618Srie EC_NATPTR(lmp), EC_XWORD(pltndx), EC_NATPTR(from)); 2531618Srie rtldexit(lml, 1); 2540Sstevel@tonic-gate } 2550Sstevel@tonic-gate reloff = pltndx * (ulong_t)RELENT(lmp); 2560Sstevel@tonic-gate 2570Sstevel@tonic-gate /* 2580Sstevel@tonic-gate * Use relocation entry to get symbol table entry and symbol name. 2590Sstevel@tonic-gate */ 2600Sstevel@tonic-gate addr = (ulong_t)JMPREL(lmp); 2610Sstevel@tonic-gate rptr = (Rela *)(addr + reloff); 2620Sstevel@tonic-gate rsymndx = ELF_R_SYM(rptr->r_info); 2635220Srie rsym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp))); 2645220Srie name = (char *)(STRTAB(lmp) + rsym->st_name); 2650Sstevel@tonic-gate 2660Sstevel@tonic-gate /* 2670Sstevel@tonic-gate * Determine the last link-map of this list, this'll be the starting 2680Sstevel@tonic-gate * point for any tsort() processing. 2690Sstevel@tonic-gate */ 2701618Srie llmp = lml->lm_tail; 2710Sstevel@tonic-gate 2720Sstevel@tonic-gate /* 2735950Srie * Find definition for symbol. Initialize the symbol lookup data 2745950Srie * structure. 
2750Sstevel@tonic-gate */ 2765950Srie SLOOKUP_INIT(sl, name, lmp, lml->lm_head, ld_entry_cnt, 0, 2775950Srie rsymndx, rsym, 0, LKUP_DEFT); 2780Sstevel@tonic-gate 2790Sstevel@tonic-gate if ((nsym = lookup_sym(&sl, &nlmp, &binfo)) == 0) { 2801618Srie eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp), 2810Sstevel@tonic-gate demangle(name)); 2821618Srie rtldexit(lml, 1); 2830Sstevel@tonic-gate } 2840Sstevel@tonic-gate 2850Sstevel@tonic-gate symval = nsym->st_value; 2860Sstevel@tonic-gate if (!(FLAGS(nlmp) & FLG_RT_FIXED) && 2870Sstevel@tonic-gate (nsym->st_shndx != SHN_ABS)) 2880Sstevel@tonic-gate symval += ADDR(nlmp); 2890Sstevel@tonic-gate if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) { 2900Sstevel@tonic-gate /* 2910Sstevel@tonic-gate * Record that this new link map is now bound to the caller. 2920Sstevel@tonic-gate */ 2930Sstevel@tonic-gate if (bind_one(lmp, nlmp, BND_REFER) == 0) 2941618Srie rtldexit(lml, 1); 2950Sstevel@tonic-gate } 2960Sstevel@tonic-gate 2971618Srie if ((lml->lm_tflags | FLAGS1(lmp)) & LML_TFLG_AUD_SYMBIND) { 2980Sstevel@tonic-gate uint_t symndx = (((uintptr_t)nsym - 2994679Srie (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp)); 3000Sstevel@tonic-gate symval = audit_symbind(lmp, nlmp, nsym, symndx, symval, 3014679Srie &sb_flags); 3020Sstevel@tonic-gate } 3030Sstevel@tonic-gate 3040Sstevel@tonic-gate if (!(rtld_flags & RT_FL_NOBIND)) { 3050Sstevel@tonic-gate addr = rptr->r_offset; 3060Sstevel@tonic-gate if (!(FLAGS(lmp) & FLG_RT_FIXED)) 3070Sstevel@tonic-gate addr += ADDR(lmp); 3081618Srie if (((lml->lm_tflags | FLAGS1(lmp)) & 3090Sstevel@tonic-gate (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) && 3100Sstevel@tonic-gate AUDINFO(lmp)->ai_dynplts) { 3110Sstevel@tonic-gate int fail = 0; 3120Sstevel@tonic-gate uint_t pltndx = reloff / sizeof (Rela); 3130Sstevel@tonic-gate uint_t symndx = (((uintptr_t)nsym - 3144679Srie (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp)); 3150Sstevel@tonic-gate 3160Sstevel@tonic-gate symval = 
(ulong_t)elf_plt_trace_write(addr, lmp, nlmp, 3170Sstevel@tonic-gate nsym, symndx, pltndx, (caddr_t)symval, sb_flags, 3180Sstevel@tonic-gate &fail); 3190Sstevel@tonic-gate if (fail) 3201618Srie rtldexit(lml, 1); 3210Sstevel@tonic-gate } else { 3220Sstevel@tonic-gate /* 3230Sstevel@tonic-gate * Write standard PLT entry to jump directly 3240Sstevel@tonic-gate * to newly bound function. 3250Sstevel@tonic-gate */ 3260Sstevel@tonic-gate *(ulong_t *)addr = symval; 3270Sstevel@tonic-gate } 3280Sstevel@tonic-gate } 3290Sstevel@tonic-gate 3300Sstevel@tonic-gate /* 3310Sstevel@tonic-gate * Print binding information and rebuild PLT entry. 3320Sstevel@tonic-gate */ 3331618Srie DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)), 3341618Srie (Xword)(reloff / sizeof (Rela)), PLT_T_FULL, nlmp, 3351618Srie (Addr)symval, nsym->st_value, name, binfo)); 3360Sstevel@tonic-gate 3370Sstevel@tonic-gate /* 3380Sstevel@tonic-gate * Complete any processing for newly loaded objects. Note we don't 3390Sstevel@tonic-gate * know exactly where any new objects are loaded (we know the object 3400Sstevel@tonic-gate * that supplied the symbol, but others may have been loaded lazily as 3410Sstevel@tonic-gate * we searched for the symbol), so sorting starts from the last 3420Sstevel@tonic-gate * link-map know on entry to this routine. 3430Sstevel@tonic-gate */ 3440Sstevel@tonic-gate if (entry) 3454679Srie load_completion(llmp); 3460Sstevel@tonic-gate 3470Sstevel@tonic-gate /* 3480Sstevel@tonic-gate * Some operations like dldump() or dlopen()'ing a relocatable object 3490Sstevel@tonic-gate * result in objects being loaded on rtld's link-map, make sure these 3500Sstevel@tonic-gate * objects are initialized also. 
3510Sstevel@tonic-gate */ 3521618Srie if ((lml->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init) 3534679Srie load_completion(nlmp); 3540Sstevel@tonic-gate 3550Sstevel@tonic-gate /* 3560Sstevel@tonic-gate * If the object we've bound to is in the process of being initialized 3570Sstevel@tonic-gate * by another thread, determine whether we should block. 3580Sstevel@tonic-gate */ 3590Sstevel@tonic-gate is_dep_ready(nlmp, lmp, DBG_WAIT_SYMBOL); 3600Sstevel@tonic-gate 3610Sstevel@tonic-gate /* 3620Sstevel@tonic-gate * Make sure the object to which we've bound has had it's .init fired. 3630Sstevel@tonic-gate * Cleanup before return to user code. 3640Sstevel@tonic-gate */ 3650Sstevel@tonic-gate if (entry) { 3660Sstevel@tonic-gate is_dep_init(nlmp, lmp); 3671618Srie leave(lml); 3680Sstevel@tonic-gate } 3690Sstevel@tonic-gate 3700Sstevel@tonic-gate if (lmflags & LML_FLG_RTLDLM) 3711618Srie dbg_desc->d_class = dbg_class; 3720Sstevel@tonic-gate 3730Sstevel@tonic-gate return (symval); 3740Sstevel@tonic-gate } 3750Sstevel@tonic-gate 3760Sstevel@tonic-gate 3770Sstevel@tonic-gate /* 3780Sstevel@tonic-gate * When the relocation loop realizes that it's dealing with relative 3790Sstevel@tonic-gate * relocations in a shared object, it breaks into this tighter loop 3800Sstevel@tonic-gate * as an optimization. 
3810Sstevel@tonic-gate */ 3820Sstevel@tonic-gate ulong_t 3830Sstevel@tonic-gate elf_reloc_relative(ulong_t relbgn, ulong_t relend, ulong_t relsiz, 3840Sstevel@tonic-gate ulong_t basebgn, ulong_t etext, ulong_t emap) 3850Sstevel@tonic-gate { 3860Sstevel@tonic-gate ulong_t roffset = ((Rela *)relbgn)->r_offset; 3870Sstevel@tonic-gate char rtype; 3880Sstevel@tonic-gate 3890Sstevel@tonic-gate do { 3900Sstevel@tonic-gate roffset += basebgn; 3910Sstevel@tonic-gate 3920Sstevel@tonic-gate /* 3930Sstevel@tonic-gate * If this relocation is against an address not mapped in, 3940Sstevel@tonic-gate * then break out of the relative relocation loop, falling 3950Sstevel@tonic-gate * back on the main relocation loop. 3960Sstevel@tonic-gate */ 3970Sstevel@tonic-gate if (roffset < etext || roffset > emap) 3980Sstevel@tonic-gate break; 3990Sstevel@tonic-gate 4000Sstevel@tonic-gate /* 4010Sstevel@tonic-gate * Perform the actual relocation. 4020Sstevel@tonic-gate */ 4030Sstevel@tonic-gate *((ulong_t *)roffset) = basebgn + 4040Sstevel@tonic-gate ((Rela *)relbgn)->r_addend; 4050Sstevel@tonic-gate 4060Sstevel@tonic-gate relbgn += relsiz; 4070Sstevel@tonic-gate 4080Sstevel@tonic-gate if (relbgn >= relend) 4090Sstevel@tonic-gate break; 4100Sstevel@tonic-gate 4110Sstevel@tonic-gate rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info); 4120Sstevel@tonic-gate roffset = ((Rela *)relbgn)->r_offset; 4130Sstevel@tonic-gate 4140Sstevel@tonic-gate } while (rtype == R_AMD64_RELATIVE); 4150Sstevel@tonic-gate 4160Sstevel@tonic-gate return (relbgn); 4170Sstevel@tonic-gate } 4180Sstevel@tonic-gate 4190Sstevel@tonic-gate /* 4200Sstevel@tonic-gate * This is the tightest loop for RELATIVE relocations for those 4210Sstevel@tonic-gate * objects built with the DT_RELACOUNT .dynamic entry. 
4220Sstevel@tonic-gate */ 4230Sstevel@tonic-gate ulong_t 4240Sstevel@tonic-gate elf_reloc_relacount(ulong_t relbgn, ulong_t relacount, ulong_t relsiz, 4250Sstevel@tonic-gate ulong_t basebgn) 4260Sstevel@tonic-gate { 4270Sstevel@tonic-gate ulong_t roffset = ((Rela *) relbgn)->r_offset; 4280Sstevel@tonic-gate 4290Sstevel@tonic-gate for (; relacount; relacount--) { 4300Sstevel@tonic-gate roffset += basebgn; 4310Sstevel@tonic-gate 4320Sstevel@tonic-gate /* 4330Sstevel@tonic-gate * Perform the actual relocation. 4340Sstevel@tonic-gate */ 4350Sstevel@tonic-gate *((ulong_t *)roffset) = basebgn + 4360Sstevel@tonic-gate ((Rela *)relbgn)->r_addend; 4370Sstevel@tonic-gate 4380Sstevel@tonic-gate relbgn += relsiz; 4390Sstevel@tonic-gate 4400Sstevel@tonic-gate roffset = ((Rela *)relbgn)->r_offset; 4410Sstevel@tonic-gate 4420Sstevel@tonic-gate } 4430Sstevel@tonic-gate 4440Sstevel@tonic-gate return (relbgn); 4450Sstevel@tonic-gate } 4460Sstevel@tonic-gate 4470Sstevel@tonic-gate /* 4480Sstevel@tonic-gate * Read and process the relocations for one link object, we assume all 4490Sstevel@tonic-gate * relocation sections for loadable segments are stored contiguously in 4500Sstevel@tonic-gate * the file. 
4510Sstevel@tonic-gate */ 4520Sstevel@tonic-gate int 4530Sstevel@tonic-gate elf_reloc(Rt_map *lmp, uint_t plt) 4540Sstevel@tonic-gate { 4550Sstevel@tonic-gate ulong_t relbgn, relend, relsiz, basebgn; 4560Sstevel@tonic-gate ulong_t pltbgn, pltend, _pltbgn, _pltend; 4570Sstevel@tonic-gate ulong_t roffset, rsymndx, psymndx = 0, etext = ETEXT(lmp); 4580Sstevel@tonic-gate ulong_t emap, dsymndx; 4590Sstevel@tonic-gate uchar_t rtype; 4600Sstevel@tonic-gate long reladd, value, pvalue; 4610Sstevel@tonic-gate Sym *symref, *psymref, *symdef, *psymdef; 4620Sstevel@tonic-gate char *name, *pname; 4630Sstevel@tonic-gate Rt_map *_lmp, *plmp; 4640Sstevel@tonic-gate int textrel = 0, ret = 1, noplt = 0; 4650Sstevel@tonic-gate int relacount = RELACOUNT(lmp), plthint = 0; 4660Sstevel@tonic-gate Rela *rel; 4670Sstevel@tonic-gate uint_t binfo, pbinfo; 4685892Sab196087 APlist *bound = NULL; 4690Sstevel@tonic-gate 4700Sstevel@tonic-gate /* 4710Sstevel@tonic-gate * Although only necessary for lazy binding, initialize the first 4720Sstevel@tonic-gate * global offset entry to go to elf_rtbndr(). dbx(1) seems 4730Sstevel@tonic-gate * to find this useful. 4740Sstevel@tonic-gate */ 4750Sstevel@tonic-gate if ((plt == 0) && PLTGOT(lmp)) { 4760Sstevel@tonic-gate if ((ulong_t)PLTGOT(lmp) < etext) { 4770Sstevel@tonic-gate if (elf_set_prot(lmp, PROT_WRITE) == 0) 4780Sstevel@tonic-gate return (0); 4790Sstevel@tonic-gate textrel = 1; 4800Sstevel@tonic-gate } 4810Sstevel@tonic-gate elf_plt_init((void *)PLTGOT(lmp), (caddr_t)lmp); 4820Sstevel@tonic-gate } 4830Sstevel@tonic-gate 4840Sstevel@tonic-gate /* 4850Sstevel@tonic-gate * Initialize the plt start and end addresses. 
4860Sstevel@tonic-gate */ 4870Sstevel@tonic-gate if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0) 4880Sstevel@tonic-gate pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp)); 4890Sstevel@tonic-gate 4900Sstevel@tonic-gate 4910Sstevel@tonic-gate relsiz = (ulong_t)(RELENT(lmp)); 4920Sstevel@tonic-gate basebgn = ADDR(lmp); 4930Sstevel@tonic-gate emap = ADDR(lmp) + MSIZE(lmp); 4940Sstevel@tonic-gate 4950Sstevel@tonic-gate if (PLTRELSZ(lmp)) 4960Sstevel@tonic-gate plthint = PLTRELSZ(lmp) / relsiz; 4970Sstevel@tonic-gate 4980Sstevel@tonic-gate /* 4990Sstevel@tonic-gate * If we've been called upon to promote an RTLD_LAZY object to an 5000Sstevel@tonic-gate * RTLD_NOW then we're only interested in scaning the .plt table. 5010Sstevel@tonic-gate * An uninitialized .plt is the case where the associated got entry 5020Sstevel@tonic-gate * points back to the plt itself. Determine the range of the real .plt 5030Sstevel@tonic-gate * entries using the _PROCEDURE_LINKAGE_TABLE_ symbol. 5040Sstevel@tonic-gate */ 5050Sstevel@tonic-gate if (plt) { 5060Sstevel@tonic-gate Slookup sl; 5070Sstevel@tonic-gate 5080Sstevel@tonic-gate relbgn = pltbgn; 5090Sstevel@tonic-gate relend = pltend; 5100Sstevel@tonic-gate if (!relbgn || (relbgn == relend)) 5110Sstevel@tonic-gate return (1); 5120Sstevel@tonic-gate 5135950Srie /* 5145950Srie * Initialize the symbol lookup data structure. 
5155950Srie */ 5165950Srie SLOOKUP_INIT(sl, MSG_ORIG(MSG_SYM_PLT), lmp, lmp, ld_entry_cnt, 5175950Srie elf_hash(MSG_ORIG(MSG_SYM_PLT)), 0, 0, 0, LKUP_DEFT); 5180Sstevel@tonic-gate 5190Sstevel@tonic-gate if ((symdef = elf_find_sym(&sl, &_lmp, &binfo)) == 0) 5200Sstevel@tonic-gate return (1); 5210Sstevel@tonic-gate 5220Sstevel@tonic-gate _pltbgn = symdef->st_value; 5230Sstevel@tonic-gate if (!(FLAGS(lmp) & FLG_RT_FIXED) && 5240Sstevel@tonic-gate (symdef->st_shndx != SHN_ABS)) 5250Sstevel@tonic-gate _pltbgn += basebgn; 5260Sstevel@tonic-gate _pltend = _pltbgn + (((PLTRELSZ(lmp) / relsiz)) * 5274679Srie M_PLT_ENTSIZE) + M_PLT_RESERVSZ; 5280Sstevel@tonic-gate 5290Sstevel@tonic-gate } else { 5300Sstevel@tonic-gate /* 5310Sstevel@tonic-gate * The relocation sections appear to the run-time linker as a 5320Sstevel@tonic-gate * single table. Determine the address of the beginning and end 5330Sstevel@tonic-gate * of this table. There are two different interpretations of 5340Sstevel@tonic-gate * the ABI at this point: 5350Sstevel@tonic-gate * 5360Sstevel@tonic-gate * o The REL table and its associated RELSZ indicate the 5370Sstevel@tonic-gate * concatenation of *all* relocation sections (this is the 5380Sstevel@tonic-gate * model our link-editor constructs). 5390Sstevel@tonic-gate * 5400Sstevel@tonic-gate * o The REL table and its associated RELSZ indicate the 5410Sstevel@tonic-gate * concatenation of all *but* the .plt relocations. These 5420Sstevel@tonic-gate * relocations are specified individually by the JMPREL and 5430Sstevel@tonic-gate * PLTRELSZ entries. 5440Sstevel@tonic-gate * 5450Sstevel@tonic-gate * Determine from our knowledege of the relocation range and 5460Sstevel@tonic-gate * .plt range, the range of the total relocation table. Note 5470Sstevel@tonic-gate * that one other ABI assumption seems to be that the .plt 5480Sstevel@tonic-gate * relocations always follow any other relocations, the 5490Sstevel@tonic-gate * following range checking drops that assumption. 
5500Sstevel@tonic-gate */ 5510Sstevel@tonic-gate relbgn = (ulong_t)(REL(lmp)); 5520Sstevel@tonic-gate relend = relbgn + (ulong_t)(RELSZ(lmp)); 5530Sstevel@tonic-gate if (pltbgn) { 5540Sstevel@tonic-gate if (!relbgn || (relbgn > pltbgn)) 5550Sstevel@tonic-gate relbgn = pltbgn; 5560Sstevel@tonic-gate if (!relbgn || (relend < pltend)) 5570Sstevel@tonic-gate relend = pltend; 5580Sstevel@tonic-gate } 5590Sstevel@tonic-gate } 5600Sstevel@tonic-gate if (!relbgn || (relbgn == relend)) { 5611618Srie DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE)); 5620Sstevel@tonic-gate return (1); 5630Sstevel@tonic-gate } 5641618Srie DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START)); 5650Sstevel@tonic-gate 5660Sstevel@tonic-gate /* 5670Sstevel@tonic-gate * If we're processing a dynamic executable in lazy mode there is no 5680Sstevel@tonic-gate * need to scan the .rel.plt table, however if we're processing a shared 5690Sstevel@tonic-gate * object in lazy mode the .got addresses associated to each .plt must 5700Sstevel@tonic-gate * be relocated to reflect the location of the shared object. 5710Sstevel@tonic-gate */ 5720Sstevel@tonic-gate if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) && 5730Sstevel@tonic-gate (FLAGS(lmp) & FLG_RT_FIXED)) 5740Sstevel@tonic-gate noplt = 1; 5750Sstevel@tonic-gate 5760Sstevel@tonic-gate /* 5770Sstevel@tonic-gate * Loop through relocations. 5780Sstevel@tonic-gate */ 5790Sstevel@tonic-gate while (relbgn < relend) { 5800Sstevel@tonic-gate uint_t sb_flags = 0; 5810Sstevel@tonic-gate 5820Sstevel@tonic-gate rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info); 5830Sstevel@tonic-gate 5840Sstevel@tonic-gate /* 5850Sstevel@tonic-gate * If this is a RELATIVE relocation in a shared object (the 5860Sstevel@tonic-gate * common case), and if we are not debugging, then jump into a 5870Sstevel@tonic-gate * tighter relocation loop (elf_reloc_relative). Only make the 5880Sstevel@tonic-gate * jump if we've been given a hint on the number of relocations. 
5890Sstevel@tonic-gate */ 5900Sstevel@tonic-gate if ((rtype == R_AMD64_RELATIVE) && 5911618Srie ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) { 5920Sstevel@tonic-gate /* 5930Sstevel@tonic-gate * It's possible that the relative relocation block 5940Sstevel@tonic-gate * has relocations against the text segment as well 5950Sstevel@tonic-gate * as the data segment. Since our optimized relocation 5960Sstevel@tonic-gate * engine does not check which segment the relocation 5970Sstevel@tonic-gate * is against - just mprotect it now if it's been 5980Sstevel@tonic-gate * marked as containing TEXTREL's. 5990Sstevel@tonic-gate */ 6000Sstevel@tonic-gate if ((textrel == 0) && (FLAGS1(lmp) & FL1_RT_TEXTREL)) { 6010Sstevel@tonic-gate if (elf_set_prot(lmp, PROT_WRITE) == 0) { 6020Sstevel@tonic-gate ret = 0; 6030Sstevel@tonic-gate break; 6040Sstevel@tonic-gate } 6050Sstevel@tonic-gate textrel = 1; 6060Sstevel@tonic-gate } 6070Sstevel@tonic-gate if (relacount) { 6080Sstevel@tonic-gate relbgn = elf_reloc_relacount(relbgn, relacount, 6090Sstevel@tonic-gate relsiz, basebgn); 6100Sstevel@tonic-gate relacount = 0; 6110Sstevel@tonic-gate } else { 6120Sstevel@tonic-gate relbgn = elf_reloc_relative(relbgn, relend, 6130Sstevel@tonic-gate relsiz, basebgn, etext, emap); 6140Sstevel@tonic-gate } 6152145Srie 6160Sstevel@tonic-gate if (relbgn >= relend) 6170Sstevel@tonic-gate break; 6180Sstevel@tonic-gate rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info); 6190Sstevel@tonic-gate } 6200Sstevel@tonic-gate 6210Sstevel@tonic-gate roffset = ((Rela *)relbgn)->r_offset; 6220Sstevel@tonic-gate 6230Sstevel@tonic-gate /* 6240Sstevel@tonic-gate * If this is a shared object, add the base address to offset. 
6250Sstevel@tonic-gate */ 6260Sstevel@tonic-gate if (!(FLAGS(lmp) & FLG_RT_FIXED)) { 6270Sstevel@tonic-gate 6280Sstevel@tonic-gate 6290Sstevel@tonic-gate /* 6300Sstevel@tonic-gate * If we're processing lazy bindings, we have to step 6310Sstevel@tonic-gate * through the plt entries and add the base address 6320Sstevel@tonic-gate * to the corresponding got entry. 6330Sstevel@tonic-gate */ 6340Sstevel@tonic-gate if (plthint && (plt == 0) && 6350Sstevel@tonic-gate (rtype == R_AMD64_JUMP_SLOT) && 6360Sstevel@tonic-gate ((MODE(lmp) & RTLD_NOW) == 0)) { 6370Sstevel@tonic-gate /* 6380Sstevel@tonic-gate * The PLT relocations (for lazy bindings) 6390Sstevel@tonic-gate * are additive to what's already in the GOT. 6400Sstevel@tonic-gate * This differs to what happens in 6410Sstevel@tonic-gate * elf_reloc_relacount() and that's why we 6420Sstevel@tonic-gate * just do it inline here. 6430Sstevel@tonic-gate */ 6440Sstevel@tonic-gate for (roffset = ((Rela *)relbgn)->r_offset; 6450Sstevel@tonic-gate plthint; plthint--) { 6460Sstevel@tonic-gate roffset += basebgn; 6470Sstevel@tonic-gate 6480Sstevel@tonic-gate /* 6490Sstevel@tonic-gate * Perform the actual relocation. 6500Sstevel@tonic-gate */ 6510Sstevel@tonic-gate *((ulong_t *)roffset) += basebgn; 6520Sstevel@tonic-gate 6530Sstevel@tonic-gate relbgn += relsiz; 6540Sstevel@tonic-gate roffset = ((Rela *)relbgn)->r_offset; 6550Sstevel@tonic-gate 6560Sstevel@tonic-gate } 6570Sstevel@tonic-gate continue; 6580Sstevel@tonic-gate } 6590Sstevel@tonic-gate roffset += basebgn; 6600Sstevel@tonic-gate } 6610Sstevel@tonic-gate 6620Sstevel@tonic-gate reladd = (long)(((Rela *)relbgn)->r_addend); 6630Sstevel@tonic-gate rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info); 6640Sstevel@tonic-gate rel = (Rela *)relbgn; 6650Sstevel@tonic-gate relbgn += relsiz; 6660Sstevel@tonic-gate 6670Sstevel@tonic-gate /* 6680Sstevel@tonic-gate * Optimizations. 
6690Sstevel@tonic-gate */ 6700Sstevel@tonic-gate if (rtype == R_AMD64_NONE) 6710Sstevel@tonic-gate continue; 6720Sstevel@tonic-gate if (noplt && ((ulong_t)rel >= pltbgn) && 6730Sstevel@tonic-gate ((ulong_t)rel < pltend)) { 6740Sstevel@tonic-gate relbgn = pltend; 6750Sstevel@tonic-gate continue; 6760Sstevel@tonic-gate } 6770Sstevel@tonic-gate 6780Sstevel@tonic-gate /* 6790Sstevel@tonic-gate * If this relocation is not against part of the image 6800Sstevel@tonic-gate * mapped into memory we skip it. 6810Sstevel@tonic-gate */ 6820Sstevel@tonic-gate if ((roffset < ADDR(lmp)) || (roffset > (ADDR(lmp) + 6830Sstevel@tonic-gate MSIZE(lmp)))) { 6840Sstevel@tonic-gate elf_reloc_bad(lmp, (void *)rel, rtype, roffset, 6850Sstevel@tonic-gate rsymndx); 6860Sstevel@tonic-gate continue; 6870Sstevel@tonic-gate } 6880Sstevel@tonic-gate 6890Sstevel@tonic-gate /* 6900Sstevel@tonic-gate * If we're promoting plts determine if this one has already 6910Sstevel@tonic-gate * been written. 6920Sstevel@tonic-gate */ 6930Sstevel@tonic-gate if (plt) { 6940Sstevel@tonic-gate if ((*(ulong_t *)roffset < _pltbgn) || 6950Sstevel@tonic-gate (*(ulong_t *)roffset > _pltend)) 6960Sstevel@tonic-gate continue; 6970Sstevel@tonic-gate } 6980Sstevel@tonic-gate 6990Sstevel@tonic-gate binfo = 0; 7000Sstevel@tonic-gate /* 7010Sstevel@tonic-gate * If a symbol index is specified then get the symbol table 7020Sstevel@tonic-gate * entry, locate the symbol definition, and determine its 7030Sstevel@tonic-gate * address. 7040Sstevel@tonic-gate */ 7050Sstevel@tonic-gate if (rsymndx) { 7060Sstevel@tonic-gate /* 7070Sstevel@tonic-gate * Get the local symbol table entry. 7080Sstevel@tonic-gate */ 7090Sstevel@tonic-gate symref = (Sym *)((ulong_t)SYMTAB(lmp) + 7104679Srie (rsymndx * SYMENT(lmp))); 7110Sstevel@tonic-gate 7120Sstevel@tonic-gate /* 7130Sstevel@tonic-gate * If this is a local symbol, just use the base address. 
7140Sstevel@tonic-gate * (we should have no local relocations in the 7150Sstevel@tonic-gate * executable). 7160Sstevel@tonic-gate */ 7170Sstevel@tonic-gate if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) { 7180Sstevel@tonic-gate value = basebgn; 7190Sstevel@tonic-gate name = (char *)0; 7200Sstevel@tonic-gate 7210Sstevel@tonic-gate /* 7222145Srie * Special case TLS relocations. 7230Sstevel@tonic-gate */ 7242145Srie if (rtype == R_AMD64_DTPMOD64) { 7252145Srie /* 7262145Srie * Use the TLS modid. 7272145Srie */ 7280Sstevel@tonic-gate value = TLSMODID(lmp); 7292145Srie 7302145Srie } else if ((rtype == R_AMD64_TPOFF64) || 7312145Srie (rtype == R_AMD64_TPOFF32)) { 7322145Srie if ((value = elf_static_tls(lmp, symref, 7332145Srie rel, rtype, 0, roffset, 0)) == 0) { 7342145Srie ret = 0; 7352145Srie break; 7362145Srie } 7372145Srie } 7380Sstevel@tonic-gate } else { 7390Sstevel@tonic-gate /* 7400Sstevel@tonic-gate * If the symbol index is equal to the previous 7410Sstevel@tonic-gate * symbol index relocation we processed then 7420Sstevel@tonic-gate * reuse the previous values. (Note that there 7430Sstevel@tonic-gate * have been cases where a relocation exists 7440Sstevel@tonic-gate * against a copy relocation symbol, our ld(1) 7450Sstevel@tonic-gate * should optimize this away, but make sure we 7460Sstevel@tonic-gate * don't use the same symbol information should 7470Sstevel@tonic-gate * this case exist). 
7480Sstevel@tonic-gate */ 7490Sstevel@tonic-gate if ((rsymndx == psymndx) && 7500Sstevel@tonic-gate (rtype != R_AMD64_COPY)) { 7510Sstevel@tonic-gate /* LINTED */ 7520Sstevel@tonic-gate if (psymdef == 0) { 7531618Srie DBG_CALL(Dbg_bind_weak(lmp, 7541618Srie (Addr)roffset, (Addr) 7550Sstevel@tonic-gate (roffset - basebgn), name)); 7560Sstevel@tonic-gate continue; 7570Sstevel@tonic-gate } 7580Sstevel@tonic-gate /* LINTED */ 7590Sstevel@tonic-gate value = pvalue; 7600Sstevel@tonic-gate /* LINTED */ 7610Sstevel@tonic-gate name = pname; 7620Sstevel@tonic-gate /* LINTED */ 7630Sstevel@tonic-gate symdef = psymdef; 7640Sstevel@tonic-gate /* LINTED */ 7650Sstevel@tonic-gate symref = psymref; 7660Sstevel@tonic-gate /* LINTED */ 7670Sstevel@tonic-gate _lmp = plmp; 7680Sstevel@tonic-gate /* LINTED */ 7690Sstevel@tonic-gate binfo = pbinfo; 7700Sstevel@tonic-gate 7710Sstevel@tonic-gate if ((LIST(_lmp)->lm_tflags | 7720Sstevel@tonic-gate FLAGS1(_lmp)) & 7730Sstevel@tonic-gate LML_TFLG_AUD_SYMBIND) { 7740Sstevel@tonic-gate value = audit_symbind(lmp, _lmp, 7750Sstevel@tonic-gate /* LINTED */ 7760Sstevel@tonic-gate symdef, dsymndx, value, 7770Sstevel@tonic-gate &sb_flags); 7780Sstevel@tonic-gate } 7790Sstevel@tonic-gate } else { 7800Sstevel@tonic-gate Slookup sl; 7810Sstevel@tonic-gate 7820Sstevel@tonic-gate /* 7830Sstevel@tonic-gate * Lookup the symbol definition. 7845950Srie * Initialize the symbol lookup data 7855950Srie * structure. 
7860Sstevel@tonic-gate */ 7870Sstevel@tonic-gate name = (char *)(STRTAB(lmp) + 7880Sstevel@tonic-gate symref->st_name); 7890Sstevel@tonic-gate 7905950Srie SLOOKUP_INIT(sl, name, lmp, 0, 7915950Srie ld_entry_cnt, 0, rsymndx, symref, 7925950Srie rtype, LKUP_STDRELOC); 7930Sstevel@tonic-gate 7940Sstevel@tonic-gate symdef = lookup_sym(&sl, &_lmp, &binfo); 7950Sstevel@tonic-gate 7960Sstevel@tonic-gate /* 7970Sstevel@tonic-gate * If the symbol is not found and the 7980Sstevel@tonic-gate * reference was not to a weak symbol, 7990Sstevel@tonic-gate * report an error. Weak references 8000Sstevel@tonic-gate * may be unresolved. 8010Sstevel@tonic-gate */ 8024679Srie /* BEGIN CSTYLED */ 8030Sstevel@tonic-gate if (symdef == 0) { 804*6150Srie if (sl.sl_bind != STB_WEAK) { 805*6150Srie if (elf_reloc_error(lmp, name, 806*6150Srie rel, binfo)) 807*6150Srie continue; 8081618Srie 8090Sstevel@tonic-gate ret = 0; 8100Sstevel@tonic-gate break; 811*6150Srie 8120Sstevel@tonic-gate } else { 8130Sstevel@tonic-gate psymndx = rsymndx; 8140Sstevel@tonic-gate psymdef = 0; 8150Sstevel@tonic-gate 8161618Srie DBG_CALL(Dbg_bind_weak(lmp, 8171618Srie (Addr)roffset, (Addr) 8180Sstevel@tonic-gate (roffset - basebgn), name)); 8190Sstevel@tonic-gate continue; 8200Sstevel@tonic-gate } 8210Sstevel@tonic-gate } 8224679Srie /* END CSTYLED */ 8230Sstevel@tonic-gate 8240Sstevel@tonic-gate /* 8250Sstevel@tonic-gate * If symbol was found in an object 8260Sstevel@tonic-gate * other than the referencing object 8270Sstevel@tonic-gate * then record the binding. 
8280Sstevel@tonic-gate */ 8290Sstevel@tonic-gate if ((lmp != _lmp) && ((FLAGS1(_lmp) & 8300Sstevel@tonic-gate FL1_RT_NOINIFIN) == 0)) { 8315892Sab196087 if (aplist_test(&bound, _lmp, 8320Sstevel@tonic-gate AL_CNT_RELBIND) == 0) { 8330Sstevel@tonic-gate ret = 0; 8340Sstevel@tonic-gate break; 8350Sstevel@tonic-gate } 8360Sstevel@tonic-gate } 8370Sstevel@tonic-gate 8380Sstevel@tonic-gate /* 8390Sstevel@tonic-gate * Calculate the location of definition; 8400Sstevel@tonic-gate * symbol value plus base address of 8410Sstevel@tonic-gate * containing shared object. 8420Sstevel@tonic-gate */ 8432850Srie if (IS_SIZE(rtype)) 8442850Srie value = symdef->st_size; 8452850Srie else 8462850Srie value = symdef->st_value; 8472850Srie 8480Sstevel@tonic-gate if (!(FLAGS(_lmp) & FLG_RT_FIXED) && 8492850Srie !(IS_SIZE(rtype)) && 8500Sstevel@tonic-gate (symdef->st_shndx != SHN_ABS) && 8510Sstevel@tonic-gate (ELF_ST_TYPE(symdef->st_info) != 8520Sstevel@tonic-gate STT_TLS)) 8530Sstevel@tonic-gate value += ADDR(_lmp); 8540Sstevel@tonic-gate 8550Sstevel@tonic-gate /* 8560Sstevel@tonic-gate * Retain this symbol index and the 8570Sstevel@tonic-gate * value in case it can be used for the 8580Sstevel@tonic-gate * subsequent relocations. 
8590Sstevel@tonic-gate */ 8600Sstevel@tonic-gate if (rtype != R_AMD64_COPY) { 8610Sstevel@tonic-gate psymndx = rsymndx; 8620Sstevel@tonic-gate pvalue = value; 8630Sstevel@tonic-gate pname = name; 8640Sstevel@tonic-gate psymdef = symdef; 8650Sstevel@tonic-gate psymref = symref; 8660Sstevel@tonic-gate plmp = _lmp; 8670Sstevel@tonic-gate pbinfo = binfo; 8680Sstevel@tonic-gate } 8690Sstevel@tonic-gate if ((LIST(_lmp)->lm_tflags | 8700Sstevel@tonic-gate FLAGS1(_lmp)) & 8710Sstevel@tonic-gate LML_TFLG_AUD_SYMBIND) { 8720Sstevel@tonic-gate dsymndx = (((uintptr_t)symdef - 8730Sstevel@tonic-gate (uintptr_t)SYMTAB(_lmp)) / 8740Sstevel@tonic-gate SYMENT(_lmp)); 8750Sstevel@tonic-gate value = audit_symbind(lmp, _lmp, 8760Sstevel@tonic-gate symdef, dsymndx, value, 8770Sstevel@tonic-gate &sb_flags); 8780Sstevel@tonic-gate } 8790Sstevel@tonic-gate } 8800Sstevel@tonic-gate 8810Sstevel@tonic-gate /* 8820Sstevel@tonic-gate * If relocation is PC-relative, subtract 8830Sstevel@tonic-gate * offset address. 8840Sstevel@tonic-gate */ 8850Sstevel@tonic-gate if (IS_PC_RELATIVE(rtype)) 8860Sstevel@tonic-gate value -= roffset; 8870Sstevel@tonic-gate 8880Sstevel@tonic-gate /* 8892145Srie * Special case TLS relocations. 8900Sstevel@tonic-gate */ 8912145Srie if (rtype == R_AMD64_DTPMOD64) { 8922145Srie /* 8932145Srie * Relocation value is the TLS modid. 8942145Srie */ 8950Sstevel@tonic-gate value = TLSMODID(_lmp); 8962145Srie 8972145Srie } else if ((rtype == R_AMD64_TPOFF64) || 8982145Srie (rtype == R_AMD64_TPOFF32)) { 8992145Srie if ((value = elf_static_tls(_lmp, 9002145Srie symdef, rel, rtype, name, roffset, 9012145Srie value)) == 0) { 9022145Srie ret = 0; 9032145Srie break; 9042145Srie } 9052145Srie } 9060Sstevel@tonic-gate } 9070Sstevel@tonic-gate } else { 9080Sstevel@tonic-gate /* 9092145Srie * Special cases. 9100Sstevel@tonic-gate */ 9112145Srie if (rtype == R_AMD64_DTPMOD64) { 9122145Srie /* 9132145Srie * TLS relocation value is the TLS modid. 
9142145Srie */ 9150Sstevel@tonic-gate value = TLSMODID(lmp); 9162145Srie } else 9170Sstevel@tonic-gate value = basebgn; 9180Sstevel@tonic-gate name = (char *)0; 9190Sstevel@tonic-gate } 9200Sstevel@tonic-gate 9212145Srie DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH, 9222145Srie M_REL_SHT_TYPE, rel, NULL, name)); 9232145Srie 9240Sstevel@tonic-gate /* 9250Sstevel@tonic-gate * If this object has relocations in the text segment, turn 9260Sstevel@tonic-gate * off the write protect. 9270Sstevel@tonic-gate */ 9280Sstevel@tonic-gate if ((roffset < etext) && (textrel == 0)) { 9290Sstevel@tonic-gate if (elf_set_prot(lmp, PROT_WRITE) == 0) { 9300Sstevel@tonic-gate ret = 0; 9310Sstevel@tonic-gate break; 9320Sstevel@tonic-gate } 9330Sstevel@tonic-gate textrel = 1; 9340Sstevel@tonic-gate } 9350Sstevel@tonic-gate 9360Sstevel@tonic-gate /* 9370Sstevel@tonic-gate * Call relocation routine to perform required relocation. 9380Sstevel@tonic-gate */ 9390Sstevel@tonic-gate switch (rtype) { 9400Sstevel@tonic-gate case R_AMD64_COPY: 9410Sstevel@tonic-gate if (elf_copy_reloc(name, symref, lmp, (void *)roffset, 9420Sstevel@tonic-gate symdef, _lmp, (const void *)value) == 0) 9430Sstevel@tonic-gate ret = 0; 9440Sstevel@tonic-gate break; 9450Sstevel@tonic-gate case R_AMD64_JUMP_SLOT: 9460Sstevel@tonic-gate if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) & 9470Sstevel@tonic-gate (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) && 9480Sstevel@tonic-gate AUDINFO(lmp)->ai_dynplts) { 9490Sstevel@tonic-gate int fail = 0; 9500Sstevel@tonic-gate int pltndx = (((ulong_t)rel - 9514679Srie (uintptr_t)JMPREL(lmp)) / relsiz); 9520Sstevel@tonic-gate int symndx = (((uintptr_t)symdef - 9534679Srie (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp)); 9540Sstevel@tonic-gate 9550Sstevel@tonic-gate (void) elf_plt_trace_write(roffset, lmp, _lmp, 9560Sstevel@tonic-gate symdef, symndx, pltndx, (caddr_t)value, 9570Sstevel@tonic-gate sb_flags, &fail); 9580Sstevel@tonic-gate if (fail) 9590Sstevel@tonic-gate ret = 0; 
9600Sstevel@tonic-gate } else { 9610Sstevel@tonic-gate /* 9620Sstevel@tonic-gate * Write standard PLT entry to jump directly 9630Sstevel@tonic-gate * to newly bound function. 9640Sstevel@tonic-gate */ 9651618Srie DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), 9661618Srie ELF_DBG_RTLD, (Xword)roffset, 9670Sstevel@tonic-gate (Xword)value)); 9680Sstevel@tonic-gate *(ulong_t *)roffset = value; 9690Sstevel@tonic-gate } 9700Sstevel@tonic-gate break; 9710Sstevel@tonic-gate default: 9720Sstevel@tonic-gate value += reladd; 9730Sstevel@tonic-gate /* 9740Sstevel@tonic-gate * Write the relocation out. 9750Sstevel@tonic-gate */ 9765189Sab196087 if (do_reloc_rtld(rtype, (uchar_t *)roffset, 9771618Srie (Xword *)&value, name, NAME(lmp), LIST(lmp)) == 0) 9780Sstevel@tonic-gate ret = 0; 9790Sstevel@tonic-gate 9801618Srie DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD, 9811618Srie (Xword)roffset, (Xword)value)); 9820Sstevel@tonic-gate } 9830Sstevel@tonic-gate 9840Sstevel@tonic-gate if ((ret == 0) && 9850Sstevel@tonic-gate ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0)) 9860Sstevel@tonic-gate break; 9870Sstevel@tonic-gate 9880Sstevel@tonic-gate if (binfo) { 9891618Srie DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset, 9901618Srie (Off)(roffset - basebgn), (Xword)(-1), PLT_T_FULL, 9911618Srie _lmp, (Addr)value, symdef->st_value, name, binfo)); 9920Sstevel@tonic-gate } 9930Sstevel@tonic-gate } 9940Sstevel@tonic-gate 9950Sstevel@tonic-gate return (relocate_finish(lmp, bound, textrel, ret)); 9960Sstevel@tonic-gate } 9970Sstevel@tonic-gate 9980Sstevel@tonic-gate /* 9990Sstevel@tonic-gate * Initialize the first few got entries so that function calls go to 10000Sstevel@tonic-gate * elf_rtbndr: 10010Sstevel@tonic-gate * 10020Sstevel@tonic-gate * GOT[GOT_XLINKMAP] = the address of the link map 10030Sstevel@tonic-gate * GOT[GOT_XRTLD] = the address of rtbinder 10040Sstevel@tonic-gate */ 10050Sstevel@tonic-gate void 10060Sstevel@tonic-gate elf_plt_init(void *got, caddr_t l) 10070Sstevel@tonic-gate { 
10080Sstevel@tonic-gate uint64_t *_got; 10090Sstevel@tonic-gate /* LINTED */ 10100Sstevel@tonic-gate Rt_map *lmp = (Rt_map *)l; 10110Sstevel@tonic-gate 10120Sstevel@tonic-gate _got = (uint64_t *)got + M_GOT_XLINKMAP; 10130Sstevel@tonic-gate *_got = (uint64_t)lmp; 10140Sstevel@tonic-gate _got = (uint64_t *)got + M_GOT_XRTLD; 10150Sstevel@tonic-gate *_got = (uint64_t)elf_rtbndr; 10160Sstevel@tonic-gate } 10170Sstevel@tonic-gate 10180Sstevel@tonic-gate /* 10190Sstevel@tonic-gate * Plt writing interface to allow debugging initialization to be generic. 10200Sstevel@tonic-gate */ 10210Sstevel@tonic-gate Pltbindtype 10220Sstevel@tonic-gate /* ARGSUSED1 */ 10230Sstevel@tonic-gate elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval, 10240Sstevel@tonic-gate Xword pltndx) 10250Sstevel@tonic-gate { 10260Sstevel@tonic-gate Rela *rel = (Rela*)rptr; 10270Sstevel@tonic-gate uintptr_t pltaddr; 10280Sstevel@tonic-gate 10290Sstevel@tonic-gate pltaddr = addr + rel->r_offset; 10300Sstevel@tonic-gate *(ulong_t *)pltaddr = (ulong_t)symval + rel->r_addend; 10310Sstevel@tonic-gate DBG_CALL(pltcntfull++); 10320Sstevel@tonic-gate return (PLT_T_FULL); 10330Sstevel@tonic-gate } 10340Sstevel@tonic-gate 10350Sstevel@tonic-gate /* 10360Sstevel@tonic-gate * Provide a machine specific interface to the conversion routine. By calling 10370Sstevel@tonic-gate * the machine specific version, rather than the generic version, we insure that 10380Sstevel@tonic-gate * the data tables/strings for all known machine versions aren't dragged into 10390Sstevel@tonic-gate * ld.so.1. 10400Sstevel@tonic-gate */ 10410Sstevel@tonic-gate const char * 10421618Srie _conv_reloc_type(uint_t rel) 10430Sstevel@tonic-gate { 10444734Sab196087 static Conv_inv_buf_t inv_buf; 10454734Sab196087 10464734Sab196087 return (conv_reloc_amd64_type(rel, 0, &inv_buf)); 10470Sstevel@tonic-gate } 1048