10Sstevel@tonic-gate /* 20Sstevel@tonic-gate * CDDL HEADER START 30Sstevel@tonic-gate * 40Sstevel@tonic-gate * The contents of this file are subject to the terms of the 5*1618Srie * Common Development and Distribution License (the "License"). 6*1618Srie * You may not use this file except in compliance with the License. 70Sstevel@tonic-gate * 80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 100Sstevel@tonic-gate * See the License for the specific language governing permissions 110Sstevel@tonic-gate * and limitations under the License. 120Sstevel@tonic-gate * 130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 180Sstevel@tonic-gate * 190Sstevel@tonic-gate * CDDL HEADER END 200Sstevel@tonic-gate */ 21*1618Srie 220Sstevel@tonic-gate /* 23*1618Srie * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 24*1618Srie * Use is subject to license terms. 250Sstevel@tonic-gate */ 260Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 270Sstevel@tonic-gate 280Sstevel@tonic-gate /* 290Sstevel@tonic-gate * amd64 machine dependent and ELF file class dependent functions. 300Sstevel@tonic-gate * Contains routines for performing function binding and symbol relocations. 
 */
#include	"_synonyms.h"

#include	<stdio.h>
#include	<sys/elf.h>
#include	<sys/elf_amd64.h>
#include	<sys/mman.h>
#include	<dlfcn.h>
#include	<synch.h>
#include	<string.h>
#include	<debug.h>
#include	<reloc.h>
#include	<conv.h>
#include	"_rtld.h"
#include	"_audit.h"
#include	"_elf.h"
#include	"msg.h"


extern void	elf_rtbndr(Rt_map *, ulong_t, caddr_t);

/*
 * Validate the machine-dependent portion of an ELF header.  amd64 objects
 * define no machine flags, so any non-zero e_flags value marks the object
 * as unloadable.  Returns 1 if acceptable, or 0 with the rejection reason
 * recorded in *rej.
 */
int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
	/*
	 * Check machine type and flags.
	 */
	if (ehdr->e_flags != 0) {
		rej->rej_type = SGS_REJ_BADFLAG;
		rej->rej_info = (uint_t)ehdr->e_flags;
		return (0);
	}
	return (1);
}

/*
 * Prepare ld.so.1's own PLT/GOT for lazy binding.
 */
void
ldso_plt_init(Rt_map * lmp)
{
	/*
	 * There is no need to analyze ld.so because we don't map in any of
	 * its dependencies.  However we may map these dependencies in later
	 * (as if ld.so had dlopened them), so initialize the plt and the
	 * permission information.
	 */
	if (PLTGOT(lmp))
		elf_plt_init((void *)(PLTGOT(lmp)), (caddr_t)lmp);
}

/*
 * Template for the per-plt "glue" code that routes a call through
 * elf_plt_trace() when an audit library has asked to see this binding.
 * The two zeroed operand fields are patched at runtime (see the
 * relocation table below and elf_plt_trace_write()).
 */
static const uchar_t dyn_plt_template[] = {
/* 0x00 */	0x55,				/* pushq %rbp */
/* 0x01 */	0x48, 0x89, 0xe5,		/* movq %rsp, %rbp */
/* 0x04 */	0x48, 0x83, 0xec, 0x10,		/* subq $0x10, %rsp */
/* 0x08 */	0x4c, 0x8d, 0x1d, 0x00,		/* leaq trace_fields(%rip), %r11 */
		0x00, 0x00, 0x00,
/* 0x0f */	0x4c, 0x89, 0x5d, 0xf8,		/* movq %r11, -0x8(%rbp) */
/* 0x13 */	0x49, 0xbb, 0x00, 0x00,		/* movq $elf_plt_trace, %r11 */
		0x00, 0x00, 0x00,
		0x00, 0x00, 0x00,
/* 0x1d */	0x41, 0xff, 0xe3		/* jmp *%r11 */
/* 0x20 */
};

/*
 * And the virtual outstanding relocations against the
 * above block are:
 *
 *	reloc		offset	Addend	symbol
 *	R_AMD64_PC32	0x0b	-4	trace_fields
 *	R_AMD64_64	0x15	0	elf_plt_trace
 */

#define	TRCREL1OFF	0x0b
#define	TRCREL2OFF	0x15

int	dyn_plt_ent_size = sizeof (dyn_plt_template);

/*
 * the dynamic plt entry is:
 *
 *	pushq	%rbp
 *	movq	%rsp, %rbp
 *	subq	$0x10, %rsp
 *	leaq	trace_fields(%rip), %r11
 *	movq	%r11, -0x8(%rbp)
 *	movq	$elf_plt_trace, %r11
 *	jmp	*%r11
1170Sstevel@tonic-gate * dyn_data: 1180Sstevel@tonic-gate * .align 8 1190Sstevel@tonic-gate * uintptr_t reflmp 1200Sstevel@tonic-gate * uintptr_t deflmp 1210Sstevel@tonic-gate * uint_t symndx 1220Sstevel@tonic-gate * uint_t sb_flags 1230Sstevel@tonic-gate * Sym symdef 1240Sstevel@tonic-gate */ 1250Sstevel@tonic-gate static caddr_t 1260Sstevel@tonic-gate elf_plt_trace_write(ulong_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym, 1270Sstevel@tonic-gate uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail) 1280Sstevel@tonic-gate { 1290Sstevel@tonic-gate extern int elf_plt_trace(); 1300Sstevel@tonic-gate ulong_t got_entry; 1310Sstevel@tonic-gate uchar_t *dyn_plt; 1320Sstevel@tonic-gate uintptr_t *dyndata; 1330Sstevel@tonic-gate 1340Sstevel@tonic-gate 1350Sstevel@tonic-gate /* 1360Sstevel@tonic-gate * We only need to add the glue code if there is an auditing 1370Sstevel@tonic-gate * library that is interested in this binding. 1380Sstevel@tonic-gate */ 1390Sstevel@tonic-gate dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts + 1400Sstevel@tonic-gate (pltndx * dyn_plt_ent_size)); 1410Sstevel@tonic-gate 1420Sstevel@tonic-gate /* 1430Sstevel@tonic-gate * Have we initialized this dynamic plt entry yet? If we haven't do it 1440Sstevel@tonic-gate * now. Otherwise this function has been called before, but from a 1450Sstevel@tonic-gate * different plt (ie. from another shared object). In that case 1460Sstevel@tonic-gate * we just set the plt to point to the new dyn_plt. 
1470Sstevel@tonic-gate */ 1480Sstevel@tonic-gate if (*dyn_plt == 0) { 1490Sstevel@tonic-gate Sym * symp; 1500Sstevel@tonic-gate Xword symvalue; 151*1618Srie Lm_list *lml = LIST(rlmp); 1520Sstevel@tonic-gate 1530Sstevel@tonic-gate (void) memcpy((void *)dyn_plt, dyn_plt_template, 1540Sstevel@tonic-gate sizeof (dyn_plt_template)); 1550Sstevel@tonic-gate dyndata = (uintptr_t *)((uintptr_t)dyn_plt + 1560Sstevel@tonic-gate ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN)); 1570Sstevel@tonic-gate 1580Sstevel@tonic-gate /* 1590Sstevel@tonic-gate * relocate: 1600Sstevel@tonic-gate * leaq trace_fields(%rip), %r11 1610Sstevel@tonic-gate * R_AMD64_PC32 0x0b -4 trace_fields 1620Sstevel@tonic-gate */ 1630Sstevel@tonic-gate symvalue = (Xword)((uintptr_t)dyndata - 1640Sstevel@tonic-gate (uintptr_t)(&dyn_plt[TRCREL1OFF]) - 4); 165*1618Srie if (do_reloc(R_AMD64_PC32, &dyn_plt[TRCREL1OFF], 166*1618Srie &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA), 167*1618Srie MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) { 1680Sstevel@tonic-gate *fail = 1; 1690Sstevel@tonic-gate return (0); 1700Sstevel@tonic-gate } 1710Sstevel@tonic-gate 1720Sstevel@tonic-gate /* 1730Sstevel@tonic-gate * relocating: 1740Sstevel@tonic-gate * movq $elf_plt_trace, %r11 1750Sstevel@tonic-gate * R_AMD64_64 0x15 0 elf_plt_trace 1760Sstevel@tonic-gate */ 1770Sstevel@tonic-gate symvalue = (Xword)elf_plt_trace; 178*1618Srie if (do_reloc(R_AMD64_64, &dyn_plt[TRCREL2OFF], 179*1618Srie &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE), 180*1618Srie MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) { 1810Sstevel@tonic-gate *fail = 1; 1820Sstevel@tonic-gate return (0); 1830Sstevel@tonic-gate } 1840Sstevel@tonic-gate 1850Sstevel@tonic-gate *dyndata++ = (uintptr_t)rlmp; 1860Sstevel@tonic-gate *dyndata++ = (uintptr_t)dlmp; 1870Sstevel@tonic-gate *dyndata = (uintptr_t)(((uint64_t)sb_flags << 32) | symndx); 1880Sstevel@tonic-gate dyndata++; 1890Sstevel@tonic-gate symp = (Sym *)dyndata; 1900Sstevel@tonic-gate *symp = *sym; 1910Sstevel@tonic-gate symp->st_value = 
(Addr)to; 1920Sstevel@tonic-gate } 1930Sstevel@tonic-gate 1940Sstevel@tonic-gate got_entry = (ulong_t)roffset; 1950Sstevel@tonic-gate *(ulong_t *)got_entry = (ulong_t)dyn_plt; 1960Sstevel@tonic-gate return ((caddr_t)dyn_plt); 1970Sstevel@tonic-gate } 1980Sstevel@tonic-gate 1990Sstevel@tonic-gate 2000Sstevel@tonic-gate /* 2010Sstevel@tonic-gate * Function binding routine - invoked on the first call to a function through 2020Sstevel@tonic-gate * the procedure linkage table; 2030Sstevel@tonic-gate * passes first through an assembly language interface. 2040Sstevel@tonic-gate * 2050Sstevel@tonic-gate * Takes the offset into the relocation table of the associated 2060Sstevel@tonic-gate * relocation entry and the address of the link map (rt_private_map struct) 2070Sstevel@tonic-gate * for the entry. 2080Sstevel@tonic-gate * 2090Sstevel@tonic-gate * Returns the address of the function referenced after re-writing the PLT 2100Sstevel@tonic-gate * entry to invoke the function directly. 2110Sstevel@tonic-gate * 2120Sstevel@tonic-gate * On error, causes process to terminate with a signal. 2130Sstevel@tonic-gate */ 2140Sstevel@tonic-gate ulong_t 2150Sstevel@tonic-gate elf_bndr(Rt_map *lmp, ulong_t pltndx, caddr_t from) 2160Sstevel@tonic-gate { 2170Sstevel@tonic-gate Rt_map *nlmp, * llmp; 2180Sstevel@tonic-gate ulong_t addr, reloff, symval, rsymndx; 2190Sstevel@tonic-gate char *name; 2200Sstevel@tonic-gate Rela *rptr; 2210Sstevel@tonic-gate Sym *sym, *nsym; 222*1618Srie uint_t binfo, sb_flags = 0, dbg_class; 2230Sstevel@tonic-gate Slookup sl; 224*1618Srie int entry, lmflags; 225*1618Srie Lm_list *lml; 2260Sstevel@tonic-gate 2270Sstevel@tonic-gate /* 2280Sstevel@tonic-gate * For compatibility with libthread (TI_VERSION 1) we track the entry 2290Sstevel@tonic-gate * value. A zero value indicates we have recursed into ld.so.1 to 2300Sstevel@tonic-gate * further process a locking request. Under this recursion we disable 2310Sstevel@tonic-gate * tsort and cleanup activities. 
2320Sstevel@tonic-gate */ 2330Sstevel@tonic-gate entry = enter(); 2340Sstevel@tonic-gate 235*1618Srie lml = LIST(lmp); 236*1618Srie if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) { 237*1618Srie dbg_class = dbg_desc->d_class; 238*1618Srie dbg_desc->d_class = 0; 2390Sstevel@tonic-gate } 2400Sstevel@tonic-gate 2410Sstevel@tonic-gate /* 2420Sstevel@tonic-gate * Perform some basic sanity checks. If we didn't get a load map or 2430Sstevel@tonic-gate * the relocation offset is invalid then its possible someone has walked 2440Sstevel@tonic-gate * over the .got entries or jumped to plt0 out of the blue. 2450Sstevel@tonic-gate */ 2460Sstevel@tonic-gate if ((!lmp) && (pltndx <= 2470Sstevel@tonic-gate (ulong_t)PLTRELSZ(lmp) / (ulong_t)RELENT(lmp))) { 248*1618Srie eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF), 249*1618Srie conv_reloc_amd64_type(R_AMD64_JUMP_SLOT), 250*1618Srie EC_NATPTR(lmp), EC_XWORD(pltndx), EC_NATPTR(from)); 251*1618Srie rtldexit(lml, 1); 2520Sstevel@tonic-gate } 2530Sstevel@tonic-gate reloff = pltndx * (ulong_t)RELENT(lmp); 2540Sstevel@tonic-gate 2550Sstevel@tonic-gate /* 2560Sstevel@tonic-gate * Use relocation entry to get symbol table entry and symbol name. 2570Sstevel@tonic-gate */ 2580Sstevel@tonic-gate addr = (ulong_t)JMPREL(lmp); 2590Sstevel@tonic-gate rptr = (Rela *)(addr + reloff); 2600Sstevel@tonic-gate rsymndx = ELF_R_SYM(rptr->r_info); 2610Sstevel@tonic-gate sym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp))); 2620Sstevel@tonic-gate name = (char *)(STRTAB(lmp) + sym->st_name); 2630Sstevel@tonic-gate 2640Sstevel@tonic-gate /* 2650Sstevel@tonic-gate * Determine the last link-map of this list, this'll be the starting 2660Sstevel@tonic-gate * point for any tsort() processing. 2670Sstevel@tonic-gate */ 268*1618Srie llmp = lml->lm_tail; 2690Sstevel@tonic-gate 2700Sstevel@tonic-gate /* 2710Sstevel@tonic-gate * Find definition for symbol. 
2720Sstevel@tonic-gate */ 2730Sstevel@tonic-gate sl.sl_name = name; 2740Sstevel@tonic-gate sl.sl_cmap = lmp; 275*1618Srie sl.sl_imap = lml->lm_head; 2760Sstevel@tonic-gate sl.sl_hash = 0; 2770Sstevel@tonic-gate sl.sl_rsymndx = rsymndx; 2780Sstevel@tonic-gate sl.sl_flags = LKUP_DEFT; 2790Sstevel@tonic-gate 2800Sstevel@tonic-gate if ((nsym = lookup_sym(&sl, &nlmp, &binfo)) == 0) { 281*1618Srie eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp), 2820Sstevel@tonic-gate demangle(name)); 283*1618Srie rtldexit(lml, 1); 2840Sstevel@tonic-gate } 2850Sstevel@tonic-gate 2860Sstevel@tonic-gate symval = nsym->st_value; 2870Sstevel@tonic-gate if (!(FLAGS(nlmp) & FLG_RT_FIXED) && 2880Sstevel@tonic-gate (nsym->st_shndx != SHN_ABS)) 2890Sstevel@tonic-gate symval += ADDR(nlmp); 2900Sstevel@tonic-gate if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) { 2910Sstevel@tonic-gate /* 2920Sstevel@tonic-gate * Record that this new link map is now bound to the caller. 2930Sstevel@tonic-gate */ 2940Sstevel@tonic-gate if (bind_one(lmp, nlmp, BND_REFER) == 0) 295*1618Srie rtldexit(lml, 1); 2960Sstevel@tonic-gate } 2970Sstevel@tonic-gate 298*1618Srie if ((lml->lm_tflags | FLAGS1(lmp)) & LML_TFLG_AUD_SYMBIND) { 2990Sstevel@tonic-gate uint_t symndx = (((uintptr_t)nsym - 3000Sstevel@tonic-gate (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp)); 3010Sstevel@tonic-gate symval = audit_symbind(lmp, nlmp, nsym, symndx, symval, 3020Sstevel@tonic-gate &sb_flags); 3030Sstevel@tonic-gate } 3040Sstevel@tonic-gate 3050Sstevel@tonic-gate if (!(rtld_flags & RT_FL_NOBIND)) { 3060Sstevel@tonic-gate addr = rptr->r_offset; 3070Sstevel@tonic-gate if (!(FLAGS(lmp) & FLG_RT_FIXED)) 3080Sstevel@tonic-gate addr += ADDR(lmp); 309*1618Srie if (((lml->lm_tflags | FLAGS1(lmp)) & 3100Sstevel@tonic-gate (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) && 3110Sstevel@tonic-gate AUDINFO(lmp)->ai_dynplts) { 3120Sstevel@tonic-gate int fail = 0; 3130Sstevel@tonic-gate uint_t pltndx = reloff / sizeof (Rela); 
3140Sstevel@tonic-gate uint_t symndx = (((uintptr_t)nsym - 3150Sstevel@tonic-gate (uintptr_t)SYMTAB(nlmp)) / 3160Sstevel@tonic-gate SYMENT(nlmp)); 3170Sstevel@tonic-gate 3180Sstevel@tonic-gate symval = (ulong_t)elf_plt_trace_write(addr, lmp, nlmp, 3190Sstevel@tonic-gate nsym, symndx, pltndx, (caddr_t)symval, sb_flags, 3200Sstevel@tonic-gate &fail); 3210Sstevel@tonic-gate if (fail) 322*1618Srie rtldexit(lml, 1); 3230Sstevel@tonic-gate } else { 3240Sstevel@tonic-gate /* 3250Sstevel@tonic-gate * Write standard PLT entry to jump directly 3260Sstevel@tonic-gate * to newly bound function. 3270Sstevel@tonic-gate */ 3280Sstevel@tonic-gate *(ulong_t *)addr = symval; 3290Sstevel@tonic-gate } 3300Sstevel@tonic-gate } 3310Sstevel@tonic-gate 3320Sstevel@tonic-gate /* 3330Sstevel@tonic-gate * Print binding information and rebuild PLT entry. 3340Sstevel@tonic-gate */ 335*1618Srie DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)), 336*1618Srie (Xword)(reloff / sizeof (Rela)), PLT_T_FULL, nlmp, 337*1618Srie (Addr)symval, nsym->st_value, name, binfo)); 3380Sstevel@tonic-gate 3390Sstevel@tonic-gate /* 3400Sstevel@tonic-gate * Complete any processing for newly loaded objects. Note we don't 3410Sstevel@tonic-gate * know exactly where any new objects are loaded (we know the object 3420Sstevel@tonic-gate * that supplied the symbol, but others may have been loaded lazily as 3430Sstevel@tonic-gate * we searched for the symbol), so sorting starts from the last 3440Sstevel@tonic-gate * link-map know on entry to this routine. 3450Sstevel@tonic-gate */ 3460Sstevel@tonic-gate if (entry) 3470Sstevel@tonic-gate load_completion(llmp, lmp); 3480Sstevel@tonic-gate 3490Sstevel@tonic-gate /* 3500Sstevel@tonic-gate * Some operations like dldump() or dlopen()'ing a relocatable object 3510Sstevel@tonic-gate * result in objects being loaded on rtld's link-map, make sure these 3520Sstevel@tonic-gate * objects are initialized also. 
3530Sstevel@tonic-gate */ 354*1618Srie if ((lml->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init) 3550Sstevel@tonic-gate load_completion(nlmp, 0); 3560Sstevel@tonic-gate 3570Sstevel@tonic-gate /* 3580Sstevel@tonic-gate * If the object we've bound to is in the process of being initialized 3590Sstevel@tonic-gate * by another thread, determine whether we should block. 3600Sstevel@tonic-gate */ 3610Sstevel@tonic-gate is_dep_ready(nlmp, lmp, DBG_WAIT_SYMBOL); 3620Sstevel@tonic-gate 3630Sstevel@tonic-gate /* 3640Sstevel@tonic-gate * Make sure the object to which we've bound has had it's .init fired. 3650Sstevel@tonic-gate * Cleanup before return to user code. 3660Sstevel@tonic-gate */ 3670Sstevel@tonic-gate if (entry) { 3680Sstevel@tonic-gate is_dep_init(nlmp, lmp); 369*1618Srie leave(lml); 3700Sstevel@tonic-gate } 3710Sstevel@tonic-gate 3720Sstevel@tonic-gate if (lmflags & LML_FLG_RTLDLM) 373*1618Srie dbg_desc->d_class = dbg_class; 3740Sstevel@tonic-gate 3750Sstevel@tonic-gate return (symval); 3760Sstevel@tonic-gate } 3770Sstevel@tonic-gate 3780Sstevel@tonic-gate 3790Sstevel@tonic-gate /* 3800Sstevel@tonic-gate * When the relocation loop realizes that it's dealing with relative 3810Sstevel@tonic-gate * relocations in a shared object, it breaks into this tighter loop 3820Sstevel@tonic-gate * as an optimization. 
3830Sstevel@tonic-gate */ 3840Sstevel@tonic-gate ulong_t 3850Sstevel@tonic-gate elf_reloc_relative(ulong_t relbgn, ulong_t relend, ulong_t relsiz, 3860Sstevel@tonic-gate ulong_t basebgn, ulong_t etext, ulong_t emap) 3870Sstevel@tonic-gate { 3880Sstevel@tonic-gate ulong_t roffset = ((Rela *)relbgn)->r_offset; 3890Sstevel@tonic-gate char rtype; 3900Sstevel@tonic-gate 3910Sstevel@tonic-gate do { 3920Sstevel@tonic-gate roffset += basebgn; 3930Sstevel@tonic-gate 3940Sstevel@tonic-gate /* 3950Sstevel@tonic-gate * If this relocation is against an address not mapped in, 3960Sstevel@tonic-gate * then break out of the relative relocation loop, falling 3970Sstevel@tonic-gate * back on the main relocation loop. 3980Sstevel@tonic-gate */ 3990Sstevel@tonic-gate if (roffset < etext || roffset > emap) 4000Sstevel@tonic-gate break; 4010Sstevel@tonic-gate 4020Sstevel@tonic-gate /* 4030Sstevel@tonic-gate * Perform the actual relocation. 4040Sstevel@tonic-gate */ 4050Sstevel@tonic-gate *((ulong_t *)roffset) = basebgn + 4060Sstevel@tonic-gate ((Rela *)relbgn)->r_addend; 4070Sstevel@tonic-gate 4080Sstevel@tonic-gate relbgn += relsiz; 4090Sstevel@tonic-gate 4100Sstevel@tonic-gate if (relbgn >= relend) 4110Sstevel@tonic-gate break; 4120Sstevel@tonic-gate 4130Sstevel@tonic-gate rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info); 4140Sstevel@tonic-gate roffset = ((Rela *)relbgn)->r_offset; 4150Sstevel@tonic-gate 4160Sstevel@tonic-gate } while (rtype == R_AMD64_RELATIVE); 4170Sstevel@tonic-gate 4180Sstevel@tonic-gate return (relbgn); 4190Sstevel@tonic-gate } 4200Sstevel@tonic-gate 4210Sstevel@tonic-gate /* 4220Sstevel@tonic-gate * This is the tightest loop for RELATIVE relocations for those 4230Sstevel@tonic-gate * objects built with the DT_RELACOUNT .dynamic entry. 
4240Sstevel@tonic-gate */ 4250Sstevel@tonic-gate ulong_t 4260Sstevel@tonic-gate elf_reloc_relacount(ulong_t relbgn, ulong_t relacount, ulong_t relsiz, 4270Sstevel@tonic-gate ulong_t basebgn) 4280Sstevel@tonic-gate { 4290Sstevel@tonic-gate ulong_t roffset = ((Rela *) relbgn)->r_offset; 4300Sstevel@tonic-gate 4310Sstevel@tonic-gate for (; relacount; relacount--) { 4320Sstevel@tonic-gate roffset += basebgn; 4330Sstevel@tonic-gate 4340Sstevel@tonic-gate /* 4350Sstevel@tonic-gate * Perform the actual relocation. 4360Sstevel@tonic-gate */ 4370Sstevel@tonic-gate *((ulong_t *)roffset) = basebgn + 4380Sstevel@tonic-gate ((Rela *)relbgn)->r_addend; 4390Sstevel@tonic-gate 4400Sstevel@tonic-gate relbgn += relsiz; 4410Sstevel@tonic-gate 4420Sstevel@tonic-gate roffset = ((Rela *)relbgn)->r_offset; 4430Sstevel@tonic-gate 4440Sstevel@tonic-gate } 4450Sstevel@tonic-gate 4460Sstevel@tonic-gate return (relbgn); 4470Sstevel@tonic-gate } 4480Sstevel@tonic-gate 4490Sstevel@tonic-gate /* 4500Sstevel@tonic-gate * Read and process the relocations for one link object, we assume all 4510Sstevel@tonic-gate * relocation sections for loadable segments are stored contiguously in 4520Sstevel@tonic-gate * the file. 
4530Sstevel@tonic-gate */ 4540Sstevel@tonic-gate int 4550Sstevel@tonic-gate elf_reloc(Rt_map *lmp, uint_t plt) 4560Sstevel@tonic-gate { 4570Sstevel@tonic-gate ulong_t relbgn, relend, relsiz, basebgn; 4580Sstevel@tonic-gate ulong_t pltbgn, pltend, _pltbgn, _pltend; 4590Sstevel@tonic-gate ulong_t roffset, rsymndx, psymndx = 0, etext = ETEXT(lmp); 4600Sstevel@tonic-gate ulong_t emap, dsymndx; 4610Sstevel@tonic-gate uchar_t rtype; 4620Sstevel@tonic-gate long reladd, value, pvalue; 4630Sstevel@tonic-gate Sym *symref, *psymref, *symdef, *psymdef; 4640Sstevel@tonic-gate char *name, *pname; 4650Sstevel@tonic-gate Rt_map *_lmp, *plmp; 4660Sstevel@tonic-gate int textrel = 0, ret = 1, noplt = 0; 4670Sstevel@tonic-gate int relacount = RELACOUNT(lmp), plthint = 0; 4680Sstevel@tonic-gate Rela *rel; 4690Sstevel@tonic-gate uint_t binfo, pbinfo; 4700Sstevel@tonic-gate Alist *bound = 0; 4710Sstevel@tonic-gate 4720Sstevel@tonic-gate /* 4730Sstevel@tonic-gate * Although only necessary for lazy binding, initialize the first 4740Sstevel@tonic-gate * global offset entry to go to elf_rtbndr(). dbx(1) seems 4750Sstevel@tonic-gate * to find this useful. 4760Sstevel@tonic-gate */ 4770Sstevel@tonic-gate if ((plt == 0) && PLTGOT(lmp)) { 4780Sstevel@tonic-gate if ((ulong_t)PLTGOT(lmp) < etext) { 4790Sstevel@tonic-gate if (elf_set_prot(lmp, PROT_WRITE) == 0) 4800Sstevel@tonic-gate return (0); 4810Sstevel@tonic-gate textrel = 1; 4820Sstevel@tonic-gate } 4830Sstevel@tonic-gate elf_plt_init((void *)PLTGOT(lmp), (caddr_t)lmp); 4840Sstevel@tonic-gate } 4850Sstevel@tonic-gate 4860Sstevel@tonic-gate /* 4870Sstevel@tonic-gate * Initialize the plt start and end addresses. 
4880Sstevel@tonic-gate */ 4890Sstevel@tonic-gate if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0) 4900Sstevel@tonic-gate pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp)); 4910Sstevel@tonic-gate 4920Sstevel@tonic-gate 4930Sstevel@tonic-gate relsiz = (ulong_t)(RELENT(lmp)); 4940Sstevel@tonic-gate basebgn = ADDR(lmp); 4950Sstevel@tonic-gate emap = ADDR(lmp) + MSIZE(lmp); 4960Sstevel@tonic-gate 4970Sstevel@tonic-gate if (PLTRELSZ(lmp)) 4980Sstevel@tonic-gate plthint = PLTRELSZ(lmp) / relsiz; 4990Sstevel@tonic-gate 5000Sstevel@tonic-gate /* 5010Sstevel@tonic-gate * If we've been called upon to promote an RTLD_LAZY object to an 5020Sstevel@tonic-gate * RTLD_NOW then we're only interested in scaning the .plt table. 5030Sstevel@tonic-gate * An uninitialized .plt is the case where the associated got entry 5040Sstevel@tonic-gate * points back to the plt itself. Determine the range of the real .plt 5050Sstevel@tonic-gate * entries using the _PROCEDURE_LINKAGE_TABLE_ symbol. 5060Sstevel@tonic-gate */ 5070Sstevel@tonic-gate if (plt) { 5080Sstevel@tonic-gate Slookup sl; 5090Sstevel@tonic-gate 5100Sstevel@tonic-gate relbgn = pltbgn; 5110Sstevel@tonic-gate relend = pltend; 5120Sstevel@tonic-gate if (!relbgn || (relbgn == relend)) 5130Sstevel@tonic-gate return (1); 5140Sstevel@tonic-gate 5150Sstevel@tonic-gate sl.sl_name = MSG_ORIG(MSG_SYM_PLT); 5160Sstevel@tonic-gate sl.sl_cmap = lmp; 5170Sstevel@tonic-gate sl.sl_imap = lmp; 518546Srie sl.sl_hash = elf_hash(MSG_ORIG(MSG_SYM_PLT)); 5190Sstevel@tonic-gate sl.sl_rsymndx = 0; 5200Sstevel@tonic-gate sl.sl_flags = LKUP_DEFT; 5210Sstevel@tonic-gate 5220Sstevel@tonic-gate if ((symdef = elf_find_sym(&sl, &_lmp, &binfo)) == 0) 5230Sstevel@tonic-gate return (1); 5240Sstevel@tonic-gate 5250Sstevel@tonic-gate _pltbgn = symdef->st_value; 5260Sstevel@tonic-gate if (!(FLAGS(lmp) & FLG_RT_FIXED) && 5270Sstevel@tonic-gate (symdef->st_shndx != SHN_ABS)) 5280Sstevel@tonic-gate _pltbgn += basebgn; 5290Sstevel@tonic-gate _pltend = _pltbgn + (((PLTRELSZ(lmp) / 
relsiz)) * 5300Sstevel@tonic-gate M_PLT_ENTSIZE) + M_PLT_RESERVSZ; 5310Sstevel@tonic-gate 5320Sstevel@tonic-gate } else { 5330Sstevel@tonic-gate /* 5340Sstevel@tonic-gate * The relocation sections appear to the run-time linker as a 5350Sstevel@tonic-gate * single table. Determine the address of the beginning and end 5360Sstevel@tonic-gate * of this table. There are two different interpretations of 5370Sstevel@tonic-gate * the ABI at this point: 5380Sstevel@tonic-gate * 5390Sstevel@tonic-gate * o The REL table and its associated RELSZ indicate the 5400Sstevel@tonic-gate * concatenation of *all* relocation sections (this is the 5410Sstevel@tonic-gate * model our link-editor constructs). 5420Sstevel@tonic-gate * 5430Sstevel@tonic-gate * o The REL table and its associated RELSZ indicate the 5440Sstevel@tonic-gate * concatenation of all *but* the .plt relocations. These 5450Sstevel@tonic-gate * relocations are specified individually by the JMPREL and 5460Sstevel@tonic-gate * PLTRELSZ entries. 5470Sstevel@tonic-gate * 5480Sstevel@tonic-gate * Determine from our knowledege of the relocation range and 5490Sstevel@tonic-gate * .plt range, the range of the total relocation table. Note 5500Sstevel@tonic-gate * that one other ABI assumption seems to be that the .plt 5510Sstevel@tonic-gate * relocations always follow any other relocations, the 5520Sstevel@tonic-gate * following range checking drops that assumption. 
5530Sstevel@tonic-gate */ 5540Sstevel@tonic-gate relbgn = (ulong_t)(REL(lmp)); 5550Sstevel@tonic-gate relend = relbgn + (ulong_t)(RELSZ(lmp)); 5560Sstevel@tonic-gate if (pltbgn) { 5570Sstevel@tonic-gate if (!relbgn || (relbgn > pltbgn)) 5580Sstevel@tonic-gate relbgn = pltbgn; 5590Sstevel@tonic-gate if (!relbgn || (relend < pltend)) 5600Sstevel@tonic-gate relend = pltend; 5610Sstevel@tonic-gate } 5620Sstevel@tonic-gate } 5630Sstevel@tonic-gate if (!relbgn || (relbgn == relend)) { 564*1618Srie DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE)); 5650Sstevel@tonic-gate return (1); 5660Sstevel@tonic-gate } 567*1618Srie DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START)); 5680Sstevel@tonic-gate 5690Sstevel@tonic-gate /* 5700Sstevel@tonic-gate * If we're processing a dynamic executable in lazy mode there is no 5710Sstevel@tonic-gate * need to scan the .rel.plt table, however if we're processing a shared 5720Sstevel@tonic-gate * object in lazy mode the .got addresses associated to each .plt must 5730Sstevel@tonic-gate * be relocated to reflect the location of the shared object. 5740Sstevel@tonic-gate */ 5750Sstevel@tonic-gate if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) && 5760Sstevel@tonic-gate (FLAGS(lmp) & FLG_RT_FIXED)) 5770Sstevel@tonic-gate noplt = 1; 5780Sstevel@tonic-gate 5790Sstevel@tonic-gate /* 5800Sstevel@tonic-gate * Loop through relocations. 5810Sstevel@tonic-gate */ 5820Sstevel@tonic-gate while (relbgn < relend) { 5830Sstevel@tonic-gate uint_t sb_flags = 0; 5840Sstevel@tonic-gate 5850Sstevel@tonic-gate rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info); 5860Sstevel@tonic-gate 5870Sstevel@tonic-gate /* 5880Sstevel@tonic-gate * If this is a RELATIVE relocation in a shared object (the 5890Sstevel@tonic-gate * common case), and if we are not debugging, then jump into a 5900Sstevel@tonic-gate * tighter relocation loop (elf_reloc_relative). Only make the 5910Sstevel@tonic-gate * jump if we've been given a hint on the number of relocations. 
5920Sstevel@tonic-gate */ 5930Sstevel@tonic-gate if ((rtype == R_AMD64_RELATIVE) && 594*1618Srie ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) { 5950Sstevel@tonic-gate /* 5960Sstevel@tonic-gate * It's possible that the relative relocation block 5970Sstevel@tonic-gate * has relocations against the text segment as well 5980Sstevel@tonic-gate * as the data segment. Since our optimized relocation 5990Sstevel@tonic-gate * engine does not check which segment the relocation 6000Sstevel@tonic-gate * is against - just mprotect it now if it's been 6010Sstevel@tonic-gate * marked as containing TEXTREL's. 6020Sstevel@tonic-gate */ 6030Sstevel@tonic-gate if ((textrel == 0) && (FLAGS1(lmp) & FL1_RT_TEXTREL)) { 6040Sstevel@tonic-gate if (elf_set_prot(lmp, PROT_WRITE) == 0) { 6050Sstevel@tonic-gate ret = 0; 6060Sstevel@tonic-gate break; 6070Sstevel@tonic-gate } 6080Sstevel@tonic-gate textrel = 1; 6090Sstevel@tonic-gate } 6100Sstevel@tonic-gate if (relacount) { 6110Sstevel@tonic-gate relbgn = elf_reloc_relacount(relbgn, relacount, 6120Sstevel@tonic-gate relsiz, basebgn); 6130Sstevel@tonic-gate relacount = 0; 6140Sstevel@tonic-gate } else { 6150Sstevel@tonic-gate relbgn = elf_reloc_relative(relbgn, relend, 6160Sstevel@tonic-gate relsiz, basebgn, etext, emap); 6170Sstevel@tonic-gate } 6180Sstevel@tonic-gate if (relbgn >= relend) 6190Sstevel@tonic-gate break; 6200Sstevel@tonic-gate rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info); 6210Sstevel@tonic-gate } 6220Sstevel@tonic-gate 6230Sstevel@tonic-gate roffset = ((Rela *)relbgn)->r_offset; 6240Sstevel@tonic-gate 6250Sstevel@tonic-gate /* 6260Sstevel@tonic-gate * If this is a shared object, add the base address to offset. 
6270Sstevel@tonic-gate */ 6280Sstevel@tonic-gate if (!(FLAGS(lmp) & FLG_RT_FIXED)) { 6290Sstevel@tonic-gate 6300Sstevel@tonic-gate 6310Sstevel@tonic-gate /* 6320Sstevel@tonic-gate * If we're processing lazy bindings, we have to step 6330Sstevel@tonic-gate * through the plt entries and add the base address 6340Sstevel@tonic-gate * to the corresponding got entry. 6350Sstevel@tonic-gate */ 6360Sstevel@tonic-gate if (plthint && (plt == 0) && 6370Sstevel@tonic-gate (rtype == R_AMD64_JUMP_SLOT) && 6380Sstevel@tonic-gate ((MODE(lmp) & RTLD_NOW) == 0)) { 6390Sstevel@tonic-gate /* 6400Sstevel@tonic-gate * The PLT relocations (for lazy bindings) 6410Sstevel@tonic-gate * are additive to what's already in the GOT. 6420Sstevel@tonic-gate * This differs to what happens in 6430Sstevel@tonic-gate * elf_reloc_relacount() and that's why we 6440Sstevel@tonic-gate * just do it inline here. 6450Sstevel@tonic-gate */ 6460Sstevel@tonic-gate for (roffset = ((Rela *)relbgn)->r_offset; 6470Sstevel@tonic-gate plthint; plthint--) { 6480Sstevel@tonic-gate roffset += basebgn; 6490Sstevel@tonic-gate 6500Sstevel@tonic-gate /* 6510Sstevel@tonic-gate * Perform the actual relocation. 6520Sstevel@tonic-gate */ 6530Sstevel@tonic-gate *((ulong_t *)roffset) += basebgn; 6540Sstevel@tonic-gate 6550Sstevel@tonic-gate relbgn += relsiz; 6560Sstevel@tonic-gate roffset = ((Rela *)relbgn)->r_offset; 6570Sstevel@tonic-gate 6580Sstevel@tonic-gate } 6590Sstevel@tonic-gate continue; 6600Sstevel@tonic-gate } 6610Sstevel@tonic-gate roffset += basebgn; 6620Sstevel@tonic-gate } 6630Sstevel@tonic-gate 6640Sstevel@tonic-gate reladd = (long)(((Rela *)relbgn)->r_addend); 6650Sstevel@tonic-gate rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info); 6660Sstevel@tonic-gate rel = (Rela *)relbgn; 6670Sstevel@tonic-gate relbgn += relsiz; 6680Sstevel@tonic-gate 6690Sstevel@tonic-gate /* 6700Sstevel@tonic-gate * Optimizations. 
6710Sstevel@tonic-gate */ 6720Sstevel@tonic-gate if (rtype == R_AMD64_NONE) 6730Sstevel@tonic-gate continue; 6740Sstevel@tonic-gate if (noplt && ((ulong_t)rel >= pltbgn) && 6750Sstevel@tonic-gate ((ulong_t)rel < pltend)) { 6760Sstevel@tonic-gate relbgn = pltend; 6770Sstevel@tonic-gate continue; 6780Sstevel@tonic-gate } 6790Sstevel@tonic-gate 6800Sstevel@tonic-gate /* 6810Sstevel@tonic-gate * If this relocation is not against part of the image 6820Sstevel@tonic-gate * mapped into memory we skip it. 6830Sstevel@tonic-gate */ 6840Sstevel@tonic-gate if ((roffset < ADDR(lmp)) || (roffset > (ADDR(lmp) + 6850Sstevel@tonic-gate MSIZE(lmp)))) { 6860Sstevel@tonic-gate elf_reloc_bad(lmp, (void *)rel, rtype, roffset, 6870Sstevel@tonic-gate rsymndx); 6880Sstevel@tonic-gate continue; 6890Sstevel@tonic-gate } 6900Sstevel@tonic-gate 6910Sstevel@tonic-gate /* 6920Sstevel@tonic-gate * If we're promoting plts determine if this one has already 6930Sstevel@tonic-gate * been written. 6940Sstevel@tonic-gate */ 6950Sstevel@tonic-gate if (plt) { 6960Sstevel@tonic-gate if ((*(ulong_t *)roffset < _pltbgn) || 6970Sstevel@tonic-gate (*(ulong_t *)roffset > _pltend)) 6980Sstevel@tonic-gate continue; 6990Sstevel@tonic-gate } 7000Sstevel@tonic-gate 7010Sstevel@tonic-gate binfo = 0; 7020Sstevel@tonic-gate /* 7030Sstevel@tonic-gate * If a symbol index is specified then get the symbol table 7040Sstevel@tonic-gate * entry, locate the symbol definition, and determine its 7050Sstevel@tonic-gate * address. 7060Sstevel@tonic-gate */ 7070Sstevel@tonic-gate if (rsymndx) { 7080Sstevel@tonic-gate /* 7090Sstevel@tonic-gate * Get the local symbol table entry. 7100Sstevel@tonic-gate */ 7110Sstevel@tonic-gate symref = (Sym *)((ulong_t)SYMTAB(lmp) + 7120Sstevel@tonic-gate (rsymndx * SYMENT(lmp))); 7130Sstevel@tonic-gate 7140Sstevel@tonic-gate /* 7150Sstevel@tonic-gate * If this is a local symbol, just use the base address. 
7160Sstevel@tonic-gate * (we should have no local relocations in the 7170Sstevel@tonic-gate * executable). 7180Sstevel@tonic-gate */ 7190Sstevel@tonic-gate if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) { 7200Sstevel@tonic-gate value = basebgn; 7210Sstevel@tonic-gate name = (char *)0; 7220Sstevel@tonic-gate 7230Sstevel@tonic-gate /* 7240Sstevel@tonic-gate * TLS relocation - value for DTPMOD64 7250Sstevel@tonic-gate * relocation is the TLS modid. 7260Sstevel@tonic-gate */ 7270Sstevel@tonic-gate if (rtype == R_AMD64_DTPMOD64) 7280Sstevel@tonic-gate value = TLSMODID(lmp); 7290Sstevel@tonic-gate } else { 7300Sstevel@tonic-gate /* 7310Sstevel@tonic-gate * If the symbol index is equal to the previous 7320Sstevel@tonic-gate * symbol index relocation we processed then 7330Sstevel@tonic-gate * reuse the previous values. (Note that there 7340Sstevel@tonic-gate * have been cases where a relocation exists 7350Sstevel@tonic-gate * against a copy relocation symbol, our ld(1) 7360Sstevel@tonic-gate * should optimize this away, but make sure we 7370Sstevel@tonic-gate * don't use the same symbol information should 7380Sstevel@tonic-gate * this case exist). 
7390Sstevel@tonic-gate */ 7400Sstevel@tonic-gate if ((rsymndx == psymndx) && 7410Sstevel@tonic-gate (rtype != R_AMD64_COPY)) { 7420Sstevel@tonic-gate /* LINTED */ 7430Sstevel@tonic-gate if (psymdef == 0) { 744*1618Srie DBG_CALL(Dbg_bind_weak(lmp, 745*1618Srie (Addr)roffset, (Addr) 7460Sstevel@tonic-gate (roffset - basebgn), name)); 7470Sstevel@tonic-gate continue; 7480Sstevel@tonic-gate } 7490Sstevel@tonic-gate /* LINTED */ 7500Sstevel@tonic-gate value = pvalue; 7510Sstevel@tonic-gate /* LINTED */ 7520Sstevel@tonic-gate name = pname; 7530Sstevel@tonic-gate /* LINTED */ 7540Sstevel@tonic-gate symdef = psymdef; 7550Sstevel@tonic-gate /* LINTED */ 7560Sstevel@tonic-gate symref = psymref; 7570Sstevel@tonic-gate /* LINTED */ 7580Sstevel@tonic-gate _lmp = plmp; 7590Sstevel@tonic-gate /* LINTED */ 7600Sstevel@tonic-gate binfo = pbinfo; 7610Sstevel@tonic-gate 7620Sstevel@tonic-gate if ((LIST(_lmp)->lm_tflags | 7630Sstevel@tonic-gate FLAGS1(_lmp)) & 7640Sstevel@tonic-gate LML_TFLG_AUD_SYMBIND) { 7650Sstevel@tonic-gate value = audit_symbind(lmp, _lmp, 7660Sstevel@tonic-gate /* LINTED */ 7670Sstevel@tonic-gate symdef, dsymndx, value, 7680Sstevel@tonic-gate &sb_flags); 7690Sstevel@tonic-gate } 7700Sstevel@tonic-gate } else { 7710Sstevel@tonic-gate Slookup sl; 7720Sstevel@tonic-gate uchar_t bind; 7730Sstevel@tonic-gate 7740Sstevel@tonic-gate /* 7750Sstevel@tonic-gate * Lookup the symbol definition. 
7760Sstevel@tonic-gate */ 7770Sstevel@tonic-gate name = (char *)(STRTAB(lmp) + 7780Sstevel@tonic-gate symref->st_name); 7790Sstevel@tonic-gate 7800Sstevel@tonic-gate sl.sl_name = name; 7810Sstevel@tonic-gate sl.sl_cmap = lmp; 7820Sstevel@tonic-gate sl.sl_imap = 0; 7830Sstevel@tonic-gate sl.sl_hash = 0; 7840Sstevel@tonic-gate sl.sl_rsymndx = rsymndx; 7850Sstevel@tonic-gate 7860Sstevel@tonic-gate if (rtype == R_AMD64_COPY) 7870Sstevel@tonic-gate sl.sl_flags = LKUP_COPY; 7880Sstevel@tonic-gate else 7890Sstevel@tonic-gate sl.sl_flags = LKUP_DEFT; 7900Sstevel@tonic-gate 7910Sstevel@tonic-gate sl.sl_flags |= LKUP_ALLCNTLIST; 7920Sstevel@tonic-gate 7930Sstevel@tonic-gate if (rtype != R_AMD64_JUMP_SLOT) 7940Sstevel@tonic-gate sl.sl_flags |= LKUP_SPEC; 7950Sstevel@tonic-gate 7960Sstevel@tonic-gate bind = ELF_ST_BIND(symref->st_info); 7970Sstevel@tonic-gate if (bind == STB_WEAK) 7980Sstevel@tonic-gate sl.sl_flags |= LKUP_WEAK; 7990Sstevel@tonic-gate 8000Sstevel@tonic-gate symdef = lookup_sym(&sl, &_lmp, &binfo); 8010Sstevel@tonic-gate 8020Sstevel@tonic-gate /* 8030Sstevel@tonic-gate * If the symbol is not found and the 8040Sstevel@tonic-gate * reference was not to a weak symbol, 8050Sstevel@tonic-gate * report an error. Weak references 8060Sstevel@tonic-gate * may be unresolved. 
8070Sstevel@tonic-gate * chkmsg: MSG_INTL(MSG_LDD_SYM_NFOUND) 8080Sstevel@tonic-gate */ 8090Sstevel@tonic-gate if (symdef == 0) { 810*1618Srie Lm_list *lml = LIST(lmp); 811*1618Srie 8120Sstevel@tonic-gate if (bind != STB_WEAK) { 813*1618Srie if (lml->lm_flags & 8140Sstevel@tonic-gate LML_FLG_IGNRELERR) { 8150Sstevel@tonic-gate continue; 816*1618Srie } else if (lml->lm_flags & 8170Sstevel@tonic-gate LML_FLG_TRC_WARN) { 8180Sstevel@tonic-gate (void) printf(MSG_INTL( 8190Sstevel@tonic-gate MSG_LDD_SYM_NFOUND), 8200Sstevel@tonic-gate demangle(name), 8210Sstevel@tonic-gate NAME(lmp)); 8220Sstevel@tonic-gate continue; 8230Sstevel@tonic-gate } else { 824*1618Srie eprintf(lml, ERR_FATAL, 8250Sstevel@tonic-gate MSG_INTL(MSG_REL_NOSYM), 8260Sstevel@tonic-gate NAME(lmp), 8270Sstevel@tonic-gate demangle(name)); 8280Sstevel@tonic-gate ret = 0; 8290Sstevel@tonic-gate break; 8300Sstevel@tonic-gate } 8310Sstevel@tonic-gate } else { 8320Sstevel@tonic-gate psymndx = rsymndx; 8330Sstevel@tonic-gate psymdef = 0; 8340Sstevel@tonic-gate 835*1618Srie DBG_CALL(Dbg_bind_weak(lmp, 836*1618Srie (Addr)roffset, (Addr) 8370Sstevel@tonic-gate (roffset - basebgn), name)); 8380Sstevel@tonic-gate continue; 8390Sstevel@tonic-gate } 8400Sstevel@tonic-gate } 8410Sstevel@tonic-gate 8420Sstevel@tonic-gate /* 8430Sstevel@tonic-gate * If symbol was found in an object 8440Sstevel@tonic-gate * other than the referencing object 8450Sstevel@tonic-gate * then record the binding. 
8460Sstevel@tonic-gate */ 8470Sstevel@tonic-gate if ((lmp != _lmp) && ((FLAGS1(_lmp) & 8480Sstevel@tonic-gate FL1_RT_NOINIFIN) == 0)) { 8490Sstevel@tonic-gate if (alist_test(&bound, _lmp, 8500Sstevel@tonic-gate sizeof (Rt_map *), 8510Sstevel@tonic-gate AL_CNT_RELBIND) == 0) { 8520Sstevel@tonic-gate ret = 0; 8530Sstevel@tonic-gate break; 8540Sstevel@tonic-gate } 8550Sstevel@tonic-gate } 8560Sstevel@tonic-gate 8570Sstevel@tonic-gate /* 8580Sstevel@tonic-gate * Calculate the location of definition; 8590Sstevel@tonic-gate * symbol value plus base address of 8600Sstevel@tonic-gate * containing shared object. 8610Sstevel@tonic-gate */ 8620Sstevel@tonic-gate value = symdef->st_value; 8630Sstevel@tonic-gate if (!(FLAGS(_lmp) & FLG_RT_FIXED) && 8640Sstevel@tonic-gate (symdef->st_shndx != SHN_ABS) && 8650Sstevel@tonic-gate (ELF_ST_TYPE(symdef->st_info) != 8660Sstevel@tonic-gate STT_TLS)) 8670Sstevel@tonic-gate value += ADDR(_lmp); 8680Sstevel@tonic-gate 8690Sstevel@tonic-gate /* 8700Sstevel@tonic-gate * Retain this symbol index and the 8710Sstevel@tonic-gate * value in case it can be used for the 8720Sstevel@tonic-gate * subsequent relocations. 
8730Sstevel@tonic-gate */ 8740Sstevel@tonic-gate if (rtype != R_AMD64_COPY) { 8750Sstevel@tonic-gate psymndx = rsymndx; 8760Sstevel@tonic-gate pvalue = value; 8770Sstevel@tonic-gate pname = name; 8780Sstevel@tonic-gate psymdef = symdef; 8790Sstevel@tonic-gate psymref = symref; 8800Sstevel@tonic-gate plmp = _lmp; 8810Sstevel@tonic-gate pbinfo = binfo; 8820Sstevel@tonic-gate } 8830Sstevel@tonic-gate if ((LIST(_lmp)->lm_tflags | 8840Sstevel@tonic-gate FLAGS1(_lmp)) & 8850Sstevel@tonic-gate LML_TFLG_AUD_SYMBIND) { 8860Sstevel@tonic-gate dsymndx = (((uintptr_t)symdef - 8870Sstevel@tonic-gate (uintptr_t)SYMTAB(_lmp)) / 8880Sstevel@tonic-gate SYMENT(_lmp)); 8890Sstevel@tonic-gate value = audit_symbind(lmp, _lmp, 8900Sstevel@tonic-gate symdef, dsymndx, value, 8910Sstevel@tonic-gate &sb_flags); 8920Sstevel@tonic-gate } 8930Sstevel@tonic-gate } 8940Sstevel@tonic-gate 8950Sstevel@tonic-gate /* 8960Sstevel@tonic-gate * If relocation is PC-relative, subtract 8970Sstevel@tonic-gate * offset address. 8980Sstevel@tonic-gate */ 8990Sstevel@tonic-gate if (IS_PC_RELATIVE(rtype)) 9000Sstevel@tonic-gate value -= roffset; 9010Sstevel@tonic-gate 9020Sstevel@tonic-gate /* 9030Sstevel@tonic-gate * TLS relocation - value for DTPMOD64 9040Sstevel@tonic-gate * relocation is the TLS modid. 9050Sstevel@tonic-gate */ 9060Sstevel@tonic-gate if (rtype == R_AMD64_DTPMOD64) 9070Sstevel@tonic-gate value = TLSMODID(_lmp); 9080Sstevel@tonic-gate else if ((rtype == R_AMD64_TPOFF64) || 9090Sstevel@tonic-gate (rtype == R_AMD64_TPOFF32)) 9100Sstevel@tonic-gate value = -(TLSSTATOFF(_lmp) - value); 9110Sstevel@tonic-gate } 9120Sstevel@tonic-gate } else { 9130Sstevel@tonic-gate /* 9140Sstevel@tonic-gate * Special case: 9150Sstevel@tonic-gate * 9160Sstevel@tonic-gate * A DTPMOD32 relocation is a local binding to a TLS 9170Sstevel@tonic-gate * symbol. Fill in the TLSMODID for the current object. 
9180Sstevel@tonic-gate */ 9190Sstevel@tonic-gate if (rtype == R_AMD64_DTPMOD64) 9200Sstevel@tonic-gate value = TLSMODID(lmp); 9210Sstevel@tonic-gate else 9220Sstevel@tonic-gate value = basebgn; 9230Sstevel@tonic-gate name = (char *)0; 9240Sstevel@tonic-gate } 9250Sstevel@tonic-gate 9260Sstevel@tonic-gate /* 9270Sstevel@tonic-gate * If this object has relocations in the text segment, turn 9280Sstevel@tonic-gate * off the write protect. 9290Sstevel@tonic-gate */ 9300Sstevel@tonic-gate if ((roffset < etext) && (textrel == 0)) { 9310Sstevel@tonic-gate if (elf_set_prot(lmp, PROT_WRITE) == 0) { 9320Sstevel@tonic-gate ret = 0; 9330Sstevel@tonic-gate break; 9340Sstevel@tonic-gate } 9350Sstevel@tonic-gate textrel = 1; 9360Sstevel@tonic-gate } 9370Sstevel@tonic-gate 9380Sstevel@tonic-gate /* 9390Sstevel@tonic-gate * Call relocation routine to perform required relocation. 9400Sstevel@tonic-gate */ 941*1618Srie DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH, 942*1618Srie M_REL_SHT_TYPE, rel, NULL, name)); 9430Sstevel@tonic-gate 9440Sstevel@tonic-gate switch (rtype) { 9450Sstevel@tonic-gate case R_AMD64_COPY: 9460Sstevel@tonic-gate if (elf_copy_reloc(name, symref, lmp, (void *)roffset, 9470Sstevel@tonic-gate symdef, _lmp, (const void *)value) == 0) 9480Sstevel@tonic-gate ret = 0; 9490Sstevel@tonic-gate break; 9500Sstevel@tonic-gate case R_AMD64_JUMP_SLOT: 9510Sstevel@tonic-gate if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) & 9520Sstevel@tonic-gate (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) && 9530Sstevel@tonic-gate AUDINFO(lmp)->ai_dynplts) { 9540Sstevel@tonic-gate int fail = 0; 9550Sstevel@tonic-gate int pltndx = (((ulong_t)rel - 9560Sstevel@tonic-gate (uintptr_t)JMPREL(lmp)) / relsiz); 9570Sstevel@tonic-gate int symndx = (((uintptr_t)symdef - 9580Sstevel@tonic-gate (uintptr_t)SYMTAB(_lmp)) / 9590Sstevel@tonic-gate SYMENT(_lmp)); 9600Sstevel@tonic-gate 9610Sstevel@tonic-gate (void) elf_plt_trace_write(roffset, lmp, _lmp, 9620Sstevel@tonic-gate symdef, symndx, pltndx, 
(caddr_t)value, 9630Sstevel@tonic-gate sb_flags, &fail); 9640Sstevel@tonic-gate if (fail) 9650Sstevel@tonic-gate ret = 0; 9660Sstevel@tonic-gate } else { 9670Sstevel@tonic-gate /* 9680Sstevel@tonic-gate * Write standard PLT entry to jump directly 9690Sstevel@tonic-gate * to newly bound function. 9700Sstevel@tonic-gate */ 971*1618Srie DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), 972*1618Srie ELF_DBG_RTLD, (Xword)roffset, 9730Sstevel@tonic-gate (Xword)value)); 9740Sstevel@tonic-gate *(ulong_t *)roffset = value; 9750Sstevel@tonic-gate } 9760Sstevel@tonic-gate break; 9770Sstevel@tonic-gate default: 9780Sstevel@tonic-gate value += reladd; 9790Sstevel@tonic-gate /* 9800Sstevel@tonic-gate * Write the relocation out. 9810Sstevel@tonic-gate */ 9820Sstevel@tonic-gate if (do_reloc(rtype, (uchar_t *)roffset, 983*1618Srie (Xword *)&value, name, NAME(lmp), LIST(lmp)) == 0) 9840Sstevel@tonic-gate ret = 0; 9850Sstevel@tonic-gate 986*1618Srie DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD, 987*1618Srie (Xword)roffset, (Xword)value)); 9880Sstevel@tonic-gate } 9890Sstevel@tonic-gate 9900Sstevel@tonic-gate if ((ret == 0) && 9910Sstevel@tonic-gate ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0)) 9920Sstevel@tonic-gate break; 9930Sstevel@tonic-gate 9940Sstevel@tonic-gate if (binfo) { 995*1618Srie DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset, 996*1618Srie (Off)(roffset - basebgn), (Xword)(-1), PLT_T_FULL, 997*1618Srie _lmp, (Addr)value, symdef->st_value, name, binfo)); 9980Sstevel@tonic-gate } 9990Sstevel@tonic-gate } 10000Sstevel@tonic-gate 10010Sstevel@tonic-gate return (relocate_finish(lmp, bound, textrel, ret)); 10020Sstevel@tonic-gate } 10030Sstevel@tonic-gate 10040Sstevel@tonic-gate /* 10050Sstevel@tonic-gate * Initialize the first few got entries so that function calls go to 10060Sstevel@tonic-gate * elf_rtbndr: 10070Sstevel@tonic-gate * 10080Sstevel@tonic-gate * GOT[GOT_XLINKMAP] = the address of the link map 10090Sstevel@tonic-gate * GOT[GOT_XRTLD] = the address of rtbinder 
10100Sstevel@tonic-gate */ 10110Sstevel@tonic-gate void 10120Sstevel@tonic-gate elf_plt_init(void *got, caddr_t l) 10130Sstevel@tonic-gate { 10140Sstevel@tonic-gate uint64_t *_got; 10150Sstevel@tonic-gate /* LINTED */ 10160Sstevel@tonic-gate Rt_map *lmp = (Rt_map *)l; 10170Sstevel@tonic-gate 10180Sstevel@tonic-gate _got = (uint64_t *)got + M_GOT_XLINKMAP; 10190Sstevel@tonic-gate *_got = (uint64_t)lmp; 10200Sstevel@tonic-gate _got = (uint64_t *)got + M_GOT_XRTLD; 10210Sstevel@tonic-gate *_got = (uint64_t)elf_rtbndr; 10220Sstevel@tonic-gate } 10230Sstevel@tonic-gate 10240Sstevel@tonic-gate /* 10250Sstevel@tonic-gate * Plt writing interface to allow debugging initialization to be generic. 10260Sstevel@tonic-gate */ 10270Sstevel@tonic-gate Pltbindtype 10280Sstevel@tonic-gate /* ARGSUSED1 */ 10290Sstevel@tonic-gate elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval, 10300Sstevel@tonic-gate Xword pltndx) 10310Sstevel@tonic-gate { 10320Sstevel@tonic-gate Rela *rel = (Rela*)rptr; 10330Sstevel@tonic-gate uintptr_t pltaddr; 10340Sstevel@tonic-gate 10350Sstevel@tonic-gate pltaddr = addr + rel->r_offset; 10360Sstevel@tonic-gate *(ulong_t *)pltaddr = (ulong_t)symval + rel->r_addend; 10370Sstevel@tonic-gate DBG_CALL(pltcntfull++); 10380Sstevel@tonic-gate return (PLT_T_FULL); 10390Sstevel@tonic-gate } 10400Sstevel@tonic-gate 10410Sstevel@tonic-gate /* 10420Sstevel@tonic-gate * Provide a machine specific interface to the conversion routine. By calling 10430Sstevel@tonic-gate * the machine specific version, rather than the generic version, we insure that 10440Sstevel@tonic-gate * the data tables/strings for all known machine versions aren't dragged into 10450Sstevel@tonic-gate * ld.so.1. 10460Sstevel@tonic-gate */ 10470Sstevel@tonic-gate const char * 1048*1618Srie _conv_reloc_type(uint_t rel) 10490Sstevel@tonic-gate { 1050*1618Srie return (conv_reloc_amd64_type(rel)); 10510Sstevel@tonic-gate } 1052