10Sstevel@tonic-gate /* 20Sstevel@tonic-gate * CDDL HEADER START 30Sstevel@tonic-gate * 40Sstevel@tonic-gate * The contents of this file are subject to the terms of the 51618Srie * Common Development and Distribution License (the "License"). 61618Srie * You may not use this file except in compliance with the License. 70Sstevel@tonic-gate * 80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 100Sstevel@tonic-gate * See the License for the specific language governing permissions 110Sstevel@tonic-gate * and limitations under the License. 120Sstevel@tonic-gate * 130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 180Sstevel@tonic-gate * 190Sstevel@tonic-gate * CDDL HEADER END 200Sstevel@tonic-gate */ 211618Srie 220Sstevel@tonic-gate /* 23*8598SRod.Evans@Sun.COM * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 241618Srie * Use is subject to license terms. 250Sstevel@tonic-gate */ 266812Sraf 27*8598SRod.Evans@Sun.COM #pragma ident "@(#)amd64_elf.c 1.25 08/07/30 SMI" 280Sstevel@tonic-gate 290Sstevel@tonic-gate /* 300Sstevel@tonic-gate * amd64 machine dependent and ELF file class dependent functions. 310Sstevel@tonic-gate * Contains routines for performing function binding and symbol relocations. 
 */

#include	<stdio.h>
#include	<sys/elf.h>
#include	<sys/elf_amd64.h>
#include	<sys/mman.h>
#include	<dlfcn.h>
#include	<synch.h>
#include	<string.h>
#include	<debug.h>
#include	<reloc.h>
#include	<conv.h>
#include	"_rtld.h"
#include	"_audit.h"
#include	"_elf.h"
#include	"_inline.h"
#include	"msg.h"

/*
 * Assembly language entry point for PLT binding (declared here, defined
 * elsewhere); first calls through an unresolved plt land here.
 */
extern void	elf_rtbndr(Rt_map *, ulong_t, caddr_t);

/*
 * Validate the machine-dependent ELF header flags for this object.
 * Returns 1 if the object is acceptable, or 0 after filling in *rej
 * with the reason for rejection.
 */
int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
	/*
	 * Check machine type and flags.  amd64 objects carry no flags, so
	 * any non-zero e_flags value is rejected.
	 */
	if (ehdr->e_flags != 0) {
		rej->rej_type = SGS_REJ_BADFLAG;
		rej->rej_info = (uint_t)ehdr->e_flags;
		return (0);
	}
	return (1);
}

void
ldso_plt_init(Rt_map *lmp)
{
	/*
	 * There is no need to analyze ld.so because we don't map in any of
	 * its dependencies.  However we may map these dependencies in later
	 * (as if ld.so had dlopened them), so initialize the plt and the
	 * permission information.
	 */
	if (PLTGOT(lmp))
		elf_plt_init((PLTGOT(lmp)), (caddr_t)lmp);
}

/*
 * Template for the per-plt auditing glue code; copied and relocated by
 * elf_plt_trace_write() below.
 */
static const uchar_t dyn_plt_template[] = {
/* 0x00 */	0x55,				/* pushq %rbp */
/* 0x01 */	0x48, 0x89, 0xe5,		/* movq %rsp, %rbp */
/* 0x04 */	0x48, 0x83, 0xec, 0x10,		/* subq $0x10, %rsp */
/* 0x08 */	0x4c, 0x8d, 0x1d, 0x00,		/* leaq trace_fields(%rip), %r11 */
		0x00, 0x00, 0x00,
/* 0x0f */	0x4c, 0x89, 0x5d, 0xf8,		/* movq %r11, -0x8(%rbp) */
/* 0x13 */	0x49, 0xbb, 0x00, 0x00,		/* movq $elf_plt_trace, %r11 */
		0x00, 0x00, 0x00,
		0x00, 0x00, 0x00,
/* 0x1d */	0x41, 0xff, 0xe3		/* jmp *%r11 */
/* 0x20 */
};

/*
 * And the virtual outstanding relocations against the
 * above block are:
 *
 *	reloc		offset	Addend	symbol
 *	R_AMD64_PC32	0x0b	-4	trace_fields
 *	R_AMD64_64	0x15	0	elf_plt_trace
 */

#define	TRCREL1OFF	0x0b
#define	TRCREL2OFF	0x15

int	dyn_plt_ent_size = sizeof (dyn_plt_template);

/*
 * the dynamic plt entry is:
 *
 *	pushq	%rbp
 *	movq	%rsp, %rbp
 *	subq	$0x10, %rsp
 *	leaq	trace_fields(%rip), %r11
 *	movq	%r11, -0x8(%rbp)
 *	movq	$elf_plt_trace, %r11
 *	jmp	*%r11
 * dyn_data:
 *	.align  8
 *	uintptr_t	reflmp
 *	uintptr_t	deflmp
 *	uint_t		symndx
 *	uint_t		sb_flags
 *	Sym		symdef
 */

/*
 * Build (once) the auditing glue code for a given plt entry, from the
 * dyn_plt_template above, and point the associated .got entry at it.
 * Returns the address of the glue code, or 0 with *fail set if a
 * relocation of the template cannot be applied.
 */
static caddr_t
elf_plt_trace_write(ulong_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym,
    uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail)
{
	extern int	elf_plt_trace();
	ulong_t		got_entry;
	uchar_t		*dyn_plt;
	uintptr_t	*dyndata;

	/*
	 * We only need to add the glue code if there is an auditing
	 * library that is interested in this binding.  Locate this plt's
	 * slot in the pre-allocated array of dynamic plt entries.
	 */
	dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
	    (pltndx * dyn_plt_ent_size));

	/*
	 * Have we initialized this dynamic plt entry yet?  If we haven't do it
	 * now.  Otherwise this function has been called before, but from a
	 * different plt (ie. from another shared object).  In that case
	 * we just set the plt to point to the new dyn_plt.
	 */
	if (*dyn_plt == 0) {
		Sym	*symp;
		Xword	symvalue;
		Lm_list	*lml = LIST(rlmp);

		(void) memcpy((void *)dyn_plt, dyn_plt_template,
		    sizeof (dyn_plt_template));
		/* dyndata (reflmp/deflmp/symndx/sb_flags/symdef) follows */
		dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
		    ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN));

		/*
		 * relocate:
		 *	leaq	trace_fields(%rip), %r11
		 *	R_AMD64_PC32	0x0b	-4	trace_fields
		 */
		symvalue = (Xword)((uintptr_t)dyndata -
		    (uintptr_t)(&dyn_plt[TRCREL1OFF]) - 4);
		if (do_reloc_rtld(R_AMD64_PC32, &dyn_plt[TRCREL1OFF],
		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * relocating:
		 *	movq	$elf_plt_trace, %r11
		 *	R_AMD64_64	0x15	0	elf_plt_trace
		 */
		symvalue = (Xword)elf_plt_trace;
		if (do_reloc_rtld(R_AMD64_64, &dyn_plt[TRCREL2OFF],
		    &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * Fill in the dyn_data block consumed by elf_plt_trace:
		 * referencing map, defining map, packed sb_flags/symndx,
		 * and a copy of the symbol with its resolved value.
		 */
		*dyndata++ = (uintptr_t)rlmp;
		*dyndata++ = (uintptr_t)dlmp;
		*dyndata = (uintptr_t)(((uint64_t)sb_flags << 32) | symndx);
		dyndata++;
		symp = (Sym *)dyndata;
		*symp = *sym;
		symp->st_value = (Addr)to;
	}

	/*
	 * Point the .got entry at the (possibly pre-existing) glue code.
	 */
	got_entry = (ulong_t)roffset;
	*(ulong_t *)got_entry = (ulong_t)dyn_plt;
	return ((caddr_t)dyn_plt);
}

/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table;
 * passes first through an assembly language interface.
 *
 * Takes the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for the entry.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes process to terminate with a signal.
 */
ulong_t
elf_bndr(Rt_map *lmp, ulong_t pltndx, caddr_t from)
{
	Rt_map		*nlmp, *llmp;
	ulong_t		addr, reloff, symval, rsymndx;
	char		*name;
	Rela		*rptr;
	Sym		*rsym, *nsym;
	uint_t		binfo, sb_flags = 0, dbg_class;
	Slookup		sl;
	int		entry, lmflags;
	Lm_list		*lml;

	/*
	 * For compatibility with libthread (TI_VERSION 1) we track the entry
	 * value.  A zero value indicates we have recursed into ld.so.1 to
	 * further process a locking request.  Under this recursion we disable
	 * tsort and cleanup activities.
	 */
	entry = enter(0);

	lml = LIST(lmp);
	if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
		dbg_class = dbg_desc->d_class;
		dbg_desc->d_class = 0;
	}

	/*
	 * Perform some basic sanity checks.  If we didn't get a load map or
	 * the relocation offset is invalid then its possible someone has walked
	 * over the .got entries or jumped to plt0 out of the blue.
	 *
	 * NOTE(review): this condition looks suspect - when lmp is NULL the
	 * PLTRELSZ(lmp)/RELENT(lmp) operands would dereference the null
	 * link-map before the error is reported, and the `&&' with `<='
	 * arguably should be `||' with an out-of-range test to match the
	 * comment above.  Left as-is; confirm intent before changing this
	 * error path.
	 */
	if ((!lmp) && (pltndx <=
	    (ulong_t)PLTRELSZ(lmp) / (ulong_t)RELENT(lmp))) {
		Conv_inv_buf_t	inv_buf;

		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
		    conv_reloc_amd64_type(R_AMD64_JUMP_SLOT, 0, &inv_buf),
		    EC_NATPTR(lmp), EC_XWORD(pltndx), EC_NATPTR(from));
		rtldexit(lml, 1);
	}
	reloff = pltndx * (ulong_t)RELENT(lmp);

	/*
	 * Use relocation entry to get symbol table entry and symbol name.
	 */
	addr = (ulong_t)JMPREL(lmp);
	rptr = (Rela *)(addr + reloff);
	rsymndx = ELF_R_SYM(rptr->r_info);
	rsym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
	name = (char *)(STRTAB(lmp) + rsym->st_name);

	/*
	 * Determine the last link-map of this list, this'll be the starting
	 * point for any tsort() processing.
	 */
	llmp = lml->lm_tail;

	/*
	 * Find definition for symbol.  Initialize the symbol lookup data
	 * structure.
	 */
	SLOOKUP_INIT(sl, name, lmp, lml->lm_head, ld_entry_cnt, 0,
	    rsymndx, rsym, 0, LKUP_DEFT);

	if ((nsym = lookup_sym(&sl, &nlmp, &binfo, NULL)) == 0) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
		    demangle(name));
		rtldexit(lml, 1);
	}

	symval = nsym->st_value;
	if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
	    (nsym->st_shndx != SHN_ABS))
		symval += ADDR(nlmp);
	if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
		/*
		 * Record that this new link map is now bound to the caller.
		 */
		if (bind_one(lmp, nlmp, BND_REFER) == 0)
			rtldexit(lml, 1);
	}

	/*
	 * Give any auditors a chance to alter the bound value.
	 */
	if ((lml->lm_tflags | AFLAGS(lmp)) & LML_TFLG_AUD_SYMBIND) {
		uint_t	symndx = (((uintptr_t)nsym -
		    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
		symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
		    &sb_flags);
	}

	if (!(rtld_flags & RT_FL_NOBIND)) {
		addr = rptr->r_offset;
		if (!(FLAGS(lmp) & FLG_RT_FIXED))
			addr += ADDR(lmp);
		if (((lml->lm_tflags | AFLAGS(lmp)) &
		    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
		    AUDINFO(lmp)->ai_dynplts) {
			int	fail = 0;
			uint_t	pltndx = reloff / sizeof (Rela);
			uint_t	symndx = (((uintptr_t)nsym -
			    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

			/*
			 * Auditing requires pltenter/pltexit interposition,
			 * so bind through the per-plt glue code instead of
			 * directly to the destination.
			 */
			symval = (ulong_t)elf_plt_trace_write(addr, lmp, nlmp,
			    nsym, symndx, pltndx, (caddr_t)symval, sb_flags,
			    &fail);
			if (fail)
				rtldexit(lml, 1);
		} else {
			/*
			 * Write standard PLT entry to jump directly
			 * to newly bound function.
			 */
			*(ulong_t *)addr = symval;
		}
	}

	/*
	 * Print binding information and rebuild PLT entry.
	 */
	DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
	    (Xword)(reloff / sizeof (Rela)), PLT_T_FULL, nlmp, (Addr)symval,
	    nsym->st_value, name, binfo));

	/*
	 * Complete any processing for newly loaded objects.  Note we don't
	 * know exactly where any new objects are loaded (we know the object
	 * that supplied the symbol, but others may have been loaded lazily as
	 * we searched for the symbol), so sorting starts from the last
	 * link-map known on entry to this routine.
	 */
	if (entry)
		load_completion(llmp);

	/*
	 * Some operations like dldump() or dlopen()'ing a relocatable object
	 * result in objects being loaded on rtld's link-map, make sure these
	 * objects are initialized also.
	 */
	if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
		load_completion(nlmp);

	/*
	 * Make sure the object to which we've bound has had its .init fired.
	 * Cleanup before return to user code.
	 */
	if (entry) {
		is_dep_init(nlmp, lmp);
		leave(lml, 0);
	}

	if (lmflags & LML_FLG_RTLDLM)
		dbg_desc->d_class = dbg_class;

	return (symval);
}

/*
 * Read and process the relocations for one link object, we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt, int *in_nfavl, APlist **textrel)
{
	ulong_t		relbgn, relend, relsiz, basebgn;
	ulong_t		pltbgn, pltend, _pltbgn, _pltend;
	ulong_t		roffset, rsymndx, psymndx = 0;
	ulong_t		dsymndx;
	uchar_t		rtype;
	long		reladd, value, pvalue;
	Sym		*symref, *psymref, *symdef, *psymdef;
	char		*name, *pname;
	Rt_map		*_lmp, *plmp;
	int		ret = 1, noplt = 0;
	int		relacount = RELACOUNT(lmp), plthint = 0;
	Rela		*rel;
	uint_t		binfo, pbinfo;
	APlist		*bound = NULL;

	/*
	 * Although only necessary for lazy
binding, initialize the first 3930Sstevel@tonic-gate * global offset entry to go to elf_rtbndr(). dbx(1) seems 3940Sstevel@tonic-gate * to find this useful. 3950Sstevel@tonic-gate */ 3960Sstevel@tonic-gate if ((plt == 0) && PLTGOT(lmp)) { 397*8598SRod.Evans@Sun.COM mmapobj_result_t *mpp; 398*8598SRod.Evans@Sun.COM 399*8598SRod.Evans@Sun.COM /* 400*8598SRod.Evans@Sun.COM * Make sure the segment is writable. 401*8598SRod.Evans@Sun.COM */ 402*8598SRod.Evans@Sun.COM if ((((mpp = 403*8598SRod.Evans@Sun.COM find_segment((caddr_t)PLTGOT(lmp), lmp)) != NULL) && 404*8598SRod.Evans@Sun.COM ((mpp->mr_prot & PROT_WRITE) == 0)) && 405*8598SRod.Evans@Sun.COM ((set_prot(lmp, mpp, 1) == 0) || 406*8598SRod.Evans@Sun.COM (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL))) 407*8598SRod.Evans@Sun.COM return (0); 408*8598SRod.Evans@Sun.COM 409*8598SRod.Evans@Sun.COM elf_plt_init(PLTGOT(lmp), (caddr_t)lmp); 4100Sstevel@tonic-gate } 4110Sstevel@tonic-gate 4120Sstevel@tonic-gate /* 4130Sstevel@tonic-gate * Initialize the plt start and end addresses. 4140Sstevel@tonic-gate */ 4150Sstevel@tonic-gate if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0) 4160Sstevel@tonic-gate pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp)); 4170Sstevel@tonic-gate 4180Sstevel@tonic-gate 4190Sstevel@tonic-gate relsiz = (ulong_t)(RELENT(lmp)); 4200Sstevel@tonic-gate basebgn = ADDR(lmp); 4210Sstevel@tonic-gate 4220Sstevel@tonic-gate if (PLTRELSZ(lmp)) 4230Sstevel@tonic-gate plthint = PLTRELSZ(lmp) / relsiz; 4240Sstevel@tonic-gate 4250Sstevel@tonic-gate /* 4260Sstevel@tonic-gate * If we've been called upon to promote an RTLD_LAZY object to an 4270Sstevel@tonic-gate * RTLD_NOW then we're only interested in scaning the .plt table. 4280Sstevel@tonic-gate * An uninitialized .plt is the case where the associated got entry 4290Sstevel@tonic-gate * points back to the plt itself. Determine the range of the real .plt 4300Sstevel@tonic-gate * entries using the _PROCEDURE_LINKAGE_TABLE_ symbol. 
4310Sstevel@tonic-gate */ 4320Sstevel@tonic-gate if (plt) { 4330Sstevel@tonic-gate Slookup sl; 4340Sstevel@tonic-gate 4350Sstevel@tonic-gate relbgn = pltbgn; 4360Sstevel@tonic-gate relend = pltend; 4370Sstevel@tonic-gate if (!relbgn || (relbgn == relend)) 4380Sstevel@tonic-gate return (1); 4390Sstevel@tonic-gate 4405950Srie /* 4415950Srie * Initialize the symbol lookup data structure. 4425950Srie */ 4435950Srie SLOOKUP_INIT(sl, MSG_ORIG(MSG_SYM_PLT), lmp, lmp, ld_entry_cnt, 4445950Srie elf_hash(MSG_ORIG(MSG_SYM_PLT)), 0, 0, 0, LKUP_DEFT); 4450Sstevel@tonic-gate 446*8598SRod.Evans@Sun.COM if ((symdef = elf_find_sym(&sl, &_lmp, &binfo, NULL)) == 0) 4470Sstevel@tonic-gate return (1); 4480Sstevel@tonic-gate 4490Sstevel@tonic-gate _pltbgn = symdef->st_value; 4500Sstevel@tonic-gate if (!(FLAGS(lmp) & FLG_RT_FIXED) && 4510Sstevel@tonic-gate (symdef->st_shndx != SHN_ABS)) 4520Sstevel@tonic-gate _pltbgn += basebgn; 4530Sstevel@tonic-gate _pltend = _pltbgn + (((PLTRELSZ(lmp) / relsiz)) * 4544679Srie M_PLT_ENTSIZE) + M_PLT_RESERVSZ; 4550Sstevel@tonic-gate 4560Sstevel@tonic-gate } else { 4570Sstevel@tonic-gate /* 4580Sstevel@tonic-gate * The relocation sections appear to the run-time linker as a 4590Sstevel@tonic-gate * single table. Determine the address of the beginning and end 4600Sstevel@tonic-gate * of this table. There are two different interpretations of 4610Sstevel@tonic-gate * the ABI at this point: 4620Sstevel@tonic-gate * 4630Sstevel@tonic-gate * o The REL table and its associated RELSZ indicate the 4640Sstevel@tonic-gate * concatenation of *all* relocation sections (this is the 4650Sstevel@tonic-gate * model our link-editor constructs). 4660Sstevel@tonic-gate * 4670Sstevel@tonic-gate * o The REL table and its associated RELSZ indicate the 4680Sstevel@tonic-gate * concatenation of all *but* the .plt relocations. These 4690Sstevel@tonic-gate * relocations are specified individually by the JMPREL and 4700Sstevel@tonic-gate * PLTRELSZ entries. 
4710Sstevel@tonic-gate * 4720Sstevel@tonic-gate * Determine from our knowledege of the relocation range and 4730Sstevel@tonic-gate * .plt range, the range of the total relocation table. Note 4740Sstevel@tonic-gate * that one other ABI assumption seems to be that the .plt 4750Sstevel@tonic-gate * relocations always follow any other relocations, the 4760Sstevel@tonic-gate * following range checking drops that assumption. 4770Sstevel@tonic-gate */ 4780Sstevel@tonic-gate relbgn = (ulong_t)(REL(lmp)); 4790Sstevel@tonic-gate relend = relbgn + (ulong_t)(RELSZ(lmp)); 4800Sstevel@tonic-gate if (pltbgn) { 4810Sstevel@tonic-gate if (!relbgn || (relbgn > pltbgn)) 4820Sstevel@tonic-gate relbgn = pltbgn; 4830Sstevel@tonic-gate if (!relbgn || (relend < pltend)) 4840Sstevel@tonic-gate relend = pltend; 4850Sstevel@tonic-gate } 4860Sstevel@tonic-gate } 4870Sstevel@tonic-gate if (!relbgn || (relbgn == relend)) { 4881618Srie DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE)); 4890Sstevel@tonic-gate return (1); 4900Sstevel@tonic-gate } 4911618Srie DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START)); 4920Sstevel@tonic-gate 4930Sstevel@tonic-gate /* 4940Sstevel@tonic-gate * If we're processing a dynamic executable in lazy mode there is no 4950Sstevel@tonic-gate * need to scan the .rel.plt table, however if we're processing a shared 4960Sstevel@tonic-gate * object in lazy mode the .got addresses associated to each .plt must 4970Sstevel@tonic-gate * be relocated to reflect the location of the shared object. 4980Sstevel@tonic-gate */ 4990Sstevel@tonic-gate if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) && 5000Sstevel@tonic-gate (FLAGS(lmp) & FLG_RT_FIXED)) 5010Sstevel@tonic-gate noplt = 1; 5020Sstevel@tonic-gate 5030Sstevel@tonic-gate /* 5040Sstevel@tonic-gate * Loop through relocations. 
5050Sstevel@tonic-gate */ 5060Sstevel@tonic-gate while (relbgn < relend) { 507*8598SRod.Evans@Sun.COM mmapobj_result_t *mpp; 508*8598SRod.Evans@Sun.COM uint_t sb_flags = 0; 5090Sstevel@tonic-gate 5106206Sab196087 rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH); 5110Sstevel@tonic-gate 5120Sstevel@tonic-gate /* 5130Sstevel@tonic-gate * If this is a RELATIVE relocation in a shared object (the 5140Sstevel@tonic-gate * common case), and if we are not debugging, then jump into a 515*8598SRod.Evans@Sun.COM * tighter relocation loop (elf_reloc_relative). 5160Sstevel@tonic-gate */ 5170Sstevel@tonic-gate if ((rtype == R_AMD64_RELATIVE) && 5181618Srie ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) { 5190Sstevel@tonic-gate if (relacount) { 520*8598SRod.Evans@Sun.COM relbgn = elf_reloc_relative_count(relbgn, 521*8598SRod.Evans@Sun.COM relacount, relsiz, basebgn, lmp, textrel); 5220Sstevel@tonic-gate relacount = 0; 5230Sstevel@tonic-gate } else { 5240Sstevel@tonic-gate relbgn = elf_reloc_relative(relbgn, relend, 525*8598SRod.Evans@Sun.COM relsiz, basebgn, lmp, textrel); 5260Sstevel@tonic-gate } 5270Sstevel@tonic-gate if (relbgn >= relend) 5280Sstevel@tonic-gate break; 5296206Sab196087 rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH); 5300Sstevel@tonic-gate } 5310Sstevel@tonic-gate 5320Sstevel@tonic-gate roffset = ((Rela *)relbgn)->r_offset; 5330Sstevel@tonic-gate 5340Sstevel@tonic-gate /* 5350Sstevel@tonic-gate * If this is a shared object, add the base address to offset. 5360Sstevel@tonic-gate */ 5370Sstevel@tonic-gate if (!(FLAGS(lmp) & FLG_RT_FIXED)) { 5380Sstevel@tonic-gate /* 5390Sstevel@tonic-gate * If we're processing lazy bindings, we have to step 5400Sstevel@tonic-gate * through the plt entries and add the base address 5410Sstevel@tonic-gate * to the corresponding got entry. 
5420Sstevel@tonic-gate */ 5430Sstevel@tonic-gate if (plthint && (plt == 0) && 5440Sstevel@tonic-gate (rtype == R_AMD64_JUMP_SLOT) && 5450Sstevel@tonic-gate ((MODE(lmp) & RTLD_NOW) == 0)) { 5460Sstevel@tonic-gate /* 5470Sstevel@tonic-gate * The PLT relocations (for lazy bindings) 5480Sstevel@tonic-gate * are additive to what's already in the GOT. 5490Sstevel@tonic-gate * This differs to what happens in 5500Sstevel@tonic-gate * elf_reloc_relacount() and that's why we 5510Sstevel@tonic-gate * just do it inline here. 5520Sstevel@tonic-gate */ 5530Sstevel@tonic-gate for (roffset = ((Rela *)relbgn)->r_offset; 5540Sstevel@tonic-gate plthint; plthint--) { 5550Sstevel@tonic-gate roffset += basebgn; 5560Sstevel@tonic-gate 5570Sstevel@tonic-gate /* 5580Sstevel@tonic-gate * Perform the actual relocation. 5590Sstevel@tonic-gate */ 5600Sstevel@tonic-gate *((ulong_t *)roffset) += basebgn; 5610Sstevel@tonic-gate 5620Sstevel@tonic-gate relbgn += relsiz; 5630Sstevel@tonic-gate roffset = ((Rela *)relbgn)->r_offset; 5640Sstevel@tonic-gate 5650Sstevel@tonic-gate } 5660Sstevel@tonic-gate continue; 5670Sstevel@tonic-gate } 5680Sstevel@tonic-gate roffset += basebgn; 5690Sstevel@tonic-gate } 5700Sstevel@tonic-gate 5710Sstevel@tonic-gate reladd = (long)(((Rela *)relbgn)->r_addend); 5720Sstevel@tonic-gate rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info); 5730Sstevel@tonic-gate rel = (Rela *)relbgn; 5740Sstevel@tonic-gate relbgn += relsiz; 5750Sstevel@tonic-gate 5760Sstevel@tonic-gate /* 5770Sstevel@tonic-gate * Optimizations. 
5780Sstevel@tonic-gate */ 5790Sstevel@tonic-gate if (rtype == R_AMD64_NONE) 5800Sstevel@tonic-gate continue; 5810Sstevel@tonic-gate if (noplt && ((ulong_t)rel >= pltbgn) && 5820Sstevel@tonic-gate ((ulong_t)rel < pltend)) { 5830Sstevel@tonic-gate relbgn = pltend; 5840Sstevel@tonic-gate continue; 5850Sstevel@tonic-gate } 5860Sstevel@tonic-gate 5870Sstevel@tonic-gate /* 588*8598SRod.Evans@Sun.COM * If we're promoting plts, determine if this one has already 589*8598SRod.Evans@Sun.COM * been written. 590*8598SRod.Evans@Sun.COM */ 591*8598SRod.Evans@Sun.COM if (plt && ((*(ulong_t *)roffset < _pltbgn) || 592*8598SRod.Evans@Sun.COM (*(ulong_t *)roffset > _pltend))) 593*8598SRod.Evans@Sun.COM continue; 594*8598SRod.Evans@Sun.COM 595*8598SRod.Evans@Sun.COM /* 5960Sstevel@tonic-gate * If this relocation is not against part of the image 5970Sstevel@tonic-gate * mapped into memory we skip it. 5980Sstevel@tonic-gate */ 599*8598SRod.Evans@Sun.COM if ((mpp = find_segment((caddr_t)roffset, lmp)) == NULL) { 6000Sstevel@tonic-gate elf_reloc_bad(lmp, (void *)rel, rtype, roffset, 6010Sstevel@tonic-gate rsymndx); 6020Sstevel@tonic-gate continue; 6030Sstevel@tonic-gate } 6040Sstevel@tonic-gate 6050Sstevel@tonic-gate binfo = 0; 6060Sstevel@tonic-gate /* 6070Sstevel@tonic-gate * If a symbol index is specified then get the symbol table 6080Sstevel@tonic-gate * entry, locate the symbol definition, and determine its 6090Sstevel@tonic-gate * address. 6100Sstevel@tonic-gate */ 6110Sstevel@tonic-gate if (rsymndx) { 6120Sstevel@tonic-gate /* 6130Sstevel@tonic-gate * Get the local symbol table entry. 6140Sstevel@tonic-gate */ 6150Sstevel@tonic-gate symref = (Sym *)((ulong_t)SYMTAB(lmp) + 6164679Srie (rsymndx * SYMENT(lmp))); 6170Sstevel@tonic-gate 6180Sstevel@tonic-gate /* 6190Sstevel@tonic-gate * If this is a local symbol, just use the base address. 6200Sstevel@tonic-gate * (we should have no local relocations in the 6210Sstevel@tonic-gate * executable). 
6220Sstevel@tonic-gate */ 6230Sstevel@tonic-gate if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) { 6240Sstevel@tonic-gate value = basebgn; 6250Sstevel@tonic-gate name = (char *)0; 6260Sstevel@tonic-gate 6270Sstevel@tonic-gate /* 6282145Srie * Special case TLS relocations. 6290Sstevel@tonic-gate */ 6302145Srie if (rtype == R_AMD64_DTPMOD64) { 6312145Srie /* 6322145Srie * Use the TLS modid. 6332145Srie */ 6340Sstevel@tonic-gate value = TLSMODID(lmp); 6352145Srie 6362145Srie } else if ((rtype == R_AMD64_TPOFF64) || 6372145Srie (rtype == R_AMD64_TPOFF32)) { 6382145Srie if ((value = elf_static_tls(lmp, symref, 6392145Srie rel, rtype, 0, roffset, 0)) == 0) { 6402145Srie ret = 0; 6412145Srie break; 6422145Srie } 6432145Srie } 6440Sstevel@tonic-gate } else { 6450Sstevel@tonic-gate /* 6460Sstevel@tonic-gate * If the symbol index is equal to the previous 6470Sstevel@tonic-gate * symbol index relocation we processed then 6480Sstevel@tonic-gate * reuse the previous values. (Note that there 6490Sstevel@tonic-gate * have been cases where a relocation exists 6500Sstevel@tonic-gate * against a copy relocation symbol, our ld(1) 6510Sstevel@tonic-gate * should optimize this away, but make sure we 6520Sstevel@tonic-gate * don't use the same symbol information should 6530Sstevel@tonic-gate * this case exist). 
6540Sstevel@tonic-gate */ 6550Sstevel@tonic-gate if ((rsymndx == psymndx) && 6560Sstevel@tonic-gate (rtype != R_AMD64_COPY)) { 6570Sstevel@tonic-gate /* LINTED */ 6580Sstevel@tonic-gate if (psymdef == 0) { 6591618Srie DBG_CALL(Dbg_bind_weak(lmp, 6601618Srie (Addr)roffset, (Addr) 6610Sstevel@tonic-gate (roffset - basebgn), name)); 6620Sstevel@tonic-gate continue; 6630Sstevel@tonic-gate } 6640Sstevel@tonic-gate /* LINTED */ 6650Sstevel@tonic-gate value = pvalue; 6660Sstevel@tonic-gate /* LINTED */ 6670Sstevel@tonic-gate name = pname; 6680Sstevel@tonic-gate /* LINTED */ 6690Sstevel@tonic-gate symdef = psymdef; 6700Sstevel@tonic-gate /* LINTED */ 6710Sstevel@tonic-gate symref = psymref; 6720Sstevel@tonic-gate /* LINTED */ 6730Sstevel@tonic-gate _lmp = plmp; 6740Sstevel@tonic-gate /* LINTED */ 6750Sstevel@tonic-gate binfo = pbinfo; 6760Sstevel@tonic-gate 6770Sstevel@tonic-gate if ((LIST(_lmp)->lm_tflags | 678*8598SRod.Evans@Sun.COM AFLAGS(_lmp)) & 6790Sstevel@tonic-gate LML_TFLG_AUD_SYMBIND) { 6800Sstevel@tonic-gate value = audit_symbind(lmp, _lmp, 6810Sstevel@tonic-gate /* LINTED */ 6820Sstevel@tonic-gate symdef, dsymndx, value, 6830Sstevel@tonic-gate &sb_flags); 6840Sstevel@tonic-gate } 6850Sstevel@tonic-gate } else { 6860Sstevel@tonic-gate Slookup sl; 6870Sstevel@tonic-gate 6880Sstevel@tonic-gate /* 6890Sstevel@tonic-gate * Lookup the symbol definition. 6905950Srie * Initialize the symbol lookup data 6915950Srie * structure. 
6920Sstevel@tonic-gate */ 6930Sstevel@tonic-gate name = (char *)(STRTAB(lmp) + 6940Sstevel@tonic-gate symref->st_name); 6950Sstevel@tonic-gate 6965950Srie SLOOKUP_INIT(sl, name, lmp, 0, 6975950Srie ld_entry_cnt, 0, rsymndx, symref, 6985950Srie rtype, LKUP_STDRELOC); 6990Sstevel@tonic-gate 7006387Srie symdef = lookup_sym(&sl, &_lmp, 7016387Srie &binfo, in_nfavl); 7020Sstevel@tonic-gate 7030Sstevel@tonic-gate /* 7040Sstevel@tonic-gate * If the symbol is not found and the 7050Sstevel@tonic-gate * reference was not to a weak symbol, 7060Sstevel@tonic-gate * report an error. Weak references 7070Sstevel@tonic-gate * may be unresolved. 7080Sstevel@tonic-gate */ 7094679Srie /* BEGIN CSTYLED */ 7100Sstevel@tonic-gate if (symdef == 0) { 7116150Srie if (sl.sl_bind != STB_WEAK) { 7126150Srie if (elf_reloc_error(lmp, name, 7136150Srie rel, binfo)) 7146150Srie continue; 7151618Srie 716*8598SRod.Evans@Sun.COM ret = 0; 717*8598SRod.Evans@Sun.COM break; 7186150Srie 7190Sstevel@tonic-gate } else { 7200Sstevel@tonic-gate psymndx = rsymndx; 7210Sstevel@tonic-gate psymdef = 0; 7220Sstevel@tonic-gate 7231618Srie DBG_CALL(Dbg_bind_weak(lmp, 7241618Srie (Addr)roffset, (Addr) 7250Sstevel@tonic-gate (roffset - basebgn), name)); 7260Sstevel@tonic-gate continue; 7270Sstevel@tonic-gate } 7280Sstevel@tonic-gate } 7294679Srie /* END CSTYLED */ 7300Sstevel@tonic-gate 7310Sstevel@tonic-gate /* 7320Sstevel@tonic-gate * If symbol was found in an object 7330Sstevel@tonic-gate * other than the referencing object 7340Sstevel@tonic-gate * then record the binding. 
7350Sstevel@tonic-gate */ 7360Sstevel@tonic-gate if ((lmp != _lmp) && ((FLAGS1(_lmp) & 7370Sstevel@tonic-gate FL1_RT_NOINIFIN) == 0)) { 7385892Sab196087 if (aplist_test(&bound, _lmp, 7390Sstevel@tonic-gate AL_CNT_RELBIND) == 0) { 7400Sstevel@tonic-gate ret = 0; 7410Sstevel@tonic-gate break; 7420Sstevel@tonic-gate } 7430Sstevel@tonic-gate } 7440Sstevel@tonic-gate 7450Sstevel@tonic-gate /* 7460Sstevel@tonic-gate * Calculate the location of definition; 7470Sstevel@tonic-gate * symbol value plus base address of 7480Sstevel@tonic-gate * containing shared object. 7490Sstevel@tonic-gate */ 7502850Srie if (IS_SIZE(rtype)) 7512850Srie value = symdef->st_size; 7522850Srie else 7532850Srie value = symdef->st_value; 7542850Srie 7550Sstevel@tonic-gate if (!(FLAGS(_lmp) & FLG_RT_FIXED) && 7562850Srie !(IS_SIZE(rtype)) && 7570Sstevel@tonic-gate (symdef->st_shndx != SHN_ABS) && 7580Sstevel@tonic-gate (ELF_ST_TYPE(symdef->st_info) != 7590Sstevel@tonic-gate STT_TLS)) 7600Sstevel@tonic-gate value += ADDR(_lmp); 7610Sstevel@tonic-gate 7620Sstevel@tonic-gate /* 7630Sstevel@tonic-gate * Retain this symbol index and the 7640Sstevel@tonic-gate * value in case it can be used for the 7650Sstevel@tonic-gate * subsequent relocations. 
7660Sstevel@tonic-gate */ 7670Sstevel@tonic-gate if (rtype != R_AMD64_COPY) { 7680Sstevel@tonic-gate psymndx = rsymndx; 7690Sstevel@tonic-gate pvalue = value; 7700Sstevel@tonic-gate pname = name; 7710Sstevel@tonic-gate psymdef = symdef; 7720Sstevel@tonic-gate psymref = symref; 7730Sstevel@tonic-gate plmp = _lmp; 7740Sstevel@tonic-gate pbinfo = binfo; 7750Sstevel@tonic-gate } 7760Sstevel@tonic-gate if ((LIST(_lmp)->lm_tflags | 777*8598SRod.Evans@Sun.COM AFLAGS(_lmp)) & 7780Sstevel@tonic-gate LML_TFLG_AUD_SYMBIND) { 7790Sstevel@tonic-gate dsymndx = (((uintptr_t)symdef - 7800Sstevel@tonic-gate (uintptr_t)SYMTAB(_lmp)) / 7810Sstevel@tonic-gate SYMENT(_lmp)); 7820Sstevel@tonic-gate value = audit_symbind(lmp, _lmp, 7830Sstevel@tonic-gate symdef, dsymndx, value, 7840Sstevel@tonic-gate &sb_flags); 7850Sstevel@tonic-gate } 7860Sstevel@tonic-gate } 7870Sstevel@tonic-gate 7880Sstevel@tonic-gate /* 7890Sstevel@tonic-gate * If relocation is PC-relative, subtract 7900Sstevel@tonic-gate * offset address. 7910Sstevel@tonic-gate */ 7920Sstevel@tonic-gate if (IS_PC_RELATIVE(rtype)) 7930Sstevel@tonic-gate value -= roffset; 7940Sstevel@tonic-gate 7950Sstevel@tonic-gate /* 7962145Srie * Special case TLS relocations. 7970Sstevel@tonic-gate */ 7982145Srie if (rtype == R_AMD64_DTPMOD64) { 7992145Srie /* 8002145Srie * Relocation value is the TLS modid. 8012145Srie */ 8020Sstevel@tonic-gate value = TLSMODID(_lmp); 8032145Srie 8042145Srie } else if ((rtype == R_AMD64_TPOFF64) || 8052145Srie (rtype == R_AMD64_TPOFF32)) { 8062145Srie if ((value = elf_static_tls(_lmp, 8072145Srie symdef, rel, rtype, name, roffset, 8082145Srie value)) == 0) { 8092145Srie ret = 0; 8102145Srie break; 8112145Srie } 8122145Srie } 8130Sstevel@tonic-gate } 8140Sstevel@tonic-gate } else { 8150Sstevel@tonic-gate /* 8162145Srie * Special cases. 8170Sstevel@tonic-gate */ 8182145Srie if (rtype == R_AMD64_DTPMOD64) { 8192145Srie /* 8202145Srie * TLS relocation value is the TLS modid. 
8212145Srie */ 8220Sstevel@tonic-gate value = TLSMODID(lmp); 8232145Srie } else 8240Sstevel@tonic-gate value = basebgn; 8250Sstevel@tonic-gate name = (char *)0; 8260Sstevel@tonic-gate } 8270Sstevel@tonic-gate 8282145Srie DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH, 8292145Srie M_REL_SHT_TYPE, rel, NULL, name)); 8302145Srie 8310Sstevel@tonic-gate /* 832*8598SRod.Evans@Sun.COM * Make sure the segment is writable. 8330Sstevel@tonic-gate */ 834*8598SRod.Evans@Sun.COM if (((mpp->mr_prot & PROT_WRITE) == 0) && 835*8598SRod.Evans@Sun.COM ((set_prot(lmp, mpp, 1) == 0) || 836*8598SRod.Evans@Sun.COM (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL))) { 837*8598SRod.Evans@Sun.COM ret = 0; 838*8598SRod.Evans@Sun.COM break; 8390Sstevel@tonic-gate } 8400Sstevel@tonic-gate 8410Sstevel@tonic-gate /* 8420Sstevel@tonic-gate * Call relocation routine to perform required relocation. 8430Sstevel@tonic-gate */ 8440Sstevel@tonic-gate switch (rtype) { 8450Sstevel@tonic-gate case R_AMD64_COPY: 8460Sstevel@tonic-gate if (elf_copy_reloc(name, symref, lmp, (void *)roffset, 8470Sstevel@tonic-gate symdef, _lmp, (const void *)value) == 0) 8480Sstevel@tonic-gate ret = 0; 8490Sstevel@tonic-gate break; 8500Sstevel@tonic-gate case R_AMD64_JUMP_SLOT: 851*8598SRod.Evans@Sun.COM if (((LIST(lmp)->lm_tflags | AFLAGS(lmp)) & 8520Sstevel@tonic-gate (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) && 8530Sstevel@tonic-gate AUDINFO(lmp)->ai_dynplts) { 8540Sstevel@tonic-gate int fail = 0; 8550Sstevel@tonic-gate int pltndx = (((ulong_t)rel - 8564679Srie (uintptr_t)JMPREL(lmp)) / relsiz); 8570Sstevel@tonic-gate int symndx = (((uintptr_t)symdef - 8584679Srie (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp)); 8590Sstevel@tonic-gate 8600Sstevel@tonic-gate (void) elf_plt_trace_write(roffset, lmp, _lmp, 8610Sstevel@tonic-gate symdef, symndx, pltndx, (caddr_t)value, 8620Sstevel@tonic-gate sb_flags, &fail); 8630Sstevel@tonic-gate if (fail) 8640Sstevel@tonic-gate ret = 0; 8650Sstevel@tonic-gate } else { 
				/*
				 * No PLT auditing is in effect for this
				 * binding (see the audit-flags test above), so
				 * write a standard PLT entry: patch the GOT
				 * slot so that subsequent calls jump directly
				 * to the newly bound function.
				 */
				DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
				    ELF_DBG_RTLD, (Xword)roffset,
				    (Xword)value));
				*(ulong_t *)roffset = value;
			}
			break;
		default:
			/*
			 * All remaining relocation types: fold in the addend
			 * and hand the result to the common relocation engine.
			 */
			value += reladd;

			/*
			 * Write the relocation out.  do_reloc_rtld() returns
			 * zero on failure (e.g. overflow for a 32-bit field).
			 */
			if (do_reloc_rtld(rtype, (uchar_t *)roffset,
			    (Xword *)&value, name, NAME(lmp), LIST(lmp)) == 0)
				ret = 0;

			DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
			    (Xword)roffset, (Xword)value));
		}

		/*
		 * After a failure, continue processing further relocations
		 * only when warning tracing is enabled (presumably ldd(1)
		 * style reporting, so all errors get listed — confirm);
		 * otherwise stop at the first error.
		 */
		if ((ret == 0) &&
		    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
			break;

		/*
		 * If binding information was collected for this relocation,
		 * report the global binding through the debug interface.
		 */
		if (binfo) {
			DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
			    (Off)(roffset - basebgn), (Xword)(-1), PLT_T_FULL,
			    _lmp, (Addr)value, symdef->st_value, name, binfo));
		}
	}

	return (relocate_finish(lmp, bound, ret));
}

/*
 * Initialize the first few got entries so that function calls go to
 * elf_rtbndr:
 *
 *	GOT[GOT_XLINKMAP] =	the address of the link map
 *	GOT[GOT_XRTLD] =	the address of rtbinder
 *
 * With these two slots filled in, a lazy PLT entry can recover the caller's
 * link-map and transfer to the binding routine.
 */
void
elf_plt_init(void *got, caddr_t l)
{
	uint64_t	*_got;
	/* LINTED */
	Rt_map		*lmp = (Rt_map *)l;

	/* Stash the object's link-map pointer in its reserved GOT slot. */
	_got = (uint64_t *)got + M_GOT_XLINKMAP;
	*_got = (uint64_t)lmp;
	/* Point the binder slot at the run-time binding entry point. */
	_got = (uint64_t *)got + M_GOT_XRTLD;
	*_got = (uint64_t)elf_rtbndr;
}

/*
 * Plt writing interface to allow debugging initialization to be generic.
 *
 * Resolve one PLT slot to its final destination: store the bound symbol
 * value (plus the relocation addend) at the slot's relocation offset,
 * relative to the object's base address `addr'.  Always performs a full
 * binding, hence the PLT_T_FULL return.  `vaddr' and `pltndx' are unused
 * here (see ARGSUSED1); they exist for interface compatibility with other
 * machine implementations.
 */
Pltbindtype
/* ARGSUSED1 */
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
	Xword pltndx)
{
	Rela		*rel = (Rela*)rptr;
	uintptr_t	pltaddr;

	pltaddr = addr + rel->r_offset;
	*(ulong_t *)pltaddr = (ulong_t)symval + rel->r_addend;
	/* Debug-build statistic: count of fully bound PLT entries. */
	DBG_CALL(pltcntfull++);
	return (PLT_T_FULL);
}

/*
 * Provide a machine specific interface to the conversion routine.  By calling
 * the machine specific version, rather than the generic version, we insure that
 * the data tables/strings for all known machine versions aren't dragged into
 * ld.so.1.
 *
 * Note: the result is formatted into a static buffer, so a subsequent call
 * overwrites the previously returned string.
 */
const char *
_conv_reloc_type(uint_t rel)
{
	static Conv_inv_buf_t	inv_buf;

	return (conv_reloc_amd64_type(rel, 0, &inv_buf));
}