/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma	ident	"%Z%%M%	%I%	%E% SMI"

/*
 * amd64 machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
31*0Sstevel@tonic-gate */ 32*0Sstevel@tonic-gate #include "_synonyms.h" 33*0Sstevel@tonic-gate 34*0Sstevel@tonic-gate #include <stdio.h> 35*0Sstevel@tonic-gate #include <sys/elf.h> 36*0Sstevel@tonic-gate #include <sys/elf_amd64.h> 37*0Sstevel@tonic-gate #include <sys/mman.h> 38*0Sstevel@tonic-gate #include <dlfcn.h> 39*0Sstevel@tonic-gate #include <synch.h> 40*0Sstevel@tonic-gate #include <string.h> 41*0Sstevel@tonic-gate #include "_rtld.h" 42*0Sstevel@tonic-gate #include "_audit.h" 43*0Sstevel@tonic-gate #include "_elf.h" 44*0Sstevel@tonic-gate #include "msg.h" 45*0Sstevel@tonic-gate #include "debug.h" 46*0Sstevel@tonic-gate #include "reloc.h" 47*0Sstevel@tonic-gate #include "conv.h" 48*0Sstevel@tonic-gate 49*0Sstevel@tonic-gate 50*0Sstevel@tonic-gate extern void elf_rtbndr(Rt_map *, ulong_t, caddr_t); 51*0Sstevel@tonic-gate 52*0Sstevel@tonic-gate int 53*0Sstevel@tonic-gate elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr) 54*0Sstevel@tonic-gate { 55*0Sstevel@tonic-gate /* 56*0Sstevel@tonic-gate * Check machine type and flags. 57*0Sstevel@tonic-gate */ 58*0Sstevel@tonic-gate if (ehdr->e_flags != 0) { 59*0Sstevel@tonic-gate rej->rej_type = SGS_REJ_BADFLAG; 60*0Sstevel@tonic-gate rej->rej_info = (uint_t)ehdr->e_flags; 61*0Sstevel@tonic-gate return (0); 62*0Sstevel@tonic-gate } 63*0Sstevel@tonic-gate return (1); 64*0Sstevel@tonic-gate } 65*0Sstevel@tonic-gate 66*0Sstevel@tonic-gate void 67*0Sstevel@tonic-gate ldso_plt_init(Rt_map * lmp) 68*0Sstevel@tonic-gate { 69*0Sstevel@tonic-gate /* 70*0Sstevel@tonic-gate * There is no need to analyze ld.so because we don't map in any of 71*0Sstevel@tonic-gate * its dependencies. However we may map these dependencies in later 72*0Sstevel@tonic-gate * (as if ld.so had dlopened them), so initialize the plt and the 73*0Sstevel@tonic-gate * permission information. 
74*0Sstevel@tonic-gate */ 75*0Sstevel@tonic-gate if (PLTGOT(lmp)) 76*0Sstevel@tonic-gate elf_plt_init((void *)(PLTGOT(lmp)), (caddr_t)lmp); 77*0Sstevel@tonic-gate } 78*0Sstevel@tonic-gate 79*0Sstevel@tonic-gate static const uchar_t dyn_plt_template[] = { 80*0Sstevel@tonic-gate /* 0x00 */ 0x55, /* pushq %rbp */ 81*0Sstevel@tonic-gate /* 0x01 */ 0x48, 0x89, 0xe5, /* movq %rsp, %rbp */ 82*0Sstevel@tonic-gate /* 0x04 */ 0x48, 0x83, 0xec, 0x10, /* subq $0x10, %rsp */ 83*0Sstevel@tonic-gate /* 0x08 */ 0x4c, 0x8d, 0x1d, 0x00, /* leaq trace_fields(%rip), %r11 */ 84*0Sstevel@tonic-gate 0x00, 0x00, 0x00, 85*0Sstevel@tonic-gate /* 0x0f */ 0x4c, 0x89, 0x5d, 0xf8, /* movq %r11, -0x8(%rbp) */ 86*0Sstevel@tonic-gate /* 0x13 */ 0x49, 0xbb, 0x00, 0x00, /* movq $elf_plt_trace, %r11 */ 87*0Sstevel@tonic-gate 0x00, 0x00, 0x00, 88*0Sstevel@tonic-gate 0x00, 0x00, 0x00, 89*0Sstevel@tonic-gate /* 0x1d */ 0x41, 0xff, 0xe3 /* jmp *%r11 */ 90*0Sstevel@tonic-gate /* 0x20 */ 91*0Sstevel@tonic-gate }; 92*0Sstevel@tonic-gate 93*0Sstevel@tonic-gate /* 94*0Sstevel@tonic-gate * And the virutal outstanding relocations against the 95*0Sstevel@tonic-gate * above block are: 96*0Sstevel@tonic-gate * 97*0Sstevel@tonic-gate * reloc offset Addend symbol 98*0Sstevel@tonic-gate * R_AMD64_PC32 0x0b -4 trace_fields 99*0Sstevel@tonic-gate * R_AMD64_64 0x15 0 elf_plt_trace 100*0Sstevel@tonic-gate */ 101*0Sstevel@tonic-gate 102*0Sstevel@tonic-gate #define TRCREL1OFF 0x0b 103*0Sstevel@tonic-gate #define TRCREL2OFF 0x15 104*0Sstevel@tonic-gate 105*0Sstevel@tonic-gate int dyn_plt_ent_size = sizeof (dyn_plt_template); 106*0Sstevel@tonic-gate 107*0Sstevel@tonic-gate /* 108*0Sstevel@tonic-gate * the dynamic plt entry is: 109*0Sstevel@tonic-gate * 110*0Sstevel@tonic-gate * pushq %rbp 111*0Sstevel@tonic-gate * movq %rsp, %rbp 112*0Sstevel@tonic-gate * subq $0x10, %rsp 113*0Sstevel@tonic-gate * leaq trace_fields(%rip), %r11 114*0Sstevel@tonic-gate * movq %r11, -0x8(%rbp) 115*0Sstevel@tonic-gate * movq $elf_plt_trace, %r11 
116*0Sstevel@tonic-gate * jmp *%r11 117*0Sstevel@tonic-gate * dyn_data: 118*0Sstevel@tonic-gate * .align 8 119*0Sstevel@tonic-gate * uintptr_t reflmp 120*0Sstevel@tonic-gate * uintptr_t deflmp 121*0Sstevel@tonic-gate * uint_t symndx 122*0Sstevel@tonic-gate * uint_t sb_flags 123*0Sstevel@tonic-gate * Sym symdef 124*0Sstevel@tonic-gate */ 125*0Sstevel@tonic-gate static caddr_t 126*0Sstevel@tonic-gate elf_plt_trace_write(ulong_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym, 127*0Sstevel@tonic-gate uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail) 128*0Sstevel@tonic-gate { 129*0Sstevel@tonic-gate extern int elf_plt_trace(); 130*0Sstevel@tonic-gate ulong_t got_entry; 131*0Sstevel@tonic-gate uchar_t *dyn_plt; 132*0Sstevel@tonic-gate uintptr_t *dyndata; 133*0Sstevel@tonic-gate 134*0Sstevel@tonic-gate 135*0Sstevel@tonic-gate /* 136*0Sstevel@tonic-gate * We only need to add the glue code if there is an auditing 137*0Sstevel@tonic-gate * library that is interested in this binding. 138*0Sstevel@tonic-gate */ 139*0Sstevel@tonic-gate dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts + 140*0Sstevel@tonic-gate (pltndx * dyn_plt_ent_size)); 141*0Sstevel@tonic-gate 142*0Sstevel@tonic-gate /* 143*0Sstevel@tonic-gate * Have we initialized this dynamic plt entry yet? If we haven't do it 144*0Sstevel@tonic-gate * now. Otherwise this function has been called before, but from a 145*0Sstevel@tonic-gate * different plt (ie. from another shared object). In that case 146*0Sstevel@tonic-gate * we just set the plt to point to the new dyn_plt. 
147*0Sstevel@tonic-gate */ 148*0Sstevel@tonic-gate if (*dyn_plt == 0) { 149*0Sstevel@tonic-gate Sym * symp; 150*0Sstevel@tonic-gate Xword symvalue; 151*0Sstevel@tonic-gate 152*0Sstevel@tonic-gate (void) memcpy((void *)dyn_plt, dyn_plt_template, 153*0Sstevel@tonic-gate sizeof (dyn_plt_template)); 154*0Sstevel@tonic-gate dyndata = (uintptr_t *)((uintptr_t)dyn_plt + 155*0Sstevel@tonic-gate ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN)); 156*0Sstevel@tonic-gate 157*0Sstevel@tonic-gate /* 158*0Sstevel@tonic-gate * relocate: 159*0Sstevel@tonic-gate * leaq trace_fields(%rip), %r11 160*0Sstevel@tonic-gate * R_AMD64_PC32 0x0b -4 trace_fields 161*0Sstevel@tonic-gate */ 162*0Sstevel@tonic-gate symvalue = (Xword)((uintptr_t)dyndata - 163*0Sstevel@tonic-gate (uintptr_t)(&dyn_plt[TRCREL1OFF]) - 4); 164*0Sstevel@tonic-gate if (do_reloc(R_AMD64_PC32, &dyn_plt[TRCREL1OFF], &symvalue, 165*0Sstevel@tonic-gate MSG_ORIG(MSG_SYM_LADYNDATA), 166*0Sstevel@tonic-gate MSG_ORIG(MSG_SPECFIL_DYNPLT)) == 0) { 167*0Sstevel@tonic-gate *fail = 1; 168*0Sstevel@tonic-gate return (0); 169*0Sstevel@tonic-gate } 170*0Sstevel@tonic-gate 171*0Sstevel@tonic-gate /* 172*0Sstevel@tonic-gate * relocating: 173*0Sstevel@tonic-gate * movq $elf_plt_trace, %r11 174*0Sstevel@tonic-gate * R_AMD64_64 0x15 0 elf_plt_trace 175*0Sstevel@tonic-gate */ 176*0Sstevel@tonic-gate symvalue = (Xword)elf_plt_trace; 177*0Sstevel@tonic-gate if (do_reloc(R_AMD64_64, &dyn_plt[TRCREL2OFF], &symvalue, 178*0Sstevel@tonic-gate MSG_ORIG(MSG_SYM_ELFPLTTRACE), 179*0Sstevel@tonic-gate MSG_ORIG(MSG_SPECFIL_DYNPLT)) == 0) { 180*0Sstevel@tonic-gate *fail = 1; 181*0Sstevel@tonic-gate return (0); 182*0Sstevel@tonic-gate } 183*0Sstevel@tonic-gate 184*0Sstevel@tonic-gate *dyndata++ = (uintptr_t)rlmp; 185*0Sstevel@tonic-gate *dyndata++ = (uintptr_t)dlmp; 186*0Sstevel@tonic-gate *dyndata = (uintptr_t)(((uint64_t)sb_flags << 32) | symndx); 187*0Sstevel@tonic-gate dyndata++; 188*0Sstevel@tonic-gate symp = (Sym *)dyndata; 189*0Sstevel@tonic-gate 
*symp = *sym; 190*0Sstevel@tonic-gate symp->st_value = (Addr)to; 191*0Sstevel@tonic-gate } 192*0Sstevel@tonic-gate 193*0Sstevel@tonic-gate got_entry = (ulong_t)roffset; 194*0Sstevel@tonic-gate *(ulong_t *)got_entry = (ulong_t)dyn_plt; 195*0Sstevel@tonic-gate return ((caddr_t)dyn_plt); 196*0Sstevel@tonic-gate } 197*0Sstevel@tonic-gate 198*0Sstevel@tonic-gate 199*0Sstevel@tonic-gate /* 200*0Sstevel@tonic-gate * Function binding routine - invoked on the first call to a function through 201*0Sstevel@tonic-gate * the procedure linkage table; 202*0Sstevel@tonic-gate * passes first through an assembly language interface. 203*0Sstevel@tonic-gate * 204*0Sstevel@tonic-gate * Takes the offset into the relocation table of the associated 205*0Sstevel@tonic-gate * relocation entry and the address of the link map (rt_private_map struct) 206*0Sstevel@tonic-gate * for the entry. 207*0Sstevel@tonic-gate * 208*0Sstevel@tonic-gate * Returns the address of the function referenced after re-writing the PLT 209*0Sstevel@tonic-gate * entry to invoke the function directly. 210*0Sstevel@tonic-gate * 211*0Sstevel@tonic-gate * On error, causes process to terminate with a signal. 212*0Sstevel@tonic-gate */ 213*0Sstevel@tonic-gate ulong_t 214*0Sstevel@tonic-gate elf_bndr(Rt_map *lmp, ulong_t pltndx, caddr_t from) 215*0Sstevel@tonic-gate { 216*0Sstevel@tonic-gate Rt_map *nlmp, * llmp; 217*0Sstevel@tonic-gate ulong_t addr, reloff, symval, rsymndx; 218*0Sstevel@tonic-gate char *name; 219*0Sstevel@tonic-gate Rela *rptr; 220*0Sstevel@tonic-gate Sym *sym, *nsym; 221*0Sstevel@tonic-gate uint_t binfo, sb_flags = 0; 222*0Sstevel@tonic-gate Slookup sl; 223*0Sstevel@tonic-gate int entry, dbg_save, lmflags; 224*0Sstevel@tonic-gate 225*0Sstevel@tonic-gate /* 226*0Sstevel@tonic-gate * For compatibility with libthread (TI_VERSION 1) we track the entry 227*0Sstevel@tonic-gate * value. A zero value indicates we have recursed into ld.so.1 to 228*0Sstevel@tonic-gate * further process a locking request. 
Under this recursion we disable 229*0Sstevel@tonic-gate * tsort and cleanup activities. 230*0Sstevel@tonic-gate */ 231*0Sstevel@tonic-gate entry = enter(); 232*0Sstevel@tonic-gate 233*0Sstevel@tonic-gate if ((lmflags = LIST(lmp)->lm_flags) & LML_FLG_RTLDLM) { 234*0Sstevel@tonic-gate dbg_save = dbg_mask; 235*0Sstevel@tonic-gate dbg_mask = 0; 236*0Sstevel@tonic-gate } 237*0Sstevel@tonic-gate 238*0Sstevel@tonic-gate /* 239*0Sstevel@tonic-gate * Perform some basic sanity checks. If we didn't get a load map or 240*0Sstevel@tonic-gate * the relocation offset is invalid then its possible someone has walked 241*0Sstevel@tonic-gate * over the .got entries or jumped to plt0 out of the blue. 242*0Sstevel@tonic-gate */ 243*0Sstevel@tonic-gate if ((!lmp) && (pltndx <= 244*0Sstevel@tonic-gate (ulong_t)PLTRELSZ(lmp) / (ulong_t)RELENT(lmp))) { 245*0Sstevel@tonic-gate eprintf(ERR_FATAL, MSG_INTL(MSG_REL_PLTREF), 246*0Sstevel@tonic-gate conv_reloc_amd64_type_str(R_AMD64_JUMP_SLOT), 247*0Sstevel@tonic-gate EC_XWORD(lmp), EC_XWORD(pltndx), EC_ADDR(from)); 248*0Sstevel@tonic-gate rtldexit(LIST(lmp), 1); 249*0Sstevel@tonic-gate } 250*0Sstevel@tonic-gate reloff = pltndx * (ulong_t)RELENT(lmp); 251*0Sstevel@tonic-gate 252*0Sstevel@tonic-gate /* 253*0Sstevel@tonic-gate * Use relocation entry to get symbol table entry and symbol name. 254*0Sstevel@tonic-gate */ 255*0Sstevel@tonic-gate addr = (ulong_t)JMPREL(lmp); 256*0Sstevel@tonic-gate rptr = (Rela *)(addr + reloff); 257*0Sstevel@tonic-gate rsymndx = ELF_R_SYM(rptr->r_info); 258*0Sstevel@tonic-gate sym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp))); 259*0Sstevel@tonic-gate name = (char *)(STRTAB(lmp) + sym->st_name); 260*0Sstevel@tonic-gate 261*0Sstevel@tonic-gate /* 262*0Sstevel@tonic-gate * Determine the last link-map of this list, this'll be the starting 263*0Sstevel@tonic-gate * point for any tsort() processing. 
264*0Sstevel@tonic-gate */ 265*0Sstevel@tonic-gate llmp = LIST(lmp)->lm_tail; 266*0Sstevel@tonic-gate 267*0Sstevel@tonic-gate /* 268*0Sstevel@tonic-gate * Find definition for symbol. 269*0Sstevel@tonic-gate */ 270*0Sstevel@tonic-gate sl.sl_name = name; 271*0Sstevel@tonic-gate sl.sl_cmap = lmp; 272*0Sstevel@tonic-gate sl.sl_imap = LIST(lmp)->lm_head; 273*0Sstevel@tonic-gate sl.sl_hash = 0; 274*0Sstevel@tonic-gate sl.sl_rsymndx = rsymndx; 275*0Sstevel@tonic-gate sl.sl_flags = LKUP_DEFT; 276*0Sstevel@tonic-gate 277*0Sstevel@tonic-gate if ((nsym = lookup_sym(&sl, &nlmp, &binfo)) == 0) { 278*0Sstevel@tonic-gate eprintf(ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp), 279*0Sstevel@tonic-gate demangle(name)); 280*0Sstevel@tonic-gate rtldexit(LIST(lmp), 1); 281*0Sstevel@tonic-gate } 282*0Sstevel@tonic-gate 283*0Sstevel@tonic-gate symval = nsym->st_value; 284*0Sstevel@tonic-gate if (!(FLAGS(nlmp) & FLG_RT_FIXED) && 285*0Sstevel@tonic-gate (nsym->st_shndx != SHN_ABS)) 286*0Sstevel@tonic-gate symval += ADDR(nlmp); 287*0Sstevel@tonic-gate if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) { 288*0Sstevel@tonic-gate /* 289*0Sstevel@tonic-gate * Record that this new link map is now bound to the caller. 
290*0Sstevel@tonic-gate */ 291*0Sstevel@tonic-gate if (bind_one(lmp, nlmp, BND_REFER) == 0) 292*0Sstevel@tonic-gate rtldexit(LIST(lmp), 1); 293*0Sstevel@tonic-gate } 294*0Sstevel@tonic-gate 295*0Sstevel@tonic-gate if ((LIST(lmp)->lm_tflags | FLAGS1(lmp)) & LML_TFLG_AUD_SYMBIND) { 296*0Sstevel@tonic-gate uint_t symndx = (((uintptr_t)nsym - 297*0Sstevel@tonic-gate (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp)); 298*0Sstevel@tonic-gate symval = audit_symbind(lmp, nlmp, nsym, symndx, symval, 299*0Sstevel@tonic-gate &sb_flags); 300*0Sstevel@tonic-gate } 301*0Sstevel@tonic-gate 302*0Sstevel@tonic-gate if (!(rtld_flags & RT_FL_NOBIND)) { 303*0Sstevel@tonic-gate addr = rptr->r_offset; 304*0Sstevel@tonic-gate if (!(FLAGS(lmp) & FLG_RT_FIXED)) 305*0Sstevel@tonic-gate addr += ADDR(lmp); 306*0Sstevel@tonic-gate if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) & 307*0Sstevel@tonic-gate (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) && 308*0Sstevel@tonic-gate AUDINFO(lmp)->ai_dynplts) { 309*0Sstevel@tonic-gate int fail = 0; 310*0Sstevel@tonic-gate uint_t pltndx = reloff / sizeof (Rela); 311*0Sstevel@tonic-gate uint_t symndx = (((uintptr_t)nsym - 312*0Sstevel@tonic-gate (uintptr_t)SYMTAB(nlmp)) / 313*0Sstevel@tonic-gate SYMENT(nlmp)); 314*0Sstevel@tonic-gate 315*0Sstevel@tonic-gate symval = (ulong_t)elf_plt_trace_write(addr, lmp, nlmp, 316*0Sstevel@tonic-gate nsym, symndx, pltndx, (caddr_t)symval, sb_flags, 317*0Sstevel@tonic-gate &fail); 318*0Sstevel@tonic-gate if (fail) 319*0Sstevel@tonic-gate rtldexit(LIST(lmp), 1); 320*0Sstevel@tonic-gate } else { 321*0Sstevel@tonic-gate /* 322*0Sstevel@tonic-gate * Write standard PLT entry to jump directly 323*0Sstevel@tonic-gate * to newly bound function. 324*0Sstevel@tonic-gate */ 325*0Sstevel@tonic-gate *(ulong_t *)addr = symval; 326*0Sstevel@tonic-gate } 327*0Sstevel@tonic-gate } 328*0Sstevel@tonic-gate 329*0Sstevel@tonic-gate /* 330*0Sstevel@tonic-gate * Print binding information and rebuild PLT entry. 
331*0Sstevel@tonic-gate */ 332*0Sstevel@tonic-gate DBG_CALL(Dbg_bind_global(NAME(lmp), from, from - ADDR(lmp), 333*0Sstevel@tonic-gate (Xword)(reloff / sizeof (Rela)), PLT_T_FULL, NAME(nlmp), 334*0Sstevel@tonic-gate (caddr_t)symval, (caddr_t)nsym->st_value, name, binfo)); 335*0Sstevel@tonic-gate 336*0Sstevel@tonic-gate /* 337*0Sstevel@tonic-gate * Complete any processing for newly loaded objects. Note we don't 338*0Sstevel@tonic-gate * know exactly where any new objects are loaded (we know the object 339*0Sstevel@tonic-gate * that supplied the symbol, but others may have been loaded lazily as 340*0Sstevel@tonic-gate * we searched for the symbol), so sorting starts from the last 341*0Sstevel@tonic-gate * link-map know on entry to this routine. 342*0Sstevel@tonic-gate */ 343*0Sstevel@tonic-gate if (entry) 344*0Sstevel@tonic-gate load_completion(llmp, lmp); 345*0Sstevel@tonic-gate 346*0Sstevel@tonic-gate /* 347*0Sstevel@tonic-gate * Some operations like dldump() or dlopen()'ing a relocatable object 348*0Sstevel@tonic-gate * result in objects being loaded on rtld's link-map, make sure these 349*0Sstevel@tonic-gate * objects are initialized also. 350*0Sstevel@tonic-gate */ 351*0Sstevel@tonic-gate if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init) 352*0Sstevel@tonic-gate load_completion(nlmp, 0); 353*0Sstevel@tonic-gate 354*0Sstevel@tonic-gate /* 355*0Sstevel@tonic-gate * If the object we've bound to is in the process of being initialized 356*0Sstevel@tonic-gate * by another thread, determine whether we should block. 357*0Sstevel@tonic-gate */ 358*0Sstevel@tonic-gate is_dep_ready(nlmp, lmp, DBG_WAIT_SYMBOL); 359*0Sstevel@tonic-gate 360*0Sstevel@tonic-gate /* 361*0Sstevel@tonic-gate * Make sure the object to which we've bound has had it's .init fired. 362*0Sstevel@tonic-gate * Cleanup before return to user code. 
363*0Sstevel@tonic-gate */ 364*0Sstevel@tonic-gate if (entry) { 365*0Sstevel@tonic-gate is_dep_init(nlmp, lmp); 366*0Sstevel@tonic-gate leave(LIST(lmp)); 367*0Sstevel@tonic-gate } 368*0Sstevel@tonic-gate 369*0Sstevel@tonic-gate if (lmflags & LML_FLG_RTLDLM) 370*0Sstevel@tonic-gate dbg_mask = dbg_save; 371*0Sstevel@tonic-gate 372*0Sstevel@tonic-gate return (symval); 373*0Sstevel@tonic-gate } 374*0Sstevel@tonic-gate 375*0Sstevel@tonic-gate 376*0Sstevel@tonic-gate /* 377*0Sstevel@tonic-gate * When the relocation loop realizes that it's dealing with relative 378*0Sstevel@tonic-gate * relocations in a shared object, it breaks into this tighter loop 379*0Sstevel@tonic-gate * as an optimization. 380*0Sstevel@tonic-gate */ 381*0Sstevel@tonic-gate ulong_t 382*0Sstevel@tonic-gate elf_reloc_relative(ulong_t relbgn, ulong_t relend, ulong_t relsiz, 383*0Sstevel@tonic-gate ulong_t basebgn, ulong_t etext, ulong_t emap) 384*0Sstevel@tonic-gate { 385*0Sstevel@tonic-gate ulong_t roffset = ((Rela *)relbgn)->r_offset; 386*0Sstevel@tonic-gate char rtype; 387*0Sstevel@tonic-gate 388*0Sstevel@tonic-gate do { 389*0Sstevel@tonic-gate roffset += basebgn; 390*0Sstevel@tonic-gate 391*0Sstevel@tonic-gate /* 392*0Sstevel@tonic-gate * If this relocation is against an address not mapped in, 393*0Sstevel@tonic-gate * then break out of the relative relocation loop, falling 394*0Sstevel@tonic-gate * back on the main relocation loop. 395*0Sstevel@tonic-gate */ 396*0Sstevel@tonic-gate if (roffset < etext || roffset > emap) 397*0Sstevel@tonic-gate break; 398*0Sstevel@tonic-gate 399*0Sstevel@tonic-gate /* 400*0Sstevel@tonic-gate * Perform the actual relocation. 
401*0Sstevel@tonic-gate */ 402*0Sstevel@tonic-gate *((ulong_t *)roffset) = basebgn + 403*0Sstevel@tonic-gate ((Rela *)relbgn)->r_addend; 404*0Sstevel@tonic-gate 405*0Sstevel@tonic-gate relbgn += relsiz; 406*0Sstevel@tonic-gate 407*0Sstevel@tonic-gate if (relbgn >= relend) 408*0Sstevel@tonic-gate break; 409*0Sstevel@tonic-gate 410*0Sstevel@tonic-gate rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info); 411*0Sstevel@tonic-gate roffset = ((Rela *)relbgn)->r_offset; 412*0Sstevel@tonic-gate 413*0Sstevel@tonic-gate } while (rtype == R_AMD64_RELATIVE); 414*0Sstevel@tonic-gate 415*0Sstevel@tonic-gate return (relbgn); 416*0Sstevel@tonic-gate } 417*0Sstevel@tonic-gate 418*0Sstevel@tonic-gate /* 419*0Sstevel@tonic-gate * This is the tightest loop for RELATIVE relocations for those 420*0Sstevel@tonic-gate * objects built with the DT_RELACOUNT .dynamic entry. 421*0Sstevel@tonic-gate */ 422*0Sstevel@tonic-gate ulong_t 423*0Sstevel@tonic-gate elf_reloc_relacount(ulong_t relbgn, ulong_t relacount, ulong_t relsiz, 424*0Sstevel@tonic-gate ulong_t basebgn) 425*0Sstevel@tonic-gate { 426*0Sstevel@tonic-gate ulong_t roffset = ((Rela *) relbgn)->r_offset; 427*0Sstevel@tonic-gate 428*0Sstevel@tonic-gate for (; relacount; relacount--) { 429*0Sstevel@tonic-gate roffset += basebgn; 430*0Sstevel@tonic-gate 431*0Sstevel@tonic-gate /* 432*0Sstevel@tonic-gate * Perform the actual relocation. 
433*0Sstevel@tonic-gate */ 434*0Sstevel@tonic-gate *((ulong_t *)roffset) = basebgn + 435*0Sstevel@tonic-gate ((Rela *)relbgn)->r_addend; 436*0Sstevel@tonic-gate 437*0Sstevel@tonic-gate relbgn += relsiz; 438*0Sstevel@tonic-gate 439*0Sstevel@tonic-gate roffset = ((Rela *)relbgn)->r_offset; 440*0Sstevel@tonic-gate 441*0Sstevel@tonic-gate } 442*0Sstevel@tonic-gate 443*0Sstevel@tonic-gate return (relbgn); 444*0Sstevel@tonic-gate } 445*0Sstevel@tonic-gate 446*0Sstevel@tonic-gate /* 447*0Sstevel@tonic-gate * Read and process the relocations for one link object, we assume all 448*0Sstevel@tonic-gate * relocation sections for loadable segments are stored contiguously in 449*0Sstevel@tonic-gate * the file. 450*0Sstevel@tonic-gate */ 451*0Sstevel@tonic-gate int 452*0Sstevel@tonic-gate elf_reloc(Rt_map *lmp, uint_t plt) 453*0Sstevel@tonic-gate { 454*0Sstevel@tonic-gate ulong_t relbgn, relend, relsiz, basebgn; 455*0Sstevel@tonic-gate ulong_t pltbgn, pltend, _pltbgn, _pltend; 456*0Sstevel@tonic-gate ulong_t roffset, rsymndx, psymndx = 0, etext = ETEXT(lmp); 457*0Sstevel@tonic-gate ulong_t emap, dsymndx; 458*0Sstevel@tonic-gate uchar_t rtype; 459*0Sstevel@tonic-gate long reladd, value, pvalue; 460*0Sstevel@tonic-gate Sym *symref, *psymref, *symdef, *psymdef; 461*0Sstevel@tonic-gate char *name, *pname; 462*0Sstevel@tonic-gate Rt_map *_lmp, *plmp; 463*0Sstevel@tonic-gate int textrel = 0, ret = 1, noplt = 0; 464*0Sstevel@tonic-gate int relacount = RELACOUNT(lmp), plthint = 0; 465*0Sstevel@tonic-gate Rela *rel; 466*0Sstevel@tonic-gate uint_t binfo, pbinfo; 467*0Sstevel@tonic-gate Alist *bound = 0; 468*0Sstevel@tonic-gate 469*0Sstevel@tonic-gate /* 470*0Sstevel@tonic-gate * Although only necessary for lazy binding, initialize the first 471*0Sstevel@tonic-gate * global offset entry to go to elf_rtbndr(). dbx(1) seems 472*0Sstevel@tonic-gate * to find this useful. 
473*0Sstevel@tonic-gate */ 474*0Sstevel@tonic-gate if ((plt == 0) && PLTGOT(lmp)) { 475*0Sstevel@tonic-gate if ((ulong_t)PLTGOT(lmp) < etext) { 476*0Sstevel@tonic-gate if (elf_set_prot(lmp, PROT_WRITE) == 0) 477*0Sstevel@tonic-gate return (0); 478*0Sstevel@tonic-gate textrel = 1; 479*0Sstevel@tonic-gate } 480*0Sstevel@tonic-gate elf_plt_init((void *)PLTGOT(lmp), (caddr_t)lmp); 481*0Sstevel@tonic-gate } 482*0Sstevel@tonic-gate 483*0Sstevel@tonic-gate /* 484*0Sstevel@tonic-gate * Initialize the plt start and end addresses. 485*0Sstevel@tonic-gate */ 486*0Sstevel@tonic-gate if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0) 487*0Sstevel@tonic-gate pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp)); 488*0Sstevel@tonic-gate 489*0Sstevel@tonic-gate 490*0Sstevel@tonic-gate relsiz = (ulong_t)(RELENT(lmp)); 491*0Sstevel@tonic-gate basebgn = ADDR(lmp); 492*0Sstevel@tonic-gate emap = ADDR(lmp) + MSIZE(lmp); 493*0Sstevel@tonic-gate 494*0Sstevel@tonic-gate if (PLTRELSZ(lmp)) 495*0Sstevel@tonic-gate plthint = PLTRELSZ(lmp) / relsiz; 496*0Sstevel@tonic-gate 497*0Sstevel@tonic-gate /* 498*0Sstevel@tonic-gate * If we've been called upon to promote an RTLD_LAZY object to an 499*0Sstevel@tonic-gate * RTLD_NOW then we're only interested in scaning the .plt table. 500*0Sstevel@tonic-gate * An uninitialized .plt is the case where the associated got entry 501*0Sstevel@tonic-gate * points back to the plt itself. Determine the range of the real .plt 502*0Sstevel@tonic-gate * entries using the _PROCEDURE_LINKAGE_TABLE_ symbol. 
503*0Sstevel@tonic-gate */ 504*0Sstevel@tonic-gate if (plt) { 505*0Sstevel@tonic-gate Slookup sl; 506*0Sstevel@tonic-gate 507*0Sstevel@tonic-gate relbgn = pltbgn; 508*0Sstevel@tonic-gate relend = pltend; 509*0Sstevel@tonic-gate if (!relbgn || (relbgn == relend)) 510*0Sstevel@tonic-gate return (1); 511*0Sstevel@tonic-gate 512*0Sstevel@tonic-gate sl.sl_name = MSG_ORIG(MSG_SYM_PLT); 513*0Sstevel@tonic-gate sl.sl_cmap = lmp; 514*0Sstevel@tonic-gate sl.sl_imap = lmp; 515*0Sstevel@tonic-gate sl.sl_hash = 0; 516*0Sstevel@tonic-gate sl.sl_rsymndx = 0; 517*0Sstevel@tonic-gate sl.sl_flags = LKUP_DEFT; 518*0Sstevel@tonic-gate 519*0Sstevel@tonic-gate if ((symdef = elf_find_sym(&sl, &_lmp, &binfo)) == 0) 520*0Sstevel@tonic-gate return (1); 521*0Sstevel@tonic-gate 522*0Sstevel@tonic-gate _pltbgn = symdef->st_value; 523*0Sstevel@tonic-gate if (!(FLAGS(lmp) & FLG_RT_FIXED) && 524*0Sstevel@tonic-gate (symdef->st_shndx != SHN_ABS)) 525*0Sstevel@tonic-gate _pltbgn += basebgn; 526*0Sstevel@tonic-gate _pltend = _pltbgn + (((PLTRELSZ(lmp) / relsiz)) * 527*0Sstevel@tonic-gate M_PLT_ENTSIZE) + M_PLT_RESERVSZ; 528*0Sstevel@tonic-gate 529*0Sstevel@tonic-gate } else { 530*0Sstevel@tonic-gate /* 531*0Sstevel@tonic-gate * The relocation sections appear to the run-time linker as a 532*0Sstevel@tonic-gate * single table. Determine the address of the beginning and end 533*0Sstevel@tonic-gate * of this table. There are two different interpretations of 534*0Sstevel@tonic-gate * the ABI at this point: 535*0Sstevel@tonic-gate * 536*0Sstevel@tonic-gate * o The REL table and its associated RELSZ indicate the 537*0Sstevel@tonic-gate * concatenation of *all* relocation sections (this is the 538*0Sstevel@tonic-gate * model our link-editor constructs). 539*0Sstevel@tonic-gate * 540*0Sstevel@tonic-gate * o The REL table and its associated RELSZ indicate the 541*0Sstevel@tonic-gate * concatenation of all *but* the .plt relocations. 
These 542*0Sstevel@tonic-gate * relocations are specified individually by the JMPREL and 543*0Sstevel@tonic-gate * PLTRELSZ entries. 544*0Sstevel@tonic-gate * 545*0Sstevel@tonic-gate * Determine from our knowledege of the relocation range and 546*0Sstevel@tonic-gate * .plt range, the range of the total relocation table. Note 547*0Sstevel@tonic-gate * that one other ABI assumption seems to be that the .plt 548*0Sstevel@tonic-gate * relocations always follow any other relocations, the 549*0Sstevel@tonic-gate * following range checking drops that assumption. 550*0Sstevel@tonic-gate */ 551*0Sstevel@tonic-gate relbgn = (ulong_t)(REL(lmp)); 552*0Sstevel@tonic-gate relend = relbgn + (ulong_t)(RELSZ(lmp)); 553*0Sstevel@tonic-gate if (pltbgn) { 554*0Sstevel@tonic-gate if (!relbgn || (relbgn > pltbgn)) 555*0Sstevel@tonic-gate relbgn = pltbgn; 556*0Sstevel@tonic-gate if (!relbgn || (relend < pltend)) 557*0Sstevel@tonic-gate relend = pltend; 558*0Sstevel@tonic-gate } 559*0Sstevel@tonic-gate } 560*0Sstevel@tonic-gate if (!relbgn || (relbgn == relend)) { 561*0Sstevel@tonic-gate DBG_CALL(Dbg_reloc_run(NAME(lmp), 0, plt, DBG_REL_NONE)); 562*0Sstevel@tonic-gate return (1); 563*0Sstevel@tonic-gate } 564*0Sstevel@tonic-gate DBG_CALL(Dbg_reloc_run(NAME(lmp), M_REL_SHT_TYPE, plt, DBG_REL_START)); 565*0Sstevel@tonic-gate 566*0Sstevel@tonic-gate /* 567*0Sstevel@tonic-gate * If we're processing a dynamic executable in lazy mode there is no 568*0Sstevel@tonic-gate * need to scan the .rel.plt table, however if we're processing a shared 569*0Sstevel@tonic-gate * object in lazy mode the .got addresses associated to each .plt must 570*0Sstevel@tonic-gate * be relocated to reflect the location of the shared object. 
571*0Sstevel@tonic-gate */ 572*0Sstevel@tonic-gate if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) && 573*0Sstevel@tonic-gate (FLAGS(lmp) & FLG_RT_FIXED)) 574*0Sstevel@tonic-gate noplt = 1; 575*0Sstevel@tonic-gate 576*0Sstevel@tonic-gate /* 577*0Sstevel@tonic-gate * Loop through relocations. 578*0Sstevel@tonic-gate */ 579*0Sstevel@tonic-gate while (relbgn < relend) { 580*0Sstevel@tonic-gate uint_t sb_flags = 0; 581*0Sstevel@tonic-gate 582*0Sstevel@tonic-gate rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info); 583*0Sstevel@tonic-gate 584*0Sstevel@tonic-gate /* 585*0Sstevel@tonic-gate * If this is a RELATIVE relocation in a shared object (the 586*0Sstevel@tonic-gate * common case), and if we are not debugging, then jump into a 587*0Sstevel@tonic-gate * tighter relocation loop (elf_reloc_relative). Only make the 588*0Sstevel@tonic-gate * jump if we've been given a hint on the number of relocations. 589*0Sstevel@tonic-gate */ 590*0Sstevel@tonic-gate if ((rtype == R_AMD64_RELATIVE) && 591*0Sstevel@tonic-gate !(FLAGS(lmp) & FLG_RT_FIXED) && !dbg_mask) { 592*0Sstevel@tonic-gate /* 593*0Sstevel@tonic-gate * It's possible that the relative relocation block 594*0Sstevel@tonic-gate * has relocations against the text segment as well 595*0Sstevel@tonic-gate * as the data segment. Since our optimized relocation 596*0Sstevel@tonic-gate * engine does not check which segment the relocation 597*0Sstevel@tonic-gate * is against - just mprotect it now if it's been 598*0Sstevel@tonic-gate * marked as containing TEXTREL's. 
599*0Sstevel@tonic-gate */ 600*0Sstevel@tonic-gate if ((textrel == 0) && (FLAGS1(lmp) & FL1_RT_TEXTREL)) { 601*0Sstevel@tonic-gate if (elf_set_prot(lmp, PROT_WRITE) == 0) { 602*0Sstevel@tonic-gate ret = 0; 603*0Sstevel@tonic-gate break; 604*0Sstevel@tonic-gate } 605*0Sstevel@tonic-gate textrel = 1; 606*0Sstevel@tonic-gate } 607*0Sstevel@tonic-gate if (relacount) { 608*0Sstevel@tonic-gate relbgn = elf_reloc_relacount(relbgn, relacount, 609*0Sstevel@tonic-gate relsiz, basebgn); 610*0Sstevel@tonic-gate relacount = 0; 611*0Sstevel@tonic-gate } else { 612*0Sstevel@tonic-gate relbgn = elf_reloc_relative(relbgn, relend, 613*0Sstevel@tonic-gate relsiz, basebgn, etext, emap); 614*0Sstevel@tonic-gate } 615*0Sstevel@tonic-gate if (relbgn >= relend) 616*0Sstevel@tonic-gate break; 617*0Sstevel@tonic-gate rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info); 618*0Sstevel@tonic-gate } 619*0Sstevel@tonic-gate 620*0Sstevel@tonic-gate roffset = ((Rela *)relbgn)->r_offset; 621*0Sstevel@tonic-gate 622*0Sstevel@tonic-gate /* 623*0Sstevel@tonic-gate * If this is a shared object, add the base address to offset. 624*0Sstevel@tonic-gate */ 625*0Sstevel@tonic-gate if (!(FLAGS(lmp) & FLG_RT_FIXED)) { 626*0Sstevel@tonic-gate 627*0Sstevel@tonic-gate 628*0Sstevel@tonic-gate /* 629*0Sstevel@tonic-gate * If we're processing lazy bindings, we have to step 630*0Sstevel@tonic-gate * through the plt entries and add the base address 631*0Sstevel@tonic-gate * to the corresponding got entry. 632*0Sstevel@tonic-gate */ 633*0Sstevel@tonic-gate if (plthint && (plt == 0) && 634*0Sstevel@tonic-gate (rtype == R_AMD64_JUMP_SLOT) && 635*0Sstevel@tonic-gate ((MODE(lmp) & RTLD_NOW) == 0)) { 636*0Sstevel@tonic-gate /* 637*0Sstevel@tonic-gate * The PLT relocations (for lazy bindings) 638*0Sstevel@tonic-gate * are additive to what's already in the GOT. 639*0Sstevel@tonic-gate * This differs to what happens in 640*0Sstevel@tonic-gate * elf_reloc_relacount() and that's why we 641*0Sstevel@tonic-gate * just do it inline here. 
642*0Sstevel@tonic-gate */ 643*0Sstevel@tonic-gate for (roffset = ((Rela *)relbgn)->r_offset; 644*0Sstevel@tonic-gate plthint; plthint--) { 645*0Sstevel@tonic-gate roffset += basebgn; 646*0Sstevel@tonic-gate 647*0Sstevel@tonic-gate /* 648*0Sstevel@tonic-gate * Perform the actual relocation. 649*0Sstevel@tonic-gate */ 650*0Sstevel@tonic-gate *((ulong_t *)roffset) += basebgn; 651*0Sstevel@tonic-gate 652*0Sstevel@tonic-gate relbgn += relsiz; 653*0Sstevel@tonic-gate roffset = ((Rela *)relbgn)->r_offset; 654*0Sstevel@tonic-gate 655*0Sstevel@tonic-gate } 656*0Sstevel@tonic-gate continue; 657*0Sstevel@tonic-gate } 658*0Sstevel@tonic-gate roffset += basebgn; 659*0Sstevel@tonic-gate } 660*0Sstevel@tonic-gate 661*0Sstevel@tonic-gate reladd = (long)(((Rela *)relbgn)->r_addend); 662*0Sstevel@tonic-gate rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info); 663*0Sstevel@tonic-gate rel = (Rela *)relbgn; 664*0Sstevel@tonic-gate relbgn += relsiz; 665*0Sstevel@tonic-gate 666*0Sstevel@tonic-gate /* 667*0Sstevel@tonic-gate * Optimizations. 668*0Sstevel@tonic-gate */ 669*0Sstevel@tonic-gate if (rtype == R_AMD64_NONE) 670*0Sstevel@tonic-gate continue; 671*0Sstevel@tonic-gate if (noplt && ((ulong_t)rel >= pltbgn) && 672*0Sstevel@tonic-gate ((ulong_t)rel < pltend)) { 673*0Sstevel@tonic-gate relbgn = pltend; 674*0Sstevel@tonic-gate continue; 675*0Sstevel@tonic-gate } 676*0Sstevel@tonic-gate 677*0Sstevel@tonic-gate /* 678*0Sstevel@tonic-gate * If this relocation is not against part of the image 679*0Sstevel@tonic-gate * mapped into memory we skip it. 
680*0Sstevel@tonic-gate */ 681*0Sstevel@tonic-gate if ((roffset < ADDR(lmp)) || (roffset > (ADDR(lmp) + 682*0Sstevel@tonic-gate MSIZE(lmp)))) { 683*0Sstevel@tonic-gate elf_reloc_bad(lmp, (void *)rel, rtype, roffset, 684*0Sstevel@tonic-gate rsymndx); 685*0Sstevel@tonic-gate continue; 686*0Sstevel@tonic-gate } 687*0Sstevel@tonic-gate 688*0Sstevel@tonic-gate /* 689*0Sstevel@tonic-gate * If we're promoting plts determine if this one has already 690*0Sstevel@tonic-gate * been written. 691*0Sstevel@tonic-gate */ 692*0Sstevel@tonic-gate if (plt) { 693*0Sstevel@tonic-gate if ((*(ulong_t *)roffset < _pltbgn) || 694*0Sstevel@tonic-gate (*(ulong_t *)roffset > _pltend)) 695*0Sstevel@tonic-gate continue; 696*0Sstevel@tonic-gate } 697*0Sstevel@tonic-gate 698*0Sstevel@tonic-gate binfo = 0; 699*0Sstevel@tonic-gate /* 700*0Sstevel@tonic-gate * If a symbol index is specified then get the symbol table 701*0Sstevel@tonic-gate * entry, locate the symbol definition, and determine its 702*0Sstevel@tonic-gate * address. 703*0Sstevel@tonic-gate */ 704*0Sstevel@tonic-gate if (rsymndx) { 705*0Sstevel@tonic-gate /* 706*0Sstevel@tonic-gate * Get the local symbol table entry. 707*0Sstevel@tonic-gate */ 708*0Sstevel@tonic-gate symref = (Sym *)((ulong_t)SYMTAB(lmp) + 709*0Sstevel@tonic-gate (rsymndx * SYMENT(lmp))); 710*0Sstevel@tonic-gate 711*0Sstevel@tonic-gate /* 712*0Sstevel@tonic-gate * If this is a local symbol, just use the base address. 713*0Sstevel@tonic-gate * (we should have no local relocations in the 714*0Sstevel@tonic-gate * executable). 715*0Sstevel@tonic-gate */ 716*0Sstevel@tonic-gate if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) { 717*0Sstevel@tonic-gate value = basebgn; 718*0Sstevel@tonic-gate name = (char *)0; 719*0Sstevel@tonic-gate 720*0Sstevel@tonic-gate /* 721*0Sstevel@tonic-gate * TLS relocation - value for DTPMOD64 722*0Sstevel@tonic-gate * relocation is the TLS modid. 
723*0Sstevel@tonic-gate */ 724*0Sstevel@tonic-gate if (rtype == R_AMD64_DTPMOD64) 725*0Sstevel@tonic-gate value = TLSMODID(lmp); 726*0Sstevel@tonic-gate } else { 727*0Sstevel@tonic-gate /* 728*0Sstevel@tonic-gate * If the symbol index is equal to the previous 729*0Sstevel@tonic-gate * symbol index relocation we processed then 730*0Sstevel@tonic-gate * reuse the previous values. (Note that there 731*0Sstevel@tonic-gate * have been cases where a relocation exists 732*0Sstevel@tonic-gate * against a copy relocation symbol, our ld(1) 733*0Sstevel@tonic-gate * should optimize this away, but make sure we 734*0Sstevel@tonic-gate * don't use the same symbol information should 735*0Sstevel@tonic-gate * this case exist). 736*0Sstevel@tonic-gate */ 737*0Sstevel@tonic-gate if ((rsymndx == psymndx) && 738*0Sstevel@tonic-gate (rtype != R_AMD64_COPY)) { 739*0Sstevel@tonic-gate /* LINTED */ 740*0Sstevel@tonic-gate if (psymdef == 0) { 741*0Sstevel@tonic-gate DBG_CALL(Dbg_bind_weak( 742*0Sstevel@tonic-gate NAME(lmp), (caddr_t)roffset, 743*0Sstevel@tonic-gate (caddr_t) 744*0Sstevel@tonic-gate (roffset - basebgn), name)); 745*0Sstevel@tonic-gate continue; 746*0Sstevel@tonic-gate } 747*0Sstevel@tonic-gate /* LINTED */ 748*0Sstevel@tonic-gate value = pvalue; 749*0Sstevel@tonic-gate /* LINTED */ 750*0Sstevel@tonic-gate name = pname; 751*0Sstevel@tonic-gate /* LINTED */ 752*0Sstevel@tonic-gate symdef = psymdef; 753*0Sstevel@tonic-gate /* LINTED */ 754*0Sstevel@tonic-gate symref = psymref; 755*0Sstevel@tonic-gate /* LINTED */ 756*0Sstevel@tonic-gate _lmp = plmp; 757*0Sstevel@tonic-gate /* LINTED */ 758*0Sstevel@tonic-gate binfo = pbinfo; 759*0Sstevel@tonic-gate 760*0Sstevel@tonic-gate if ((LIST(_lmp)->lm_tflags | 761*0Sstevel@tonic-gate FLAGS1(_lmp)) & 762*0Sstevel@tonic-gate LML_TFLG_AUD_SYMBIND) { 763*0Sstevel@tonic-gate value = audit_symbind(lmp, _lmp, 764*0Sstevel@tonic-gate /* LINTED */ 765*0Sstevel@tonic-gate symdef, dsymndx, value, 766*0Sstevel@tonic-gate &sb_flags); 
767*0Sstevel@tonic-gate } 768*0Sstevel@tonic-gate } else { 769*0Sstevel@tonic-gate Slookup sl; 770*0Sstevel@tonic-gate uchar_t bind; 771*0Sstevel@tonic-gate 772*0Sstevel@tonic-gate /* 773*0Sstevel@tonic-gate * Lookup the symbol definition. 774*0Sstevel@tonic-gate */ 775*0Sstevel@tonic-gate name = (char *)(STRTAB(lmp) + 776*0Sstevel@tonic-gate symref->st_name); 777*0Sstevel@tonic-gate 778*0Sstevel@tonic-gate sl.sl_name = name; 779*0Sstevel@tonic-gate sl.sl_cmap = lmp; 780*0Sstevel@tonic-gate sl.sl_imap = 0; 781*0Sstevel@tonic-gate sl.sl_hash = 0; 782*0Sstevel@tonic-gate sl.sl_rsymndx = rsymndx; 783*0Sstevel@tonic-gate 784*0Sstevel@tonic-gate if (rtype == R_AMD64_COPY) 785*0Sstevel@tonic-gate sl.sl_flags = LKUP_COPY; 786*0Sstevel@tonic-gate else 787*0Sstevel@tonic-gate sl.sl_flags = LKUP_DEFT; 788*0Sstevel@tonic-gate 789*0Sstevel@tonic-gate sl.sl_flags |= LKUP_ALLCNTLIST; 790*0Sstevel@tonic-gate 791*0Sstevel@tonic-gate if (rtype != R_AMD64_JUMP_SLOT) 792*0Sstevel@tonic-gate sl.sl_flags |= LKUP_SPEC; 793*0Sstevel@tonic-gate 794*0Sstevel@tonic-gate bind = ELF_ST_BIND(symref->st_info); 795*0Sstevel@tonic-gate if (bind == STB_WEAK) 796*0Sstevel@tonic-gate sl.sl_flags |= LKUP_WEAK; 797*0Sstevel@tonic-gate 798*0Sstevel@tonic-gate symdef = lookup_sym(&sl, &_lmp, &binfo); 799*0Sstevel@tonic-gate 800*0Sstevel@tonic-gate /* 801*0Sstevel@tonic-gate * If the symbol is not found and the 802*0Sstevel@tonic-gate * reference was not to a weak symbol, 803*0Sstevel@tonic-gate * report an error. Weak references 804*0Sstevel@tonic-gate * may be unresolved. 
805*0Sstevel@tonic-gate * chkmsg: MSG_INTL(MSG_LDD_SYM_NFOUND) 806*0Sstevel@tonic-gate */ 807*0Sstevel@tonic-gate if (symdef == 0) { 808*0Sstevel@tonic-gate if (bind != STB_WEAK) { 809*0Sstevel@tonic-gate if (LIST(lmp)->lm_flags & 810*0Sstevel@tonic-gate LML_FLG_IGNRELERR) { 811*0Sstevel@tonic-gate continue; 812*0Sstevel@tonic-gate } else if (LIST(lmp)->lm_flags & 813*0Sstevel@tonic-gate LML_FLG_TRC_WARN) { 814*0Sstevel@tonic-gate (void) printf(MSG_INTL( 815*0Sstevel@tonic-gate MSG_LDD_SYM_NFOUND), 816*0Sstevel@tonic-gate demangle(name), 817*0Sstevel@tonic-gate NAME(lmp)); 818*0Sstevel@tonic-gate continue; 819*0Sstevel@tonic-gate } else { 820*0Sstevel@tonic-gate eprintf(ERR_FATAL, 821*0Sstevel@tonic-gate MSG_INTL(MSG_REL_NOSYM), 822*0Sstevel@tonic-gate NAME(lmp), 823*0Sstevel@tonic-gate demangle(name)); 824*0Sstevel@tonic-gate ret = 0; 825*0Sstevel@tonic-gate break; 826*0Sstevel@tonic-gate } 827*0Sstevel@tonic-gate } else { 828*0Sstevel@tonic-gate psymndx = rsymndx; 829*0Sstevel@tonic-gate psymdef = 0; 830*0Sstevel@tonic-gate 831*0Sstevel@tonic-gate DBG_CALL(Dbg_bind_weak( 832*0Sstevel@tonic-gate NAME(lmp), (caddr_t)roffset, 833*0Sstevel@tonic-gate (caddr_t) 834*0Sstevel@tonic-gate (roffset - basebgn), name)); 835*0Sstevel@tonic-gate continue; 836*0Sstevel@tonic-gate } 837*0Sstevel@tonic-gate } 838*0Sstevel@tonic-gate 839*0Sstevel@tonic-gate /* 840*0Sstevel@tonic-gate * If symbol was found in an object 841*0Sstevel@tonic-gate * other than the referencing object 842*0Sstevel@tonic-gate * then record the binding. 
843*0Sstevel@tonic-gate */ 844*0Sstevel@tonic-gate if ((lmp != _lmp) && ((FLAGS1(_lmp) & 845*0Sstevel@tonic-gate FL1_RT_NOINIFIN) == 0)) { 846*0Sstevel@tonic-gate if (alist_test(&bound, _lmp, 847*0Sstevel@tonic-gate sizeof (Rt_map *), 848*0Sstevel@tonic-gate AL_CNT_RELBIND) == 0) { 849*0Sstevel@tonic-gate ret = 0; 850*0Sstevel@tonic-gate break; 851*0Sstevel@tonic-gate } 852*0Sstevel@tonic-gate } 853*0Sstevel@tonic-gate 854*0Sstevel@tonic-gate /* 855*0Sstevel@tonic-gate * Calculate the location of definition; 856*0Sstevel@tonic-gate * symbol value plus base address of 857*0Sstevel@tonic-gate * containing shared object. 858*0Sstevel@tonic-gate */ 859*0Sstevel@tonic-gate value = symdef->st_value; 860*0Sstevel@tonic-gate if (!(FLAGS(_lmp) & FLG_RT_FIXED) && 861*0Sstevel@tonic-gate (symdef->st_shndx != SHN_ABS) && 862*0Sstevel@tonic-gate (ELF_ST_TYPE(symdef->st_info) != 863*0Sstevel@tonic-gate STT_TLS)) 864*0Sstevel@tonic-gate value += ADDR(_lmp); 865*0Sstevel@tonic-gate 866*0Sstevel@tonic-gate /* 867*0Sstevel@tonic-gate * Retain this symbol index and the 868*0Sstevel@tonic-gate * value in case it can be used for the 869*0Sstevel@tonic-gate * subsequent relocations. 
870*0Sstevel@tonic-gate */ 871*0Sstevel@tonic-gate if (rtype != R_AMD64_COPY) { 872*0Sstevel@tonic-gate psymndx = rsymndx; 873*0Sstevel@tonic-gate pvalue = value; 874*0Sstevel@tonic-gate pname = name; 875*0Sstevel@tonic-gate psymdef = symdef; 876*0Sstevel@tonic-gate psymref = symref; 877*0Sstevel@tonic-gate plmp = _lmp; 878*0Sstevel@tonic-gate pbinfo = binfo; 879*0Sstevel@tonic-gate } 880*0Sstevel@tonic-gate if ((LIST(_lmp)->lm_tflags | 881*0Sstevel@tonic-gate FLAGS1(_lmp)) & 882*0Sstevel@tonic-gate LML_TFLG_AUD_SYMBIND) { 883*0Sstevel@tonic-gate dsymndx = (((uintptr_t)symdef - 884*0Sstevel@tonic-gate (uintptr_t)SYMTAB(_lmp)) / 885*0Sstevel@tonic-gate SYMENT(_lmp)); 886*0Sstevel@tonic-gate value = audit_symbind(lmp, _lmp, 887*0Sstevel@tonic-gate symdef, dsymndx, value, 888*0Sstevel@tonic-gate &sb_flags); 889*0Sstevel@tonic-gate } 890*0Sstevel@tonic-gate } 891*0Sstevel@tonic-gate 892*0Sstevel@tonic-gate /* 893*0Sstevel@tonic-gate * If relocation is PC-relative, subtract 894*0Sstevel@tonic-gate * offset address. 895*0Sstevel@tonic-gate */ 896*0Sstevel@tonic-gate if (IS_PC_RELATIVE(rtype)) 897*0Sstevel@tonic-gate value -= roffset; 898*0Sstevel@tonic-gate 899*0Sstevel@tonic-gate /* 900*0Sstevel@tonic-gate * TLS relocation - value for DTPMOD64 901*0Sstevel@tonic-gate * relocation is the TLS modid. 902*0Sstevel@tonic-gate */ 903*0Sstevel@tonic-gate if (rtype == R_AMD64_DTPMOD64) 904*0Sstevel@tonic-gate value = TLSMODID(_lmp); 905*0Sstevel@tonic-gate else if ((rtype == R_AMD64_TPOFF64) || 906*0Sstevel@tonic-gate (rtype == R_AMD64_TPOFF32)) 907*0Sstevel@tonic-gate value = -(TLSSTATOFF(_lmp) - value); 908*0Sstevel@tonic-gate } 909*0Sstevel@tonic-gate } else { 910*0Sstevel@tonic-gate /* 911*0Sstevel@tonic-gate * Special case: 912*0Sstevel@tonic-gate * 913*0Sstevel@tonic-gate * A DTPMOD32 relocation is a local binding to a TLS 914*0Sstevel@tonic-gate * symbol. Fill in the TLSMODID for the current object. 
915*0Sstevel@tonic-gate */ 916*0Sstevel@tonic-gate if (rtype == R_AMD64_DTPMOD64) 917*0Sstevel@tonic-gate value = TLSMODID(lmp); 918*0Sstevel@tonic-gate else 919*0Sstevel@tonic-gate value = basebgn; 920*0Sstevel@tonic-gate name = (char *)0; 921*0Sstevel@tonic-gate } 922*0Sstevel@tonic-gate 923*0Sstevel@tonic-gate /* 924*0Sstevel@tonic-gate * If this object has relocations in the text segment, turn 925*0Sstevel@tonic-gate * off the write protect. 926*0Sstevel@tonic-gate */ 927*0Sstevel@tonic-gate if ((roffset < etext) && (textrel == 0)) { 928*0Sstevel@tonic-gate if (elf_set_prot(lmp, PROT_WRITE) == 0) { 929*0Sstevel@tonic-gate ret = 0; 930*0Sstevel@tonic-gate break; 931*0Sstevel@tonic-gate } 932*0Sstevel@tonic-gate textrel = 1; 933*0Sstevel@tonic-gate } 934*0Sstevel@tonic-gate 935*0Sstevel@tonic-gate /* 936*0Sstevel@tonic-gate * Call relocation routine to perform required relocation. 937*0Sstevel@tonic-gate */ 938*0Sstevel@tonic-gate DBG_CALL(Dbg_reloc_in(M_MACH, M_REL_SHT_TYPE, rel, name, NULL)); 939*0Sstevel@tonic-gate 940*0Sstevel@tonic-gate switch (rtype) { 941*0Sstevel@tonic-gate case R_AMD64_COPY: 942*0Sstevel@tonic-gate if (elf_copy_reloc(name, symref, lmp, (void *)roffset, 943*0Sstevel@tonic-gate symdef, _lmp, (const void *)value) == 0) 944*0Sstevel@tonic-gate ret = 0; 945*0Sstevel@tonic-gate break; 946*0Sstevel@tonic-gate case R_AMD64_JUMP_SLOT: 947*0Sstevel@tonic-gate if (((LIST(lmp)->lm_tflags | FLAGS1(lmp)) & 948*0Sstevel@tonic-gate (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) && 949*0Sstevel@tonic-gate AUDINFO(lmp)->ai_dynplts) { 950*0Sstevel@tonic-gate int fail = 0; 951*0Sstevel@tonic-gate int pltndx = (((ulong_t)rel - 952*0Sstevel@tonic-gate (uintptr_t)JMPREL(lmp)) / relsiz); 953*0Sstevel@tonic-gate int symndx = (((uintptr_t)symdef - 954*0Sstevel@tonic-gate (uintptr_t)SYMTAB(_lmp)) / 955*0Sstevel@tonic-gate SYMENT(_lmp)); 956*0Sstevel@tonic-gate 957*0Sstevel@tonic-gate (void) elf_plt_trace_write(roffset, lmp, _lmp, 958*0Sstevel@tonic-gate symdef, 
symndx, pltndx, (caddr_t)value, 959*0Sstevel@tonic-gate sb_flags, &fail); 960*0Sstevel@tonic-gate if (fail) 961*0Sstevel@tonic-gate ret = 0; 962*0Sstevel@tonic-gate } else { 963*0Sstevel@tonic-gate /* 964*0Sstevel@tonic-gate * Write standard PLT entry to jump directly 965*0Sstevel@tonic-gate * to newly bound function. 966*0Sstevel@tonic-gate */ 967*0Sstevel@tonic-gate DBG_CALL(Dbg_reloc_apply((Xword)roffset, 968*0Sstevel@tonic-gate (Xword)value)); 969*0Sstevel@tonic-gate *(ulong_t *)roffset = value; 970*0Sstevel@tonic-gate } 971*0Sstevel@tonic-gate break; 972*0Sstevel@tonic-gate default: 973*0Sstevel@tonic-gate value += reladd; 974*0Sstevel@tonic-gate /* 975*0Sstevel@tonic-gate * Write the relocation out. 976*0Sstevel@tonic-gate */ 977*0Sstevel@tonic-gate if (do_reloc(rtype, (uchar_t *)roffset, 978*0Sstevel@tonic-gate (Xword *)&value, name, NAME(lmp)) == 0) 979*0Sstevel@tonic-gate ret = 0; 980*0Sstevel@tonic-gate 981*0Sstevel@tonic-gate DBG_CALL(Dbg_reloc_apply((Xword)roffset, 982*0Sstevel@tonic-gate (Xword)value)); 983*0Sstevel@tonic-gate } 984*0Sstevel@tonic-gate 985*0Sstevel@tonic-gate if ((ret == 0) && 986*0Sstevel@tonic-gate ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0)) 987*0Sstevel@tonic-gate break; 988*0Sstevel@tonic-gate 989*0Sstevel@tonic-gate if (binfo) { 990*0Sstevel@tonic-gate DBG_CALL(Dbg_bind_global(NAME(lmp), (caddr_t)roffset, 991*0Sstevel@tonic-gate (caddr_t)(roffset - basebgn), (Xword)(-1), 992*0Sstevel@tonic-gate PLT_T_FULL, NAME(_lmp), (caddr_t)value, 993*0Sstevel@tonic-gate (caddr_t)symdef->st_value, name, binfo)); 994*0Sstevel@tonic-gate } 995*0Sstevel@tonic-gate } 996*0Sstevel@tonic-gate 997*0Sstevel@tonic-gate return (relocate_finish(lmp, bound, textrel, ret)); 998*0Sstevel@tonic-gate } 999*0Sstevel@tonic-gate 1000*0Sstevel@tonic-gate /* 1001*0Sstevel@tonic-gate * Initialize the first few got entries so that function calls go to 1002*0Sstevel@tonic-gate * elf_rtbndr: 1003*0Sstevel@tonic-gate * 1004*0Sstevel@tonic-gate * GOT[GOT_XLINKMAP] = 
the address of the link map 1005*0Sstevel@tonic-gate * GOT[GOT_XRTLD] = the address of rtbinder 1006*0Sstevel@tonic-gate */ 1007*0Sstevel@tonic-gate void 1008*0Sstevel@tonic-gate elf_plt_init(void *got, caddr_t l) 1009*0Sstevel@tonic-gate { 1010*0Sstevel@tonic-gate uint64_t *_got; 1011*0Sstevel@tonic-gate /* LINTED */ 1012*0Sstevel@tonic-gate Rt_map *lmp = (Rt_map *)l; 1013*0Sstevel@tonic-gate 1014*0Sstevel@tonic-gate _got = (uint64_t *)got + M_GOT_XLINKMAP; 1015*0Sstevel@tonic-gate *_got = (uint64_t)lmp; 1016*0Sstevel@tonic-gate _got = (uint64_t *)got + M_GOT_XRTLD; 1017*0Sstevel@tonic-gate *_got = (uint64_t)elf_rtbndr; 1018*0Sstevel@tonic-gate } 1019*0Sstevel@tonic-gate 1020*0Sstevel@tonic-gate /* 1021*0Sstevel@tonic-gate * Plt writing interface to allow debugging initialization to be generic. 1022*0Sstevel@tonic-gate */ 1023*0Sstevel@tonic-gate Pltbindtype 1024*0Sstevel@tonic-gate /* ARGSUSED1 */ 1025*0Sstevel@tonic-gate elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval, 1026*0Sstevel@tonic-gate Xword pltndx) 1027*0Sstevel@tonic-gate { 1028*0Sstevel@tonic-gate Rela *rel = (Rela*)rptr; 1029*0Sstevel@tonic-gate uintptr_t pltaddr; 1030*0Sstevel@tonic-gate 1031*0Sstevel@tonic-gate pltaddr = addr + rel->r_offset; 1032*0Sstevel@tonic-gate *(ulong_t *)pltaddr = (ulong_t)symval + rel->r_addend; 1033*0Sstevel@tonic-gate DBG_CALL(pltcntfull++); 1034*0Sstevel@tonic-gate return (PLT_T_FULL); 1035*0Sstevel@tonic-gate } 1036*0Sstevel@tonic-gate 1037*0Sstevel@tonic-gate /* 1038*0Sstevel@tonic-gate * Provide a machine specific interface to the conversion routine. By calling 1039*0Sstevel@tonic-gate * the machine specific version, rather than the generic version, we insure that 1040*0Sstevel@tonic-gate * the data tables/strings for all known machine versions aren't dragged into 1041*0Sstevel@tonic-gate * ld.so.1. 
1042*0Sstevel@tonic-gate */ 1043*0Sstevel@tonic-gate const char * 1044*0Sstevel@tonic-gate _conv_reloc_type_str(uint_t rel) 1045*0Sstevel@tonic-gate { 1046*0Sstevel@tonic-gate return (conv_reloc_amd64_type_str(rel)); 1047*0Sstevel@tonic-gate } 1048