/* AArch64-specific support for NN-bit ELF.
   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */

/* Notes on implementation:

   Thread Local Storage (TLS)

   Overview:

   The implementation currently supports both traditional TLS and TLS
   descriptors, but only general dynamic (GD).

   For traditional TLS the assembler will present us with code
   fragments of the form:

     adrp x0, :tlsgd:foo
			      R_AARCH64_TLSGD_ADR_PAGE21(foo)
     add  x0, x0, :tlsgd_lo12:foo
			      R_AARCH64_TLSGD_ADD_LO12_NC(foo)
     bl   __tls_get_addr
     nop

   For TLS descriptors the assembler will present us with code
   fragments of the form:

     adrp  x0, :tlsdesc:foo                R_AARCH64_TLSDESC_ADR_PAGE21(foo)
     ldr   x1, [x0, #:tlsdesc_lo12:foo]    R_AARCH64_TLSDESC_LD64_LO12(foo)
     add   x0, x0, #:tlsdesc_lo12:foo      R_AARCH64_TLSDESC_ADD_LO12(foo)
     .tlsdesccall foo
     blr   x1                              R_AARCH64_TLSDESC_CALL(foo)

   The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
   indicate that foo is thread local and should be accessed via the
   traditional TLS mechanism.

   The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
   against foo indicate that 'foo' is thread local and should be accessed
   via the TLS descriptor mechanism.

   The precise instruction sequence is only relevant from the
   perspective of linker relaxation, which is currently not implemented.

   The static linker must detect that 'foo' is a TLS object and
   allocate a double GOT entry.  The GOT entry must be created for both
   global and local TLS symbols.  Note that this is different from
   non-TLS local objects, which do not need a GOT entry.

   In the traditional TLS mechanism, the double GOT entry is used to
   provide the tls_index structure, containing the module and offset
   entries.  The static linker places the relocation R_AARCH64_TLS_DTPMOD
   on the module entry.  The loader will subsequently fix up this
   relocation with the module identity.

   For global traditional TLS symbols the static linker places an
   R_AARCH64_TLS_DTPREL relocation on the offset entry.  The loader
   will subsequently fix up the offset.  For local TLS symbols the static
   linker fixes up the offset itself.

   In the TLS descriptor mechanism the double GOT entry is used to
   provide the descriptor.  The static linker places the relocation
   R_AARCH64_TLSDESC on the first GOT slot.  The loader will
   subsequently fix this up.
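
   For illustration only (the authoritative definitions live in the
   loader, not in this file), the double GOT slot reserved for a
   general dynamic symbol can be pictured as

     struct tls_index { bfd_vma ti_module; bfd_vma ti_offset; };

   for traditional TLS, and as the two-word descriptor

     struct tlsdesc { bfd_vma resolver; bfd_vma argument; };

   for the TLS descriptor mechanism, whose words the loader fills in
   when it processes the R_AARCH64_TLSDESC relocation mentioned above.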

   Implementation:

   The handling of TLS symbols is implemented across a number of
   different backend functions.  The following is a top level view of
   what processing is performed where.

   The TLS implementation maintains state information for each TLS
   symbol.  The state information for local and global symbols is kept
   in different places.  Global symbols use generic BFD structures while
   local symbols use backend specific structures that are allocated and
   maintained entirely by the backend.

   The flow:

   elfNN_aarch64_check_relocs()

   This function is invoked for each relocation.

   The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
   R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
   spotted.  The local symbol data structures are created once, when the
   first local symbol is seen.

   The reference count for a symbol is incremented.  The GOT type for
   each symbol is marked as general dynamic.

   elfNN_aarch64_allocate_dynrelocs ()

   For each global symbol with a positive reference count we allocate a
   double GOT slot.  For a traditional TLS symbol we allocate space for
   two relocation entries on the GOT; for a TLS descriptor symbol we
   allocate space for one relocation on the slot.  Record the GOT offset
   for this symbol.

   elfNN_aarch64_size_dynamic_sections ()

   Iterate over all input BFDs, looking in the local symbol data
   structures constructed earlier for local TLS symbols, and allocate
   them double GOT slots along with space for a single GOT relocation.
   Update the local symbol structure to record the GOT offset allocated.

   elfNN_aarch64_relocate_section ()

   Calls elfNN_aarch64_final_link_relocate ()

   Emit the relevant TLS relocations against the GOT for each TLS
   symbol.  For local TLS symbols emit the GOT offset directly.  The GOT
   relocations are emitted only once, the first time a TLS symbol is
   encountered.  The implementation uses the LSB of the GOT offset to
   flag that the relevant GOT relocations for a symbol have been
   emitted.  All of the TLS code that uses the GOT offset needs to take
   care to mask out this flag bit before using the offset.
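
   As a sketch of that flag idiom (the names here are illustrative, not
   the actual helpers used by this file):

     off = <GOT offset recorded for the symbol>;
     if ((off & 1) == 0)
       {
         <emit the GOT relocation(s) for the symbol>;
         <record off | 1 so they are not emitted again>;
       }
     value = off & ~(bfd_vma) 1;   <always mask the flag before use>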

   elfNN_aarch64_final_link_relocate ()

   Fix up the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations.  */

#include "sysdep.h"
#include "bfd.h"
#include "libiberty.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "bfdlink.h"
#include "objalloc.h"
#include "elf/aarch64.h"
#include "elfxx-aarch64.h"
#include "cpu-aarch64.h"

#define ARCH_SIZE	NN

#if ARCH_SIZE == 64
#define AARCH64_R(NAME)		R_AARCH64_ ## NAME
#define AARCH64_R_STR(NAME)	"R_AARCH64_" #NAME
#define HOWTO64(...)		HOWTO (__VA_ARGS__)
#define HOWTO32(...)		EMPTY_HOWTO (0)
#define LOG_FILE_ALIGN	3
#define BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC BFD_RELOC_AARCH64_TLSDESC_LD64_LO12
#endif

#if ARCH_SIZE == 32
#define AARCH64_R(NAME)		R_AARCH64_P32_ ## NAME
#define AARCH64_R_STR(NAME)	"R_AARCH64_P32_" #NAME
#define HOWTO64(...)		EMPTY_HOWTO (0)
#define HOWTO32(...)		HOWTO (__VA_ARGS__)
#define LOG_FILE_ALIGN	2
#define BFD_RELOC_AARCH64_TLSDESC_LD32_LO12	BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
#define R_AARCH64_P32_TLSDESC_ADD_LO12		R_AARCH64_P32_TLSDESC_ADD_LO12_NC
#endif
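
/* As an example of the macro scheme above: when this file is compiled
   with ARCH_SIZE == 64, AARCH64_R (ABS32) expands to R_AARCH64_ABS32,
   AARCH64_R_STR (ABS32) to "R_AARCH64_ABS32", and HOWTO32 entries in
   the howto table below collapse to EMPTY_HOWTO (0).  With
   ARCH_SIZE == 32 the same uses expand to R_AARCH64_P32_ABS32 and
   "R_AARCH64_P32_ABS32", and the HOWTO64 entries collapse instead.  */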

#define IS_AARCH64_TLS_RELOC(R_TYPE)				\
  ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD			\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL			\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL			\
   || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))

#define IS_AARCH64_TLS_RELAX_RELOC(R_TYPE)			\
  ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD			\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR			\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR			\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21)

#define IS_AARCH64_TLSDESC_RELOC(R_TYPE)			\
  ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC			\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD			\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC	\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR			\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC		\
   || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
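
/* A sketch of how the classifiers above are typically consulted when
   deciding what kind of GOT entry a relocation implies; the GOT_* names
   here are illustrative only, not definitions made at this point:

     if (IS_AARCH64_TLSDESC_RELOC (bfd_r_type))
       got_type = GOT_TLSDESC_GD;
     else if (IS_AARCH64_TLS_RELOC (bfd_r_type))
       got_type = GOT_TLS_GD;
     else
       got_type = GOT_NORMAL;  */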

#define ELIMINATE_COPY_RELOCS 1

/* Return size of a relocation entry.  HTAB is the bfd's
   elf_aarch64_link_hash_table.  */
#define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))

/* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32.  */
#define GOT_ENTRY_SIZE			(ARCH_SIZE / 8)
#define PLT_ENTRY_SIZE			(32)
#define PLT_SMALL_ENTRY_SIZE		(16)
#define PLT_TLSDESC_ENTRY_SIZE		(32)
/* PLT sizes with BTI insn.  */
#define PLT_BTI_SMALL_ENTRY_SIZE	(24)
/* PLT sizes with PAC insn.  */
#define PLT_PAC_SMALL_ENTRY_SIZE	(24)
/* PLT sizes with BTI and PAC insn.  */
#define PLT_BTI_PAC_SMALL_ENTRY_SIZE	(24)

/* Encoding of the nop instruction.  */
#define INSN_NOP 0xd503201f

#define aarch64_compute_jump_table_size(htab)		\
  (((htab)->root.srelplt == NULL) ? 0			\
   : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)

/* The first entry in a procedure linkage table looks like this.  It is
   used when the distance between the PLTGOT and the PLT is < 4GB.  Note
   that the dynamic linker gets &PLTGOT[2] in x16 and needs to work out
   PLTGOT[1] by using an address of [x16,#-GOT_ENTRY_SIZE].  */
static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
{
  0xf0, 0x7b, 0xbf, 0xa9,	/* stp x16, x30, [sp, #-16]!  */
  0x10, 0x00, 0x00, 0x90,	/* adrp x16, (GOT+16)  */
#if ARCH_SIZE == 64
  0x11, 0x0A, 0x40, 0xf9,	/* ldr x17, [x16, #PLT_GOT+0x10]  */
  0x10, 0x42, 0x00, 0x91,	/* add x16, x16,#PLT_GOT+0x10  */
#else
  0x11, 0x0A, 0x40, 0xb9,	/* ldr w17, [x16, #PLT_GOT+0x8]  */
  0x10, 0x22, 0x00, 0x11,	/* add w16, w16,#PLT_GOT+0x8  */
#endif
  0x20, 0x02, 0x1f, 0xd6,	/* br x17  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop */
  0x1f, 0x20, 0x03, 0xd5,	/* nop */
  0x1f, 0x20, 0x03, 0xd5,	/* nop */
};

static const bfd_byte elfNN_aarch64_small_plt0_bti_entry[PLT_ENTRY_SIZE] =
{
  0x5f, 0x24, 0x03, 0xd5,	/* bti c.  */
  0xf0, 0x7b, 0xbf, 0xa9,	/* stp x16, x30, [sp, #-16]!  */
  0x10, 0x00, 0x00, 0x90,	/* adrp x16, (GOT+16)  */
#if ARCH_SIZE == 64
  0x11, 0x0A, 0x40, 0xf9,	/* ldr x17, [x16, #PLT_GOT+0x10]  */
  0x10, 0x42, 0x00, 0x91,	/* add x16, x16,#PLT_GOT+0x10  */
#else
  0x11, 0x0A, 0x40, 0xb9,	/* ldr w17, [x16, #PLT_GOT+0x8]  */
  0x10, 0x22, 0x00, 0x11,	/* add w16, w16,#PLT_GOT+0x8  */
#endif
  0x20, 0x02, 0x1f, 0xd6,	/* br x17  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop */
  0x1f, 0x20, 0x03, 0xd5,	/* nop */
};
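
/* Summary of the PLT0 stubs above, for illustration only: the adrp, ldr
   and add immediates are filled in at final link time so that x17 ends
   up holding PLTGOT[2] (conventionally the dynamic resolver's entry
   point) and x16 the address of that slot; the stub then branches to
   the resolver with the x16/x30 pair saved on the stack.  */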

/* A per-function entry in a procedure linkage table looks like this.
   These entries are used when the distance between the PLTGOT and the
   PLT is < 4GB.  Use the BTI versions of the PLTs when enabled.  */
static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
{
  0x10, 0x00, 0x00, 0x90,	/* adrp x16, PLTGOT + n * 8  */
#if ARCH_SIZE == 64
  0x11, 0x02, 0x40, 0xf9,	/* ldr x17, [x16, PLTGOT + n * 8] */
  0x10, 0x02, 0x00, 0x91,	/* add x16, x16, :lo12:PLTGOT + n * 8  */
#else
  0x11, 0x02, 0x40, 0xb9,	/* ldr w17, [x16, PLTGOT + n * 4] */
  0x10, 0x02, 0x00, 0x11,	/* add w16, w16, :lo12:PLTGOT + n * 4  */
#endif
  0x20, 0x02, 0x1f, 0xd6,	/* br x17.  */
};

static const bfd_byte
elfNN_aarch64_small_plt_bti_entry[PLT_BTI_SMALL_ENTRY_SIZE] =
{
  0x5f, 0x24, 0x03, 0xd5,	/* bti c.  */
  0x10, 0x00, 0x00, 0x90,	/* adrp x16, PLTGOT + n * 8  */
#if ARCH_SIZE == 64
  0x11, 0x02, 0x40, 0xf9,	/* ldr x17, [x16, PLTGOT + n * 8] */
  0x10, 0x02, 0x00, 0x91,	/* add x16, x16, :lo12:PLTGOT + n * 8  */
#else
  0x11, 0x02, 0x40, 0xb9,	/* ldr w17, [x16, PLTGOT + n * 4] */
  0x10, 0x02, 0x00, 0x11,	/* add w16, w16, :lo12:PLTGOT + n * 4  */
#endif
  0x20, 0x02, 0x1f, 0xd6,	/* br x17.  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop */
};

static const bfd_byte
elfNN_aarch64_small_plt_pac_entry[PLT_PAC_SMALL_ENTRY_SIZE] =
{
  0x10, 0x00, 0x00, 0x90,	/* adrp x16, PLTGOT + n * 8  */
#if ARCH_SIZE == 64
  0x11, 0x02, 0x40, 0xf9,	/* ldr x17, [x16, PLTGOT + n * 8] */
  0x10, 0x02, 0x00, 0x91,	/* add x16, x16, :lo12:PLTGOT + n * 8  */
#else
  0x11, 0x02, 0x40, 0xb9,	/* ldr w17, [x16, PLTGOT + n * 4] */
  0x10, 0x02, 0x00, 0x11,	/* add w16, w16, :lo12:PLTGOT + n * 4  */
#endif
  0x9f, 0x21, 0x03, 0xd5,	/* autia1716 */
  0x20, 0x02, 0x1f, 0xd6,	/* br x17.  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop */
};

static const bfd_byte
elfNN_aarch64_small_plt_bti_pac_entry[PLT_BTI_PAC_SMALL_ENTRY_SIZE] =
{
  0x5f, 0x24, 0x03, 0xd5,	/* bti c.  */
  0x10, 0x00, 0x00, 0x90,	/* adrp x16, PLTGOT + n * 8  */
#if ARCH_SIZE == 64
  0x11, 0x02, 0x40, 0xf9,	/* ldr x17, [x16, PLTGOT + n * 8] */
  0x10, 0x02, 0x00, 0x91,	/* add x16, x16, :lo12:PLTGOT + n * 8  */
#else
  0x11, 0x02, 0x40, 0xb9,	/* ldr w17, [x16, PLTGOT + n * 4] */
  0x10, 0x02, 0x00, 0x11,	/* add w16, w16, :lo12:PLTGOT + n * 4  */
#endif
  0x9f, 0x21, 0x03, 0xd5,	/* autia1716 */
  0x20, 0x02, 0x1f, 0xd6,	/* br x17.  */
};

static const bfd_byte
elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
{
  0xe2, 0x0f, 0xbf, 0xa9,	/* stp x2, x3, [sp, #-16]!  */
  0x02, 0x00, 0x00, 0x90,	/* adrp x2, 0 */
  0x03, 0x00, 0x00, 0x90,	/* adrp x3, 0 */
#if ARCH_SIZE == 64
  0x42, 0x00, 0x40, 0xf9,	/* ldr x2, [x2, #0] */
  0x63, 0x00, 0x00, 0x91,	/* add x3, x3, 0 */
#else
  0x42, 0x00, 0x40, 0xb9,	/* ldr w2, [x2, #0] */
  0x63, 0x00, 0x00, 0x11,	/* add w3, w3, 0 */
#endif
  0x40, 0x00, 0x1f, 0xd6,	/* br x2 */
  0x1f, 0x20, 0x03, 0xd5,	/* nop */
  0x1f, 0x20, 0x03, 0xd5,	/* nop */
};

static const bfd_byte
elfNN_aarch64_tlsdesc_small_plt_bti_entry[PLT_TLSDESC_ENTRY_SIZE] =
{
  0x5f, 0x24, 0x03, 0xd5,	/* bti c.  */
  0xe2, 0x0f, 0xbf, 0xa9,	/* stp x2, x3, [sp, #-16]!  */
  0x02, 0x00, 0x00, 0x90,	/* adrp x2, 0 */
  0x03, 0x00, 0x00, 0x90,	/* adrp x3, 0 */
#if ARCH_SIZE == 64
  0x42, 0x00, 0x40, 0xf9,	/* ldr x2, [x2, #0] */
  0x63, 0x00, 0x00, 0x91,	/* add x3, x3, 0 */
#else
  0x42, 0x00, 0x40, 0xb9,	/* ldr w2, [x2, #0] */
  0x63, 0x00, 0x00, 0x11,	/* add w3, w3, 0 */
#endif
  0x40, 0x00, 0x1f, 0xd6,	/* br x2 */
  0x1f, 0x20, 0x03, 0xd5,	/* nop */
};

#define elf_info_to_howto		elfNN_aarch64_info_to_howto
#define elf_info_to_howto_rel		elfNN_aarch64_info_to_howto

#define AARCH64_ELF_ABI_VERSION		0

/* In case we're on a 32-bit machine, construct a 64-bit "-1" value.  */
#define ALL_ONES (~ (bfd_vma) 0)

/* Indexed by the BFD internal reloc enumerators.
   Therefore, the table needs to be kept in sync with the
   BFD_RELOC_AARCH64_* values in reloc.c.  */

static reloc_howto_type elfNN_aarch64_howto_table[] =
{
  EMPTY_HOWTO (0),

  /* Basic data relocations.  */

  /* Deprecated, but retained for backwards compatibility.
*/ 443 HOWTO64 (R_AARCH64_NULL, /* type */ 444 0, /* rightshift */ 445 0, /* size */ 446 0, /* bitsize */ 447 false, /* pc_relative */ 448 0, /* bitpos */ 449 complain_overflow_dont, /* complain_on_overflow */ 450 bfd_elf_generic_reloc, /* special_function */ 451 "R_AARCH64_NULL", /* name */ 452 false, /* partial_inplace */ 453 0, /* src_mask */ 454 0, /* dst_mask */ 455 false), /* pcrel_offset */ 456 HOWTO (R_AARCH64_NONE, /* type */ 457 0, /* rightshift */ 458 0, /* size */ 459 0, /* bitsize */ 460 false, /* pc_relative */ 461 0, /* bitpos */ 462 complain_overflow_dont, /* complain_on_overflow */ 463 bfd_elf_generic_reloc, /* special_function */ 464 "R_AARCH64_NONE", /* name */ 465 false, /* partial_inplace */ 466 0, /* src_mask */ 467 0, /* dst_mask */ 468 false), /* pcrel_offset */ 469 470 /* .xword: (S+A) */ 471 HOWTO64 (AARCH64_R (ABS64), /* type */ 472 0, /* rightshift */ 473 8, /* size */ 474 64, /* bitsize */ 475 false, /* pc_relative */ 476 0, /* bitpos */ 477 complain_overflow_unsigned, /* complain_on_overflow */ 478 bfd_elf_generic_reloc, /* special_function */ 479 AARCH64_R_STR (ABS64), /* name */ 480 false, /* partial_inplace */ 481 ALL_ONES, /* src_mask */ 482 ALL_ONES, /* dst_mask */ 483 false), /* pcrel_offset */ 484 485 /* .word: (S+A) */ 486 HOWTO (AARCH64_R (ABS32), /* type */ 487 0, /* rightshift */ 488 4, /* size */ 489 32, /* bitsize */ 490 false, /* pc_relative */ 491 0, /* bitpos */ 492 complain_overflow_unsigned, /* complain_on_overflow */ 493 bfd_elf_generic_reloc, /* special_function */ 494 AARCH64_R_STR (ABS32), /* name */ 495 false, /* partial_inplace */ 496 0xffffffff, /* src_mask */ 497 0xffffffff, /* dst_mask */ 498 false), /* pcrel_offset */ 499 500 /* .half: (S+A) */ 501 HOWTO (AARCH64_R (ABS16), /* type */ 502 0, /* rightshift */ 503 2, /* size */ 504 16, /* bitsize */ 505 false, /* pc_relative */ 506 0, /* bitpos */ 507 complain_overflow_unsigned, /* complain_on_overflow */ 508 bfd_elf_generic_reloc, /* special_function */ 509 AARCH64_R_STR (ABS16), /* name */ 510 false, /* partial_inplace */ 511 0xffff, /* src_mask */ 512 0xffff, /* dst_mask */ 513 false), /* pcrel_offset */ 514 515 /* .xword: (S+A-P) */ 516 HOWTO64 (AARCH64_R (PREL64), /* type */ 517 0, /* rightshift */ 518 8, /* size */ 519 64, /* bitsize */ 520 true, /* pc_relative */ 521 0, /* bitpos */ 522 complain_overflow_signed, /* complain_on_overflow */ 523 bfd_elf_generic_reloc, /* special_function */ 524 AARCH64_R_STR (PREL64), /* name */ 525 false, /* partial_inplace */ 526 ALL_ONES, /* src_mask */ 527 ALL_ONES, /* dst_mask */ 528 true), /* pcrel_offset */ 529 530 /* .word: (S+A-P) */ 531 HOWTO (AARCH64_R (PREL32), /* type */ 532 0, /* rightshift */ 533 4, /* size */ 534 32, /* bitsize */ 535 true, /* pc_relative */ 536 0, /* bitpos */ 537 complain_overflow_signed, /* complain_on_overflow */ 538 bfd_elf_generic_reloc, /* special_function */ 539 AARCH64_R_STR (PREL32), /* name */ 540 false, /* partial_inplace */ 541 0xffffffff, /* src_mask */ 542 0xffffffff, /* dst_mask */ 543 true), /* pcrel_offset */ 544 545 /* .half: (S+A-P) */ 546 HOWTO (AARCH64_R (PREL16), /* type */ 547 0, /* rightshift */ 548 2, /* size */ 549 16, /* bitsize */ 550 true, /* pc_relative */ 551 0, /* bitpos */ 552 complain_overflow_signed, /* complain_on_overflow */ 553 bfd_elf_generic_reloc, /* special_function */ 554 AARCH64_R_STR (PREL16), /* name */ 555 false, /* partial_inplace */ 556 0xffff, /* src_mask */ 557 0xffff, /* dst_mask */ 558 true), /* pcrel_offset */ 559 560 /* Group relocations to create a 16, 32, 48 
or 64 bit 561 unsigned data or abs address inline. */ 562 563 /* MOVZ: ((S+A) >> 0) & 0xffff */ 564 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */ 565 0, /* rightshift */ 566 4, /* size */ 567 16, /* bitsize */ 568 false, /* pc_relative */ 569 0, /* bitpos */ 570 complain_overflow_unsigned, /* complain_on_overflow */ 571 bfd_elf_generic_reloc, /* special_function */ 572 AARCH64_R_STR (MOVW_UABS_G0), /* name */ 573 false, /* partial_inplace */ 574 0xffff, /* src_mask */ 575 0xffff, /* dst_mask */ 576 false), /* pcrel_offset */ 577 578 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */ 579 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */ 580 0, /* rightshift */ 581 4, /* size */ 582 16, /* bitsize */ 583 false, /* pc_relative */ 584 0, /* bitpos */ 585 complain_overflow_dont, /* complain_on_overflow */ 586 bfd_elf_generic_reloc, /* special_function */ 587 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */ 588 false, /* partial_inplace */ 589 0xffff, /* src_mask */ 590 0xffff, /* dst_mask */ 591 false), /* pcrel_offset */ 592 593 /* MOVZ: ((S+A) >> 16) & 0xffff */ 594 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */ 595 16, /* rightshift */ 596 4, /* size */ 597 16, /* bitsize */ 598 false, /* pc_relative */ 599 0, /* bitpos */ 600 complain_overflow_unsigned, /* complain_on_overflow */ 601 bfd_elf_generic_reloc, /* special_function */ 602 AARCH64_R_STR (MOVW_UABS_G1), /* name */ 603 false, /* partial_inplace */ 604 0xffff, /* src_mask */ 605 0xffff, /* dst_mask */ 606 false), /* pcrel_offset */ 607 608 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */ 609 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */ 610 16, /* rightshift */ 611 4, /* size */ 612 16, /* bitsize */ 613 false, /* pc_relative */ 614 0, /* bitpos */ 615 complain_overflow_dont, /* complain_on_overflow */ 616 bfd_elf_generic_reloc, /* special_function */ 617 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */ 618 false, /* partial_inplace */ 619 0xffff, /* src_mask */ 620 0xffff, /* dst_mask */ 621 false), /* pcrel_offset */ 622 623 /* MOVZ: ((S+A) >> 32) & 0xffff */ 624 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */ 625 32, /* rightshift */ 626 4, /* size */ 627 16, /* bitsize */ 628 false, /* pc_relative */ 629 0, /* bitpos */ 630 complain_overflow_unsigned, /* complain_on_overflow */ 631 bfd_elf_generic_reloc, /* special_function */ 632 AARCH64_R_STR (MOVW_UABS_G2), /* name */ 633 false, /* partial_inplace */ 634 0xffff, /* src_mask */ 635 0xffff, /* dst_mask */ 636 false), /* pcrel_offset */ 637 638 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */ 639 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */ 640 32, /* rightshift */ 641 4, /* size */ 642 16, /* bitsize */ 643 false, /* pc_relative */ 644 0, /* bitpos */ 645 complain_overflow_dont, /* complain_on_overflow */ 646 bfd_elf_generic_reloc, /* special_function */ 647 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */ 648 false, /* partial_inplace */ 649 0xffff, /* src_mask */ 650 0xffff, /* dst_mask */ 651 false), /* pcrel_offset */ 652 653 /* MOVZ: ((S+A) >> 48) & 0xffff */ 654 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */ 655 48, /* rightshift */ 656 4, /* size */ 657 16, /* bitsize */ 658 false, /* pc_relative */ 659 0, /* bitpos */ 660 complain_overflow_unsigned, /* complain_on_overflow */ 661 bfd_elf_generic_reloc, /* special_function */ 662 AARCH64_R_STR (MOVW_UABS_G3), /* name */ 663 false, /* partial_inplace */ 664 0xffff, /* src_mask */ 665 0xffff, /* dst_mask */ 666 false), /* pcrel_offset */ 667 668 /* Group relocations to create high part of a 16, 32, 48 or 64 bit 669 signed data 
or abs address inline. Will change instruction 670 to MOVN or MOVZ depending on sign of calculated value. */ 671 672 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */ 673 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */ 674 0, /* rightshift */ 675 4, /* size */ 676 17, /* bitsize */ 677 false, /* pc_relative */ 678 0, /* bitpos */ 679 complain_overflow_signed, /* complain_on_overflow */ 680 bfd_elf_generic_reloc, /* special_function */ 681 AARCH64_R_STR (MOVW_SABS_G0), /* name */ 682 false, /* partial_inplace */ 683 0xffff, /* src_mask */ 684 0xffff, /* dst_mask */ 685 false), /* pcrel_offset */ 686 687 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */ 688 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */ 689 16, /* rightshift */ 690 4, /* size */ 691 17, /* bitsize */ 692 false, /* pc_relative */ 693 0, /* bitpos */ 694 complain_overflow_signed, /* complain_on_overflow */ 695 bfd_elf_generic_reloc, /* special_function */ 696 AARCH64_R_STR (MOVW_SABS_G1), /* name */ 697 false, /* partial_inplace */ 698 0xffff, /* src_mask */ 699 0xffff, /* dst_mask */ 700 false), /* pcrel_offset */ 701 702 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */ 703 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */ 704 32, /* rightshift */ 705 4, /* size */ 706 17, /* bitsize */ 707 false, /* pc_relative */ 708 0, /* bitpos */ 709 complain_overflow_signed, /* complain_on_overflow */ 710 bfd_elf_generic_reloc, /* special_function */ 711 AARCH64_R_STR (MOVW_SABS_G2), /* name */ 712 false, /* partial_inplace */ 713 0xffff, /* src_mask */ 714 0xffff, /* dst_mask */ 715 false), /* pcrel_offset */ 716 717 /* Group relocations to create a 16, 32, 48 or 64 bit 718 PC relative address inline. */ 719 720 /* MOV[NZ]: ((S+A-P) >> 0) & 0xffff */ 721 HOWTO (AARCH64_R (MOVW_PREL_G0), /* type */ 722 0, /* rightshift */ 723 4, /* size */ 724 17, /* bitsize */ 725 true, /* pc_relative */ 726 0, /* bitpos */ 727 complain_overflow_signed, /* complain_on_overflow */ 728 bfd_elf_generic_reloc, /* special_function */ 729 AARCH64_R_STR (MOVW_PREL_G0), /* name */ 730 false, /* partial_inplace */ 731 0xffff, /* src_mask */ 732 0xffff, /* dst_mask */ 733 true), /* pcrel_offset */ 734 735 /* MOVK: ((S+A-P) >> 0) & 0xffff [no overflow check] */ 736 HOWTO (AARCH64_R (MOVW_PREL_G0_NC), /* type */ 737 0, /* rightshift */ 738 4, /* size */ 739 16, /* bitsize */ 740 true, /* pc_relative */ 741 0, /* bitpos */ 742 complain_overflow_dont, /* complain_on_overflow */ 743 bfd_elf_generic_reloc, /* special_function */ 744 AARCH64_R_STR (MOVW_PREL_G0_NC), /* name */ 745 false, /* partial_inplace */ 746 0xffff, /* src_mask */ 747 0xffff, /* dst_mask */ 748 true), /* pcrel_offset */ 749 750 /* MOV[NZ]: ((S+A-P) >> 16) & 0xffff */ 751 HOWTO (AARCH64_R (MOVW_PREL_G1), /* type */ 752 16, /* rightshift */ 753 4, /* size */ 754 17, /* bitsize */ 755 true, /* pc_relative */ 756 0, /* bitpos */ 757 complain_overflow_signed, /* complain_on_overflow */ 758 bfd_elf_generic_reloc, /* special_function */ 759 AARCH64_R_STR (MOVW_PREL_G1), /* name */ 760 false, /* partial_inplace */ 761 0xffff, /* src_mask */ 762 0xffff, /* dst_mask */ 763 true), /* pcrel_offset */ 764 765 /* MOVK: ((S+A-P) >> 16) & 0xffff [no overflow check] */ 766 HOWTO64 (AARCH64_R (MOVW_PREL_G1_NC), /* type */ 767 16, /* rightshift */ 768 4, /* size */ 769 16, /* bitsize */ 770 true, /* pc_relative */ 771 0, /* bitpos */ 772 complain_overflow_dont, /* complain_on_overflow */ 773 bfd_elf_generic_reloc, /* special_function */ 774 AARCH64_R_STR (MOVW_PREL_G1_NC), /* name */ 775 false, /* partial_inplace */ 776 0xffff, /* src_mask */ 777 0xffff, /* 
dst_mask */ 778 true), /* pcrel_offset */ 779 780 /* MOV[NZ]: ((S+A-P) >> 32) & 0xffff */ 781 HOWTO64 (AARCH64_R (MOVW_PREL_G2), /* type */ 782 32, /* rightshift */ 783 4, /* size */ 784 17, /* bitsize */ 785 true, /* pc_relative */ 786 0, /* bitpos */ 787 complain_overflow_signed, /* complain_on_overflow */ 788 bfd_elf_generic_reloc, /* special_function */ 789 AARCH64_R_STR (MOVW_PREL_G2), /* name */ 790 false, /* partial_inplace */ 791 0xffff, /* src_mask */ 792 0xffff, /* dst_mask */ 793 true), /* pcrel_offset */ 794 795 /* MOVK: ((S+A-P) >> 32) & 0xffff [no overflow check] */ 796 HOWTO64 (AARCH64_R (MOVW_PREL_G2_NC), /* type */ 797 32, /* rightshift */ 798 4, /* size */ 799 16, /* bitsize */ 800 true, /* pc_relative */ 801 0, /* bitpos */ 802 complain_overflow_dont, /* complain_on_overflow */ 803 bfd_elf_generic_reloc, /* special_function */ 804 AARCH64_R_STR (MOVW_PREL_G2_NC), /* name */ 805 false, /* partial_inplace */ 806 0xffff, /* src_mask */ 807 0xffff, /* dst_mask */ 808 true), /* pcrel_offset */ 809 810 /* MOV[NZ]: ((S+A-P) >> 48) & 0xffff */ 811 HOWTO64 (AARCH64_R (MOVW_PREL_G3), /* type */ 812 48, /* rightshift */ 813 4, /* size */ 814 16, /* bitsize */ 815 true, /* pc_relative */ 816 0, /* bitpos */ 817 complain_overflow_dont, /* complain_on_overflow */ 818 bfd_elf_generic_reloc, /* special_function */ 819 AARCH64_R_STR (MOVW_PREL_G3), /* name */ 820 false, /* partial_inplace */ 821 0xffff, /* src_mask */ 822 0xffff, /* dst_mask */ 823 true), /* pcrel_offset */ 824 825 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store 826 addresses: PG(x) is (x & ~0xfff). */ 827 828 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */ 829 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */ 830 2, /* rightshift */ 831 4, /* size */ 832 19, /* bitsize */ 833 true, /* pc_relative */ 834 0, /* bitpos */ 835 complain_overflow_signed, /* complain_on_overflow */ 836 bfd_elf_generic_reloc, /* special_function */ 837 AARCH64_R_STR (LD_PREL_LO19), /* name */ 838 false, /* partial_inplace */ 839 0x7ffff, /* src_mask */ 840 0x7ffff, /* dst_mask */ 841 true), /* pcrel_offset */ 842 843 /* ADR: (S+A-P) & 0x1fffff */ 844 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */ 845 0, /* rightshift */ 846 4, /* size */ 847 21, /* bitsize */ 848 true, /* pc_relative */ 849 0, /* bitpos */ 850 complain_overflow_signed, /* complain_on_overflow */ 851 bfd_elf_generic_reloc, /* special_function */ 852 AARCH64_R_STR (ADR_PREL_LO21), /* name */ 853 false, /* partial_inplace */ 854 0x1fffff, /* src_mask */ 855 0x1fffff, /* dst_mask */ 856 true), /* pcrel_offset */ 857 858 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */ 859 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */ 860 12, /* rightshift */ 861 4, /* size */ 862 21, /* bitsize */ 863 true, /* pc_relative */ 864 0, /* bitpos */ 865 complain_overflow_signed, /* complain_on_overflow */ 866 bfd_elf_generic_reloc, /* special_function */ 867 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */ 868 false, /* partial_inplace */ 869 0x1fffff, /* src_mask */ 870 0x1fffff, /* dst_mask */ 871 true), /* pcrel_offset */ 872 873 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */ 874 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */ 875 12, /* rightshift */ 876 4, /* size */ 877 21, /* bitsize */ 878 true, /* pc_relative */ 879 0, /* bitpos */ 880 complain_overflow_dont, /* complain_on_overflow */ 881 bfd_elf_generic_reloc, /* special_function */ 882 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */ 883 false, /* partial_inplace */ 884 0x1fffff, /* src_mask */ 885 0x1fffff, /* 
dst_mask */ 886 true), /* pcrel_offset */ 887 888 /* ADD: (S+A) & 0xfff [no overflow check] */ 889 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */ 890 0, /* rightshift */ 891 4, /* size */ 892 12, /* bitsize */ 893 false, /* pc_relative */ 894 10, /* bitpos */ 895 complain_overflow_dont, /* complain_on_overflow */ 896 bfd_elf_generic_reloc, /* special_function */ 897 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */ 898 false, /* partial_inplace */ 899 0x3ffc00, /* src_mask */ 900 0x3ffc00, /* dst_mask */ 901 false), /* pcrel_offset */ 902 903 /* LD/ST8: (S+A) & 0xfff */ 904 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */ 905 0, /* rightshift */ 906 4, /* size */ 907 12, /* bitsize */ 908 false, /* pc_relative */ 909 0, /* bitpos */ 910 complain_overflow_dont, /* complain_on_overflow */ 911 bfd_elf_generic_reloc, /* special_function */ 912 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */ 913 false, /* partial_inplace */ 914 0xfff, /* src_mask */ 915 0xfff, /* dst_mask */ 916 false), /* pcrel_offset */ 917 918 /* Relocations for control-flow instructions. */ 919 920 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */ 921 HOWTO (AARCH64_R (TSTBR14), /* type */ 922 2, /* rightshift */ 923 4, /* size */ 924 14, /* bitsize */ 925 true, /* pc_relative */ 926 0, /* bitpos */ 927 complain_overflow_signed, /* complain_on_overflow */ 928 bfd_elf_generic_reloc, /* special_function */ 929 AARCH64_R_STR (TSTBR14), /* name */ 930 false, /* partial_inplace */ 931 0x3fff, /* src_mask */ 932 0x3fff, /* dst_mask */ 933 true), /* pcrel_offset */ 934 935 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */ 936 HOWTO (AARCH64_R (CONDBR19), /* type */ 937 2, /* rightshift */ 938 4, /* size */ 939 19, /* bitsize */ 940 true, /* pc_relative */ 941 0, /* bitpos */ 942 complain_overflow_signed, /* complain_on_overflow */ 943 bfd_elf_generic_reloc, /* special_function */ 944 AARCH64_R_STR (CONDBR19), /* name */ 945 false, /* partial_inplace */ 946 0x7ffff, /* src_mask */ 947 0x7ffff, /* dst_mask */ 948 true), /* pcrel_offset */ 949 950 /* B: ((S+A-P) >> 2) & 0x3ffffff */ 951 HOWTO (AARCH64_R (JUMP26), /* type */ 952 2, /* rightshift */ 953 4, /* size */ 954 26, /* bitsize */ 955 true, /* pc_relative */ 956 0, /* bitpos */ 957 complain_overflow_signed, /* complain_on_overflow */ 958 bfd_elf_generic_reloc, /* special_function */ 959 AARCH64_R_STR (JUMP26), /* name */ 960 false, /* partial_inplace */ 961 0x3ffffff, /* src_mask */ 962 0x3ffffff, /* dst_mask */ 963 true), /* pcrel_offset */ 964 965 /* BL: ((S+A-P) >> 2) & 0x3ffffff */ 966 HOWTO (AARCH64_R (CALL26), /* type */ 967 2, /* rightshift */ 968 4, /* size */ 969 26, /* bitsize */ 970 true, /* pc_relative */ 971 0, /* bitpos */ 972 complain_overflow_signed, /* complain_on_overflow */ 973 bfd_elf_generic_reloc, /* special_function */ 974 AARCH64_R_STR (CALL26), /* name */ 975 false, /* partial_inplace */ 976 0x3ffffff, /* src_mask */ 977 0x3ffffff, /* dst_mask */ 978 true), /* pcrel_offset */ 979 980 /* LD/ST16: (S+A) & 0xffe */ 981 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */ 982 1, /* rightshift */ 983 4, /* size */ 984 12, /* bitsize */ 985 false, /* pc_relative */ 986 0, /* bitpos */ 987 complain_overflow_dont, /* complain_on_overflow */ 988 bfd_elf_generic_reloc, /* special_function */ 989 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */ 990 false, /* partial_inplace */ 991 0xffe, /* src_mask */ 992 0xffe, /* dst_mask */ 993 false), /* pcrel_offset */ 994 995 /* LD/ST32: (S+A) & 0xffc */ 996 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */ 997 2, /* rightshift */ 998 4, /* size */ 999 12, /* 
bitsize */ 1000 false, /* pc_relative */ 1001 0, /* bitpos */ 1002 complain_overflow_dont, /* complain_on_overflow */ 1003 bfd_elf_generic_reloc, /* special_function */ 1004 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */ 1005 false, /* partial_inplace */ 1006 0xffc, /* src_mask */ 1007 0xffc, /* dst_mask */ 1008 false), /* pcrel_offset */ 1009 1010 /* LD/ST64: (S+A) & 0xff8 */ 1011 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */ 1012 3, /* rightshift */ 1013 4, /* size */ 1014 12, /* bitsize */ 1015 false, /* pc_relative */ 1016 0, /* bitpos */ 1017 complain_overflow_dont, /* complain_on_overflow */ 1018 bfd_elf_generic_reloc, /* special_function */ 1019 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */ 1020 false, /* partial_inplace */ 1021 0xff8, /* src_mask */ 1022 0xff8, /* dst_mask */ 1023 false), /* pcrel_offset */ 1024 1025 /* LD/ST128: (S+A) & 0xff0 */ 1026 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */ 1027 4, /* rightshift */ 1028 4, /* size */ 1029 12, /* bitsize */ 1030 false, /* pc_relative */ 1031 0, /* bitpos */ 1032 complain_overflow_dont, /* complain_on_overflow */ 1033 bfd_elf_generic_reloc, /* special_function */ 1034 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */ 1035 false, /* partial_inplace */ 1036 0xff0, /* src_mask */ 1037 0xff0, /* dst_mask */ 1038 false), /* pcrel_offset */ 1039 1040 /* Set a load-literal immediate field to bits 1041 0x1FFFFC of G(S)-P */ 1042 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */ 1043 2, /* rightshift */ 1044 4, /* size */ 1045 19, /* bitsize */ 1046 true, /* pc_relative */ 1047 0, /* bitpos */ 1048 complain_overflow_signed, /* complain_on_overflow */ 1049 bfd_elf_generic_reloc, /* special_function */ 1050 AARCH64_R_STR (GOT_LD_PREL19), /* name */ 1051 false, /* partial_inplace */ 1052 0xffffe0, /* src_mask */ 1053 0xffffe0, /* dst_mask */ 1054 true), /* pcrel_offset */ 1055 1056 /* Get to the page for the GOT entry for the symbol 1057 (G(S) - P) using an ADRP instruction. */ 1058 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */ 1059 12, /* rightshift */ 1060 4, /* size */ 1061 21, /* bitsize */ 1062 true, /* pc_relative */ 1063 0, /* bitpos */ 1064 complain_overflow_dont, /* complain_on_overflow */ 1065 bfd_elf_generic_reloc, /* special_function */ 1066 AARCH64_R_STR (ADR_GOT_PAGE), /* name */ 1067 false, /* partial_inplace */ 1068 0x1fffff, /* src_mask */ 1069 0x1fffff, /* dst_mask */ 1070 true), /* pcrel_offset */ 1071 1072 /* LD64: GOT offset G(S) & 0xff8 */ 1073 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */ 1074 3, /* rightshift */ 1075 4, /* size */ 1076 12, /* bitsize */ 1077 false, /* pc_relative */ 1078 0, /* bitpos */ 1079 complain_overflow_dont, /* complain_on_overflow */ 1080 bfd_elf_generic_reloc, /* special_function */ 1081 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */ 1082 false, /* partial_inplace */ 1083 0xff8, /* src_mask */ 1084 0xff8, /* dst_mask */ 1085 false), /* pcrel_offset */ 1086 1087 /* LD32: GOT offset G(S) & 0xffc */ 1088 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */ 1089 2, /* rightshift */ 1090 4, /* size */ 1091 12, /* bitsize */ 1092 false, /* pc_relative */ 1093 0, /* bitpos */ 1094 complain_overflow_dont, /* complain_on_overflow */ 1095 bfd_elf_generic_reloc, /* special_function */ 1096 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */ 1097 false, /* partial_inplace */ 1098 0xffc, /* src_mask */ 1099 0xffc, /* dst_mask */ 1100 false), /* pcrel_offset */ 1101 1102 /* Lower 16 bits of GOT offset for the symbol. 
*/ 1103 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G0_NC), /* type */ 1104 0, /* rightshift */ 1105 4, /* size */ 1106 16, /* bitsize */ 1107 false, /* pc_relative */ 1108 0, /* bitpos */ 1109 complain_overflow_dont, /* complain_on_overflow */ 1110 bfd_elf_generic_reloc, /* special_function */ 1111 AARCH64_R_STR (MOVW_GOTOFF_G0_NC), /* name */ 1112 false, /* partial_inplace */ 1113 0xffff, /* src_mask */ 1114 0xffff, /* dst_mask */ 1115 false), /* pcrel_offset */ 1116 1117 /* Higher 16 bits of GOT offset for the symbol. */ 1118 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G1), /* type */ 1119 16, /* rightshift */ 1120 4, /* size */ 1121 16, /* bitsize */ 1122 false, /* pc_relative */ 1123 0, /* bitpos */ 1124 complain_overflow_unsigned, /* complain_on_overflow */ 1125 bfd_elf_generic_reloc, /* special_function */ 1126 AARCH64_R_STR (MOVW_GOTOFF_G1), /* name */ 1127 false, /* partial_inplace */ 1128 0xffff, /* src_mask */ 1129 0xffff, /* dst_mask */ 1130 false), /* pcrel_offset */ 1131 1132 /* LD64: GOT offset for the symbol. */ 1133 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */ 1134 3, /* rightshift */ 1135 4, /* size */ 1136 12, /* bitsize */ 1137 false, /* pc_relative */ 1138 0, /* bitpos */ 1139 complain_overflow_unsigned, /* complain_on_overflow */ 1140 bfd_elf_generic_reloc, /* special_function */ 1141 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */ 1142 false, /* partial_inplace */ 1143 0x7ff8, /* src_mask */ 1144 0x7ff8, /* dst_mask */ 1145 false), /* pcrel_offset */ 1146 1147 /* LD32: GOT offset to the page address of GOT table. 1148 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */ 1149 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */ 1150 2, /* rightshift */ 1151 4, /* size */ 1152 12, /* bitsize */ 1153 false, /* pc_relative */ 1154 0, /* bitpos */ 1155 complain_overflow_unsigned, /* complain_on_overflow */ 1156 bfd_elf_generic_reloc, /* special_function */ 1157 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */ 1158 false, /* partial_inplace */ 1159 0x5ffc, /* src_mask */ 1160 0x5ffc, /* dst_mask */ 1161 false), /* pcrel_offset */ 1162 1163 /* LD64: GOT offset to the page address of GOT table. 1164 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. */ 1165 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */ 1166 3, /* rightshift */ 1167 4, /* size */ 1168 12, /* bitsize */ 1169 false, /* pc_relative */ 1170 0, /* bitpos */ 1171 complain_overflow_unsigned, /* complain_on_overflow */ 1172 bfd_elf_generic_reloc, /* special_function */ 1173 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */ 1174 false, /* partial_inplace */ 1175 0x7ff8, /* src_mask */ 1176 0x7ff8, /* dst_mask */ 1177 false), /* pcrel_offset */ 1178 1179 /* Get to the page for the GOT entry for the symbol 1180 (G(S) - P) using an ADRP instruction. 
*/ 1181 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */ 1182 12, /* rightshift */ 1183 4, /* size */ 1184 21, /* bitsize */ 1185 true, /* pc_relative */ 1186 0, /* bitpos */ 1187 complain_overflow_dont, /* complain_on_overflow */ 1188 bfd_elf_generic_reloc, /* special_function */ 1189 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */ 1190 false, /* partial_inplace */ 1191 0x1fffff, /* src_mask */ 1192 0x1fffff, /* dst_mask */ 1193 true), /* pcrel_offset */ 1194 1195 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */ 1196 0, /* rightshift */ 1197 4, /* size */ 1198 21, /* bitsize */ 1199 true, /* pc_relative */ 1200 0, /* bitpos */ 1201 complain_overflow_dont, /* complain_on_overflow */ 1202 bfd_elf_generic_reloc, /* special_function */ 1203 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */ 1204 false, /* partial_inplace */ 1205 0x1fffff, /* src_mask */ 1206 0x1fffff, /* dst_mask */ 1207 true), /* pcrel_offset */ 1208 1209 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */ 1210 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */ 1211 0, /* rightshift */ 1212 4, /* size */ 1213 12, /* bitsize */ 1214 false, /* pc_relative */ 1215 0, /* bitpos */ 1216 complain_overflow_dont, /* complain_on_overflow */ 1217 bfd_elf_generic_reloc, /* special_function */ 1218 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */ 1219 false, /* partial_inplace */ 1220 0xfff, /* src_mask */ 1221 0xfff, /* dst_mask */ 1222 false), /* pcrel_offset */ 1223 1224 /* Lower 16 bits of GOT offset to tls_index. */ 1225 HOWTO64 (AARCH64_R (TLSGD_MOVW_G0_NC), /* type */ 1226 0, /* rightshift */ 1227 4, /* size */ 1228 16, /* bitsize */ 1229 false, /* pc_relative */ 1230 0, /* bitpos */ 1231 complain_overflow_dont, /* complain_on_overflow */ 1232 bfd_elf_generic_reloc, /* special_function */ 1233 AARCH64_R_STR (TLSGD_MOVW_G0_NC), /* name */ 1234 false, /* partial_inplace */ 1235 0xffff, /* src_mask */ 1236 0xffff, /* dst_mask */ 1237 false), /* pcrel_offset */ 1238 1239 /* Higher 16 bits of GOT offset to tls_index. 
*/ 1240 HOWTO64 (AARCH64_R (TLSGD_MOVW_G1), /* type */ 1241 16, /* rightshift */ 1242 4, /* size */ 1243 16, /* bitsize */ 1244 false, /* pc_relative */ 1245 0, /* bitpos */ 1246 complain_overflow_unsigned, /* complain_on_overflow */ 1247 bfd_elf_generic_reloc, /* special_function */ 1248 AARCH64_R_STR (TLSGD_MOVW_G1), /* name */ 1249 false, /* partial_inplace */ 1250 0xffff, /* src_mask */ 1251 0xffff, /* dst_mask */ 1252 false), /* pcrel_offset */ 1253 1254 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */ 1255 12, /* rightshift */ 1256 4, /* size */ 1257 21, /* bitsize */ 1258 false, /* pc_relative */ 1259 0, /* bitpos */ 1260 complain_overflow_dont, /* complain_on_overflow */ 1261 bfd_elf_generic_reloc, /* special_function */ 1262 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */ 1263 false, /* partial_inplace */ 1264 0x1fffff, /* src_mask */ 1265 0x1fffff, /* dst_mask */ 1266 false), /* pcrel_offset */ 1267 1268 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */ 1269 3, /* rightshift */ 1270 4, /* size */ 1271 12, /* bitsize */ 1272 false, /* pc_relative */ 1273 0, /* bitpos */ 1274 complain_overflow_dont, /* complain_on_overflow */ 1275 bfd_elf_generic_reloc, /* special_function */ 1276 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */ 1277 false, /* partial_inplace */ 1278 0xff8, /* src_mask */ 1279 0xff8, /* dst_mask */ 1280 false), /* pcrel_offset */ 1281 1282 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */ 1283 2, /* rightshift */ 1284 4, /* size */ 1285 12, /* bitsize */ 1286 false, /* pc_relative */ 1287 0, /* bitpos */ 1288 complain_overflow_dont, /* complain_on_overflow */ 1289 bfd_elf_generic_reloc, /* special_function */ 1290 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */ 1291 false, /* partial_inplace */ 1292 0xffc, /* src_mask */ 1293 0xffc, /* dst_mask */ 1294 false), /* pcrel_offset */ 1295 1296 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */ 1297 2, /* rightshift */ 1298 4, /* size */ 1299 19, /* bitsize */ 1300 false, /* pc_relative */ 1301 0, /* bitpos */ 1302 complain_overflow_dont, /* complain_on_overflow */ 1303 bfd_elf_generic_reloc, /* special_function */ 1304 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */ 1305 false, /* partial_inplace */ 1306 0x1ffffc, /* src_mask */ 1307 0x1ffffc, /* dst_mask */ 1308 false), /* pcrel_offset */ 1309 1310 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */ 1311 0, /* rightshift */ 1312 4, /* size */ 1313 16, /* bitsize */ 1314 false, /* pc_relative */ 1315 0, /* bitpos */ 1316 complain_overflow_dont, /* complain_on_overflow */ 1317 bfd_elf_generic_reloc, /* special_function */ 1318 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */ 1319 false, /* partial_inplace */ 1320 0xffff, /* src_mask */ 1321 0xffff, /* dst_mask */ 1322 false), /* pcrel_offset */ 1323 1324 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */ 1325 16, /* rightshift */ 1326 4, /* size */ 1327 16, /* bitsize */ 1328 false, /* pc_relative */ 1329 0, /* bitpos */ 1330 complain_overflow_unsigned, /* complain_on_overflow */ 1331 bfd_elf_generic_reloc, /* special_function */ 1332 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */ 1333 false, /* partial_inplace */ 1334 0xffff, /* src_mask */ 1335 0xffff, /* dst_mask */ 1336 false), /* pcrel_offset */ 1337 1338 /* ADD: bit[23:12] of byte offset to module TLS base address. 
*/ 1339 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_HI12), /* type */ 1340 12, /* rightshift */ 1341 4, /* size */ 1342 12, /* bitsize */ 1343 false, /* pc_relative */ 1344 0, /* bitpos */ 1345 complain_overflow_unsigned, /* complain_on_overflow */ 1346 bfd_elf_generic_reloc, /* special_function */ 1347 AARCH64_R_STR (TLSLD_ADD_DTPREL_HI12), /* name */ 1348 false, /* partial_inplace */ 1349 0xfff, /* src_mask */ 1350 0xfff, /* dst_mask */ 1351 false), /* pcrel_offset */ 1352 1353 /* Unsigned 12 bit byte offset to module TLS base address. */ 1354 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12), /* type */ 1355 0, /* rightshift */ 1356 4, /* size */ 1357 12, /* bitsize */ 1358 false, /* pc_relative */ 1359 0, /* bitpos */ 1360 complain_overflow_unsigned, /* complain_on_overflow */ 1361 bfd_elf_generic_reloc, /* special_function */ 1362 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12), /* name */ 1363 false, /* partial_inplace */ 1364 0xfff, /* src_mask */ 1365 0xfff, /* dst_mask */ 1366 false), /* pcrel_offset */ 1367 1368 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12. */ 1369 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12_NC), /* type */ 1370 0, /* rightshift */ 1371 4, /* size */ 1372 12, /* bitsize */ 1373 false, /* pc_relative */ 1374 0, /* bitpos */ 1375 complain_overflow_dont, /* complain_on_overflow */ 1376 bfd_elf_generic_reloc, /* special_function */ 1377 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12_NC), /* name */ 1378 false, /* partial_inplace */ 1379 0xfff, /* src_mask */ 1380 0xfff, /* dst_mask */ 1381 false), /* pcrel_offset */ 1382 1383 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */ 1384 HOWTO (AARCH64_R (TLSLD_ADD_LO12_NC), /* type */ 1385 0, /* rightshift */ 1386 4, /* size */ 1387 12, /* bitsize */ 1388 false, /* pc_relative */ 1389 0, /* bitpos */ 1390 complain_overflow_dont, /* complain_on_overflow */ 1391 bfd_elf_generic_reloc, /* special_function */ 1392 AARCH64_R_STR (TLSLD_ADD_LO12_NC), /* name */ 1393 false, /* partial_inplace */ 1394 0xfff, /* src_mask */ 1395 0xfff, /* dst_mask */ 1396 false), /* pcrel_offset */ 1397 1398 /* Get to the page for the GOT entry for the symbol 1399 (G(S) - P) using an ADRP instruction. */ 1400 HOWTO (AARCH64_R (TLSLD_ADR_PAGE21), /* type */ 1401 12, /* rightshift */ 1402 4, /* size */ 1403 21, /* bitsize */ 1404 true, /* pc_relative */ 1405 0, /* bitpos */ 1406 complain_overflow_signed, /* complain_on_overflow */ 1407 bfd_elf_generic_reloc, /* special_function */ 1408 AARCH64_R_STR (TLSLD_ADR_PAGE21), /* name */ 1409 false, /* partial_inplace */ 1410 0x1fffff, /* src_mask */ 1411 0x1fffff, /* dst_mask */ 1412 true), /* pcrel_offset */ 1413 1414 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */ 1415 0, /* rightshift */ 1416 4, /* size */ 1417 21, /* bitsize */ 1418 true, /* pc_relative */ 1419 0, /* bitpos */ 1420 complain_overflow_signed, /* complain_on_overflow */ 1421 bfd_elf_generic_reloc, /* special_function */ 1422 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */ 1423 false, /* partial_inplace */ 1424 0x1fffff, /* src_mask */ 1425 0x1fffff, /* dst_mask */ 1426 true), /* pcrel_offset */ 1427 1428 /* LD/ST16: bit[11:1] of byte offset to module TLS base address. 
*/ 1429 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12), /* type */ 1430 1, /* rightshift */ 1431 4, /* size */ 1432 11, /* bitsize */ 1433 false, /* pc_relative */ 1434 10, /* bitpos */ 1435 complain_overflow_unsigned, /* complain_on_overflow */ 1436 bfd_elf_generic_reloc, /* special_function */ 1437 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12), /* name */ 1438 false, /* partial_inplace */ 1439 0x1ffc00, /* src_mask */ 1440 0x1ffc00, /* dst_mask */ 1441 false), /* pcrel_offset */ 1442 1443 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12, but no overflow check. */ 1444 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12_NC), /* type */ 1445 1, /* rightshift */ 1446 4, /* size */ 1447 11, /* bitsize */ 1448 false, /* pc_relative */ 1449 10, /* bitpos */ 1450 complain_overflow_dont, /* complain_on_overflow */ 1451 bfd_elf_generic_reloc, /* special_function */ 1452 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12_NC), /* name */ 1453 false, /* partial_inplace */ 1454 0x1ffc00, /* src_mask */ 1455 0x1ffc00, /* dst_mask */ 1456 false), /* pcrel_offset */ 1457 1458 /* LD/ST32: bit[11:2] of byte offset to module TLS base address. */ 1459 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12), /* type */ 1460 2, /* rightshift */ 1461 4, /* size */ 1462 10, /* bitsize */ 1463 false, /* pc_relative */ 1464 10, /* bitpos */ 1465 complain_overflow_unsigned, /* complain_on_overflow */ 1466 bfd_elf_generic_reloc, /* special_function */ 1467 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12), /* name */ 1468 false, /* partial_inplace */ 1469 0x3ffc00, /* src_mask */ 1470 0x3ffc00, /* dst_mask */ 1471 false), /* pcrel_offset */ 1472 1473 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12, but no overflow check. */ 1474 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12_NC), /* type */ 1475 2, /* rightshift */ 1476 4, /* size */ 1477 10, /* bitsize */ 1478 false, /* pc_relative */ 1479 10, /* bitpos */ 1480 complain_overflow_dont, /* complain_on_overflow */ 1481 bfd_elf_generic_reloc, /* special_function */ 1482 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12_NC), /* name */ 1483 false, /* partial_inplace */ 1484 0xffc00, /* src_mask */ 1485 0xffc00, /* dst_mask */ 1486 false), /* pcrel_offset */ 1487 1488 /* LD/ST64: bit[11:3] of byte offset to module TLS base address. */ 1489 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12), /* type */ 1490 3, /* rightshift */ 1491 4, /* size */ 1492 9, /* bitsize */ 1493 false, /* pc_relative */ 1494 10, /* bitpos */ 1495 complain_overflow_unsigned, /* complain_on_overflow */ 1496 bfd_elf_generic_reloc, /* special_function */ 1497 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12), /* name */ 1498 false, /* partial_inplace */ 1499 0x3ffc00, /* src_mask */ 1500 0x3ffc00, /* dst_mask */ 1501 false), /* pcrel_offset */ 1502 1503 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12, but no overflow check. */ 1504 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12_NC), /* type */ 1505 3, /* rightshift */ 1506 4, /* size */ 1507 9, /* bitsize */ 1508 false, /* pc_relative */ 1509 10, /* bitpos */ 1510 complain_overflow_dont, /* complain_on_overflow */ 1511 bfd_elf_generic_reloc, /* special_function */ 1512 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12_NC), /* name */ 1513 false, /* partial_inplace */ 1514 0x7fc00, /* src_mask */ 1515 0x7fc00, /* dst_mask */ 1516 false), /* pcrel_offset */ 1517 1518 /* LD/ST8: bit[11:0] of byte offset to module TLS base address. 
*/ 1519 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12), /* type */ 1520 0, /* rightshift */ 1521 4, /* size */ 1522 12, /* bitsize */ 1523 false, /* pc_relative */ 1524 10, /* bitpos */ 1525 complain_overflow_unsigned, /* complain_on_overflow */ 1526 bfd_elf_generic_reloc, /* special_function */ 1527 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12), /* name */ 1528 false, /* partial_inplace */ 1529 0x3ffc00, /* src_mask */ 1530 0x3ffc00, /* dst_mask */ 1531 false), /* pcrel_offset */ 1532 1533 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12, but no overflow check. */ 1534 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12_NC), /* type */ 1535 0, /* rightshift */ 1536 4, /* size */ 1537 12, /* bitsize */ 1538 false, /* pc_relative */ 1539 10, /* bitpos */ 1540 complain_overflow_dont, /* complain_on_overflow */ 1541 bfd_elf_generic_reloc, /* special_function */ 1542 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12_NC), /* name */ 1543 false, /* partial_inplace */ 1544 0x3ffc00, /* src_mask */ 1545 0x3ffc00, /* dst_mask */ 1546 false), /* pcrel_offset */ 1547 1548 /* MOVZ: bit[15:0] of byte offset to module TLS base address. */ 1549 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0), /* type */ 1550 0, /* rightshift */ 1551 4, /* size */ 1552 16, /* bitsize */ 1553 false, /* pc_relative */ 1554 0, /* bitpos */ 1555 complain_overflow_unsigned, /* complain_on_overflow */ 1556 bfd_elf_generic_reloc, /* special_function */ 1557 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0), /* name */ 1558 false, /* partial_inplace */ 1559 0xffff, /* src_mask */ 1560 0xffff, /* dst_mask */ 1561 false), /* pcrel_offset */ 1562 1563 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */ 1564 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0_NC), /* type */ 1565 0, /* rightshift */ 1566 4, /* size */ 1567 16, /* bitsize */ 1568 false, /* pc_relative */ 1569 0, /* bitpos */ 1570 complain_overflow_dont, /* complain_on_overflow */ 1571 bfd_elf_generic_reloc, /* special_function */ 1572 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0_NC), /* name */ 1573 false, /* partial_inplace */ 1574 0xffff, /* src_mask */ 1575 0xffff, /* dst_mask */ 1576 false), /* pcrel_offset */ 1577 1578 /* MOVZ: bit[31:16] of byte offset to module TLS base address. */ 1579 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G1), /* type */ 1580 16, /* rightshift */ 1581 4, /* size */ 1582 16, /* bitsize */ 1583 false, /* pc_relative */ 1584 0, /* bitpos */ 1585 complain_overflow_unsigned, /* complain_on_overflow */ 1586 bfd_elf_generic_reloc, /* special_function */ 1587 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1), /* name */ 1588 false, /* partial_inplace */ 1589 0xffff, /* src_mask */ 1590 0xffff, /* dst_mask */ 1591 false), /* pcrel_offset */ 1592 1593 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */ 1594 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G1_NC), /* type */ 1595 16, /* rightshift */ 1596 4, /* size */ 1597 16, /* bitsize */ 1598 false, /* pc_relative */ 1599 0, /* bitpos */ 1600 complain_overflow_dont, /* complain_on_overflow */ 1601 bfd_elf_generic_reloc, /* special_function */ 1602 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1_NC), /* name */ 1603 false, /* partial_inplace */ 1604 0xffff, /* src_mask */ 1605 0xffff, /* dst_mask */ 1606 false), /* pcrel_offset */ 1607 1608 /* MOVZ: bit[47:32] of byte offset to module TLS base address. 
*/ 1609 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G2), /* type */ 1610 32, /* rightshift */ 1611 4, /* size */ 1612 16, /* bitsize */ 1613 false, /* pc_relative */ 1614 0, /* bitpos */ 1615 complain_overflow_unsigned, /* complain_on_overflow */ 1616 bfd_elf_generic_reloc, /* special_function */ 1617 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G2), /* name */ 1618 false, /* partial_inplace */ 1619 0xffff, /* src_mask */ 1620 0xffff, /* dst_mask */ 1621 false), /* pcrel_offset */ 1622 1623 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */ 1624 32, /* rightshift */ 1625 4, /* size */ 1626 16, /* bitsize */ 1627 false, /* pc_relative */ 1628 0, /* bitpos */ 1629 complain_overflow_unsigned, /* complain_on_overflow */ 1630 bfd_elf_generic_reloc, /* special_function */ 1631 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */ 1632 false, /* partial_inplace */ 1633 0xffff, /* src_mask */ 1634 0xffff, /* dst_mask */ 1635 false), /* pcrel_offset */ 1636 1637 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */ 1638 16, /* rightshift */ 1639 4, /* size */ 1640 16, /* bitsize */ 1641 false, /* pc_relative */ 1642 0, /* bitpos */ 1643 complain_overflow_dont, /* complain_on_overflow */ 1644 bfd_elf_generic_reloc, /* special_function */ 1645 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */ 1646 false, /* partial_inplace */ 1647 0xffff, /* src_mask */ 1648 0xffff, /* dst_mask */ 1649 false), /* pcrel_offset */ 1650 1651 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */ 1652 16, /* rightshift */ 1653 4, /* size */ 1654 16, /* bitsize */ 1655 false, /* pc_relative */ 1656 0, /* bitpos */ 1657 complain_overflow_dont, /* complain_on_overflow */ 1658 bfd_elf_generic_reloc, /* special_function */ 1659 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */ 1660 false, /* partial_inplace */ 1661 0xffff, /* src_mask */ 1662 0xffff, /* dst_mask */ 1663 false), /* pcrel_offset */ 1664 1665 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */ 1666 0, /* rightshift */ 1667 4, /* size */ 1668 16, /* bitsize */ 1669 false, /* pc_relative */ 1670 0, /* bitpos */ 1671 complain_overflow_dont, /* complain_on_overflow */ 1672 bfd_elf_generic_reloc, /* special_function */ 1673 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */ 1674 false, /* partial_inplace */ 1675 0xffff, /* src_mask */ 1676 0xffff, /* dst_mask */ 1677 false), /* pcrel_offset */ 1678 1679 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */ 1680 0, /* rightshift */ 1681 4, /* size */ 1682 16, /* bitsize */ 1683 false, /* pc_relative */ 1684 0, /* bitpos */ 1685 complain_overflow_dont, /* complain_on_overflow */ 1686 bfd_elf_generic_reloc, /* special_function */ 1687 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */ 1688 false, /* partial_inplace */ 1689 0xffff, /* src_mask */ 1690 0xffff, /* dst_mask */ 1691 false), /* pcrel_offset */ 1692 1693 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */ 1694 12, /* rightshift */ 1695 4, /* size */ 1696 12, /* bitsize */ 1697 false, /* pc_relative */ 1698 0, /* bitpos */ 1699 complain_overflow_unsigned, /* complain_on_overflow */ 1700 bfd_elf_generic_reloc, /* special_function */ 1701 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */ 1702 false, /* partial_inplace */ 1703 0xfff, /* src_mask */ 1704 0xfff, /* dst_mask */ 1705 false), /* pcrel_offset */ 1706 1707 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */ 1708 0, /* rightshift */ 1709 4, /* size */ 1710 12, /* bitsize */ 1711 false, /* pc_relative */ 1712 0, /* bitpos */ 1713 complain_overflow_unsigned, /* complain_on_overflow */ 1714 bfd_elf_generic_reloc, /* special_function */ 
1715 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */ 1716 false, /* partial_inplace */ 1717 0xfff, /* src_mask */ 1718 0xfff, /* dst_mask */ 1719 false), /* pcrel_offset */ 1720 1721 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */ 1722 0, /* rightshift */ 1723 4, /* size */ 1724 12, /* bitsize */ 1725 false, /* pc_relative */ 1726 0, /* bitpos */ 1727 complain_overflow_dont, /* complain_on_overflow */ 1728 bfd_elf_generic_reloc, /* special_function */ 1729 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */ 1730 false, /* partial_inplace */ 1731 0xfff, /* src_mask */ 1732 0xfff, /* dst_mask */ 1733 false), /* pcrel_offset */ 1734 1735 /* LD/ST16: bit[11:1] of byte offset to module TLS base address. */ 1736 HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12), /* type */ 1737 1, /* rightshift */ 1738 4, /* size */ 1739 11, /* bitsize */ 1740 false, /* pc_relative */ 1741 10, /* bitpos */ 1742 complain_overflow_unsigned, /* complain_on_overflow */ 1743 bfd_elf_generic_reloc, /* special_function */ 1744 AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12), /* name */ 1745 false, /* partial_inplace */ 1746 0x1ffc00, /* src_mask */ 1747 0x1ffc00, /* dst_mask */ 1748 false), /* pcrel_offset */ 1749 1750 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12, but no overflow check. */ 1751 HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12_NC), /* type */ 1752 1, /* rightshift */ 1753 4, /* size */ 1754 11, /* bitsize */ 1755 false, /* pc_relative */ 1756 10, /* bitpos */ 1757 complain_overflow_dont, /* complain_on_overflow */ 1758 bfd_elf_generic_reloc, /* special_function */ 1759 AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12_NC), /* name */ 1760 false, /* partial_inplace */ 1761 0x1ffc00, /* src_mask */ 1762 0x1ffc00, /* dst_mask */ 1763 false), /* pcrel_offset */ 1764 1765 /* LD/ST32: bit[11:2] of byte offset to module TLS base address. */ 1766 HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12), /* type */ 1767 2, /* rightshift */ 1768 4, /* size */ 1769 10, /* bitsize */ 1770 false, /* pc_relative */ 1771 10, /* bitpos */ 1772 complain_overflow_unsigned, /* complain_on_overflow */ 1773 bfd_elf_generic_reloc, /* special_function */ 1774 AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12), /* name */ 1775 false, /* partial_inplace */ 1776 0xffc00, /* src_mask */ 1777 0xffc00, /* dst_mask */ 1778 false), /* pcrel_offset */ 1779 1780 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12, but no overflow check. */ 1781 HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12_NC), /* type */ 1782 2, /* rightshift */ 1783 4, /* size */ 1784 10, /* bitsize */ 1785 false, /* pc_relative */ 1786 10, /* bitpos */ 1787 complain_overflow_dont, /* complain_on_overflow */ 1788 bfd_elf_generic_reloc, /* special_function */ 1789 AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12_NC), /* name */ 1790 false, /* partial_inplace */ 1791 0xffc00, /* src_mask */ 1792 0xffc00, /* dst_mask */ 1793 false), /* pcrel_offset */ 1794 1795 /* LD/ST64: bit[11:3] of byte offset to module TLS base address. */ 1796 HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12), /* type */ 1797 3, /* rightshift */ 1798 4, /* size */ 1799 9, /* bitsize */ 1800 false, /* pc_relative */ 1801 10, /* bitpos */ 1802 complain_overflow_unsigned, /* complain_on_overflow */ 1803 bfd_elf_generic_reloc, /* special_function */ 1804 AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12), /* name */ 1805 false, /* partial_inplace */ 1806 0x7fc00, /* src_mask */ 1807 0x7fc00, /* dst_mask */ 1808 false), /* pcrel_offset */ 1809 1810 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12, but no overflow check. 
*/ 1811 HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12_NC), /* type */ 1812 3, /* rightshift */ 1813 4, /* size */ 1814 9, /* bitsize */ 1815 false, /* pc_relative */ 1816 10, /* bitpos */ 1817 complain_overflow_dont, /* complain_on_overflow */ 1818 bfd_elf_generic_reloc, /* special_function */ 1819 AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12_NC), /* name */ 1820 false, /* partial_inplace */ 1821 0x7fc00, /* src_mask */ 1822 0x7fc00, /* dst_mask */ 1823 false), /* pcrel_offset */ 1824 1825 /* LD/ST8: bit[11:0] of byte offset to module TLS base address. */ 1826 HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12), /* type */ 1827 0, /* rightshift */ 1828 4, /* size */ 1829 12, /* bitsize */ 1830 false, /* pc_relative */ 1831 10, /* bitpos */ 1832 complain_overflow_unsigned, /* complain_on_overflow */ 1833 bfd_elf_generic_reloc, /* special_function */ 1834 AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12), /* name */ 1835 false, /* partial_inplace */ 1836 0x3ffc00, /* src_mask */ 1837 0x3ffc00, /* dst_mask */ 1838 false), /* pcrel_offset */ 1839 1840 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12, but no overflow check. */ 1841 HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12_NC), /* type */ 1842 0, /* rightshift */ 1843 4, /* size */ 1844 12, /* bitsize */ 1845 false, /* pc_relative */ 1846 10, /* bitpos */ 1847 complain_overflow_dont, /* complain_on_overflow */ 1848 bfd_elf_generic_reloc, /* special_function */ 1849 AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12_NC), /* name */ 1850 false, /* partial_inplace */ 1851 0x3ffc00, /* src_mask */ 1852 0x3ffc00, /* dst_mask */ 1853 false), /* pcrel_offset */ 1854 1855 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */ 1856 2, /* rightshift */ 1857 4, /* size */ 1858 19, /* bitsize */ 1859 true, /* pc_relative */ 1860 0, /* bitpos */ 1861 complain_overflow_dont, /* complain_on_overflow */ 1862 bfd_elf_generic_reloc, /* special_function */ 1863 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */ 1864 false, /* partial_inplace */ 1865 0x0ffffe0, /* src_mask */ 1866 0x0ffffe0, /* dst_mask */ 1867 true), /* pcrel_offset */ 1868 1869 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */ 1870 0, /* rightshift */ 1871 4, /* size */ 1872 21, /* bitsize */ 1873 true, /* pc_relative */ 1874 0, /* bitpos */ 1875 complain_overflow_dont, /* complain_on_overflow */ 1876 bfd_elf_generic_reloc, /* special_function */ 1877 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */ 1878 false, /* partial_inplace */ 1879 0x1fffff, /* src_mask */ 1880 0x1fffff, /* dst_mask */ 1881 true), /* pcrel_offset */ 1882 1883 /* Get to the page for the GOT entry for the symbol 1884 (G(S) - P) using an ADRP instruction. */ 1885 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */ 1886 12, /* rightshift */ 1887 4, /* size */ 1888 21, /* bitsize */ 1889 true, /* pc_relative */ 1890 0, /* bitpos */ 1891 complain_overflow_dont, /* complain_on_overflow */ 1892 bfd_elf_generic_reloc, /* special_function */ 1893 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */ 1894 false, /* partial_inplace */ 1895 0x1fffff, /* src_mask */ 1896 0x1fffff, /* dst_mask */ 1897 true), /* pcrel_offset */ 1898 1899 /* LD64: GOT offset G(S) & 0xff8. 
*/ 1900 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12), /* type */ 1901 3, /* rightshift */ 1902 4, /* size */ 1903 12, /* bitsize */ 1904 false, /* pc_relative */ 1905 0, /* bitpos */ 1906 complain_overflow_dont, /* complain_on_overflow */ 1907 bfd_elf_generic_reloc, /* special_function */ 1908 AARCH64_R_STR (TLSDESC_LD64_LO12), /* name */ 1909 false, /* partial_inplace */ 1910 0xff8, /* src_mask */ 1911 0xff8, /* dst_mask */ 1912 false), /* pcrel_offset */ 1913 1914 /* LD32: GOT offset G(S) & 0xffc. */ 1915 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */ 1916 2, /* rightshift */ 1917 4, /* size */ 1918 12, /* bitsize */ 1919 false, /* pc_relative */ 1920 0, /* bitpos */ 1921 complain_overflow_dont, /* complain_on_overflow */ 1922 bfd_elf_generic_reloc, /* special_function */ 1923 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */ 1924 false, /* partial_inplace */ 1925 0xffc, /* src_mask */ 1926 0xffc, /* dst_mask */ 1927 false), /* pcrel_offset */ 1928 1929 /* ADD: GOT offset G(S) & 0xfff. */ 1930 HOWTO (AARCH64_R (TLSDESC_ADD_LO12), /* type */ 1931 0, /* rightshift */ 1932 4, /* size */ 1933 12, /* bitsize */ 1934 false, /* pc_relative */ 1935 0, /* bitpos */ 1936 complain_overflow_dont,/* complain_on_overflow */ 1937 bfd_elf_generic_reloc, /* special_function */ 1938 AARCH64_R_STR (TLSDESC_ADD_LO12), /* name */ 1939 false, /* partial_inplace */ 1940 0xfff, /* src_mask */ 1941 0xfff, /* dst_mask */ 1942 false), /* pcrel_offset */ 1943 1944 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */ 1945 16, /* rightshift */ 1946 4, /* size */ 1947 12, /* bitsize */ 1948 false, /* pc_relative */ 1949 0, /* bitpos */ 1950 complain_overflow_unsigned, /* complain_on_overflow */ 1951 bfd_elf_generic_reloc, /* special_function */ 1952 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */ 1953 false, /* partial_inplace */ 1954 0xffff, /* src_mask */ 1955 0xffff, /* dst_mask */ 1956 false), /* pcrel_offset */ 1957 1958 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */ 1959 0, /* rightshift */ 1960 4, /* size */ 1961 12, /* bitsize */ 1962 false, /* pc_relative */ 1963 0, /* bitpos */ 1964 complain_overflow_dont, /* complain_on_overflow */ 1965 bfd_elf_generic_reloc, /* special_function */ 1966 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */ 1967 false, /* partial_inplace */ 1968 0xffff, /* src_mask */ 1969 0xffff, /* dst_mask */ 1970 false), /* pcrel_offset */ 1971 1972 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */ 1973 0, /* rightshift */ 1974 4, /* size */ 1975 12, /* bitsize */ 1976 false, /* pc_relative */ 1977 0, /* bitpos */ 1978 complain_overflow_dont, /* complain_on_overflow */ 1979 bfd_elf_generic_reloc, /* special_function */ 1980 AARCH64_R_STR (TLSDESC_LDR), /* name */ 1981 false, /* partial_inplace */ 1982 0x0, /* src_mask */ 1983 0x0, /* dst_mask */ 1984 false), /* pcrel_offset */ 1985 1986 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */ 1987 0, /* rightshift */ 1988 4, /* size */ 1989 12, /* bitsize */ 1990 false, /* pc_relative */ 1991 0, /* bitpos */ 1992 complain_overflow_dont, /* complain_on_overflow */ 1993 bfd_elf_generic_reloc, /* special_function */ 1994 AARCH64_R_STR (TLSDESC_ADD), /* name */ 1995 false, /* partial_inplace */ 1996 0x0, /* src_mask */ 1997 0x0, /* dst_mask */ 1998 false), /* pcrel_offset */ 1999 2000 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */ 2001 0, /* rightshift */ 2002 4, /* size */ 2003 0, /* bitsize */ 2004 false, /* pc_relative */ 2005 0, /* bitpos */ 2006 complain_overflow_dont, /* complain_on_overflow */ 2007 bfd_elf_generic_reloc, /* special_function */ 2008 AARCH64_R_STR 
(TLSDESC_CALL), /* name */ 2009 false, /* partial_inplace */ 2010 0x0, /* src_mask */ 2011 0x0, /* dst_mask */ 2012 false), /* pcrel_offset */ 2013
2014 HOWTO (AARCH64_R (COPY), /* type */ 2015 0, /* rightshift */ 2016 4, /* size */ 2017 64, /* bitsize */ 2018 false, /* pc_relative */ 2019 0, /* bitpos */ 2020 complain_overflow_bitfield, /* complain_on_overflow */ 2021 bfd_elf_generic_reloc, /* special_function */ 2022 AARCH64_R_STR (COPY), /* name */ 2023 true, /* partial_inplace */ 2024 0xffffffff, /* src_mask */ 2025 0xffffffff, /* dst_mask */ 2026 false), /* pcrel_offset */ 2027
2028 HOWTO (AARCH64_R (GLOB_DAT), /* type */ 2029 0, /* rightshift */ 2030 4, /* size */ 2031 64, /* bitsize */ 2032 false, /* pc_relative */ 2033 0, /* bitpos */ 2034 complain_overflow_bitfield, /* complain_on_overflow */ 2035 bfd_elf_generic_reloc, /* special_function */ 2036 AARCH64_R_STR (GLOB_DAT), /* name */ 2037 true, /* partial_inplace */ 2038 0xffffffff, /* src_mask */ 2039 0xffffffff, /* dst_mask */ 2040 false), /* pcrel_offset */ 2041
2042 HOWTO (AARCH64_R (JUMP_SLOT), /* type */ 2043 0, /* rightshift */ 2044 4, /* size */ 2045 64, /* bitsize */ 2046 false, /* pc_relative */ 2047 0, /* bitpos */ 2048 complain_overflow_bitfield, /* complain_on_overflow */ 2049 bfd_elf_generic_reloc, /* special_function */ 2050 AARCH64_R_STR (JUMP_SLOT), /* name */ 2051 true, /* partial_inplace */ 2052 0xffffffff, /* src_mask */ 2053 0xffffffff, /* dst_mask */ 2054 false), /* pcrel_offset */ 2055
2056 HOWTO (AARCH64_R (RELATIVE), /* type */ 2057 0, /* rightshift */ 2058 4, /* size */ 2059 64, /* bitsize */ 2060 false, /* pc_relative */ 2061 0, /* bitpos */ 2062 complain_overflow_bitfield, /* complain_on_overflow */ 2063 bfd_elf_generic_reloc, /* special_function */ 2064 AARCH64_R_STR (RELATIVE), /* name */ 2065 true, /* partial_inplace */ 2066 ALL_ONES, /* src_mask */ 2067 ALL_ONES, /* dst_mask */ 2068 false), /* pcrel_offset */ 2069
2070 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */ 2071 0, /* rightshift */ 2072 4, /* size */ 2073 64, /* bitsize */ 2074 false, /* pc_relative */ 2075 0, /* bitpos */ 2076 complain_overflow_dont, /* complain_on_overflow */ 2077 bfd_elf_generic_reloc, /* special_function */ 2078 #if ARCH_SIZE == 64 2079 AARCH64_R_STR (TLS_DTPMOD64), /* name */ 2080 #else 2081 AARCH64_R_STR (TLS_DTPMOD), /* name */ 2082 #endif 2083 false, /* partial_inplace */ 2084 0, /* src_mask */ 2085 ALL_ONES, /* dst_mask */ 2086 false), /* pcrel_offset */ 2087
2088 HOWTO (AARCH64_R (TLS_DTPREL), /* type */ 2089 0, /* rightshift */ 2090 4, /* size */ 2091 64, /* bitsize */ 2092 false, /* pc_relative */ 2093 0, /* bitpos */ 2094 complain_overflow_dont, /* complain_on_overflow */ 2095 bfd_elf_generic_reloc, /* special_function */ 2096 #if ARCH_SIZE == 64 2097 AARCH64_R_STR (TLS_DTPREL64), /* name */ 2098 #else 2099 AARCH64_R_STR (TLS_DTPREL), /* name */ 2100 #endif 2101 false, /* partial_inplace */ 2102 0, /* src_mask */ 2103 ALL_ONES, /* dst_mask */ 2104 false), /* pcrel_offset */ 2105
2106 HOWTO (AARCH64_R (TLS_TPREL), /* type */ 2107 0, /* rightshift */ 2108 4, /* size */ 2109 64, /* bitsize */ 2110 false, /* pc_relative */ 2111 0, /* bitpos */ 2112 complain_overflow_dont, /* complain_on_overflow */ 2113 bfd_elf_generic_reloc, /* special_function */ 2114 #if ARCH_SIZE == 64 2115 AARCH64_R_STR (TLS_TPREL64), /* name */ 2116 #else 2117 AARCH64_R_STR (TLS_TPREL), /* name */ 2118 #endif 2119 false, /* partial_inplace */ 2120 0, /* src_mask */ 2121 ALL_ONES, /* dst_mask */ 2122 false), /* pcrel_offset */ 2123
2124 HOWTO
(AARCH64_R (TLSDESC), /* type */ 2125 0, /* rightshift */ 2126 4, /* size */ 2127 64, /* bitsize */ 2128 false, /* pc_relative */ 2129 0, /* bitpos */ 2130 complain_overflow_dont, /* complain_on_overflow */ 2131 bfd_elf_generic_reloc, /* special_function */ 2132 AARCH64_R_STR (TLSDESC), /* name */ 2133 false, /* partial_inplace */ 2134 0, /* src_mask */ 2135 ALL_ONES, /* dst_mask */ 2136 false), /* pcrel_offset */ 2137 2138 HOWTO (AARCH64_R (IRELATIVE), /* type */ 2139 0, /* rightshift */ 2140 4, /* size */ 2141 64, /* bitsize */ 2142 false, /* pc_relative */ 2143 0, /* bitpos */ 2144 complain_overflow_bitfield, /* complain_on_overflow */ 2145 bfd_elf_generic_reloc, /* special_function */ 2146 AARCH64_R_STR (IRELATIVE), /* name */ 2147 false, /* partial_inplace */ 2148 0, /* src_mask */ 2149 ALL_ONES, /* dst_mask */ 2150 false), /* pcrel_offset */ 2151 2152 EMPTY_HOWTO (0), 2153 }; 2154 2155 static reloc_howto_type elfNN_aarch64_howto_none = 2156 HOWTO (R_AARCH64_NONE, /* type */ 2157 0, /* rightshift */ 2158 0, /* size */ 2159 0, /* bitsize */ 2160 false, /* pc_relative */ 2161 0, /* bitpos */ 2162 complain_overflow_dont,/* complain_on_overflow */ 2163 bfd_elf_generic_reloc, /* special_function */ 2164 "R_AARCH64_NONE", /* name */ 2165 false, /* partial_inplace */ 2166 0, /* src_mask */ 2167 0, /* dst_mask */ 2168 false); /* pcrel_offset */ 2169 2170 /* Given HOWTO, return the bfd internal relocation enumerator. */ 2171 2172 static bfd_reloc_code_real_type 2173 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto) 2174 { 2175 const int size 2176 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table); 2177 const ptrdiff_t offset 2178 = howto - elfNN_aarch64_howto_table; 2179 2180 if (offset > 0 && offset < size - 1) 2181 return BFD_RELOC_AARCH64_RELOC_START + offset; 2182 2183 if (howto == &elfNN_aarch64_howto_none) 2184 return BFD_RELOC_AARCH64_NONE; 2185 2186 return BFD_RELOC_AARCH64_RELOC_START; 2187 } 2188 2189 /* Given R_TYPE, return the bfd internal relocation enumerator. */ 2190 2191 static bfd_reloc_code_real_type 2192 elfNN_aarch64_bfd_reloc_from_type (bfd *abfd, unsigned int r_type) 2193 { 2194 static bool initialized_p = false; 2195 /* Indexed by R_TYPE, values are offsets in the howto_table. */ 2196 static unsigned int offsets[R_AARCH64_end]; 2197 2198 if (!initialized_p) 2199 { 2200 unsigned int i; 2201 2202 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i) 2203 if (elfNN_aarch64_howto_table[i].type != 0) 2204 offsets[elfNN_aarch64_howto_table[i].type] = i; 2205 2206 initialized_p = true; 2207 } 2208 2209 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL) 2210 return BFD_RELOC_AARCH64_NONE; 2211 2212 /* PR 17512: file: b371e70a. */ 2213 if (r_type >= R_AARCH64_end) 2214 { 2215 _bfd_error_handler (_("%pB: unsupported relocation type %#x"), 2216 abfd, r_type); 2217 bfd_set_error (bfd_error_bad_value); 2218 return BFD_RELOC_AARCH64_NONE; 2219 } 2220 2221 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type]; 2222 } 2223 2224 struct elf_aarch64_reloc_map 2225 { 2226 bfd_reloc_code_real_type from; 2227 bfd_reloc_code_real_type to; 2228 }; 2229 2230 /* Map bfd generic reloc to AArch64-specific reloc. */ 2231 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] = 2232 { 2233 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE}, 2234 2235 /* Basic data relocations. 
*/ 2236 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN}, 2237 {BFD_RELOC_64, BFD_RELOC_AARCH64_64}, 2238 {BFD_RELOC_32, BFD_RELOC_AARCH64_32}, 2239 {BFD_RELOC_16, BFD_RELOC_AARCH64_16}, 2240 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL}, 2241 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL}, 2242 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL}, 2243 }; 2244 2245 /* Given the bfd internal relocation enumerator in CODE, return the 2246 corresponding howto entry. */ 2247 2248 static reloc_howto_type * 2249 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code) 2250 { 2251 unsigned int i; 2252 2253 /* Convert bfd generic reloc to AArch64-specific reloc. */ 2254 if (code < BFD_RELOC_AARCH64_RELOC_START 2255 || code > BFD_RELOC_AARCH64_RELOC_END) 2256 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++) 2257 if (elf_aarch64_reloc_map[i].from == code) 2258 { 2259 code = elf_aarch64_reloc_map[i].to; 2260 break; 2261 } 2262 2263 if (code > BFD_RELOC_AARCH64_RELOC_START 2264 && code < BFD_RELOC_AARCH64_RELOC_END) 2265 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type) 2266 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START]; 2267 2268 if (code == BFD_RELOC_AARCH64_NONE) 2269 return &elfNN_aarch64_howto_none; 2270 2271 return NULL; 2272 } 2273 2274 static reloc_howto_type * 2275 elfNN_aarch64_howto_from_type (bfd *abfd, unsigned int r_type) 2276 { 2277 bfd_reloc_code_real_type val; 2278 reloc_howto_type *howto; 2279 2280 #if ARCH_SIZE == 32 2281 if (r_type > 256) 2282 { 2283 bfd_set_error (bfd_error_bad_value); 2284 return NULL; 2285 } 2286 #endif 2287 2288 if (r_type == R_AARCH64_NONE) 2289 return &elfNN_aarch64_howto_none; 2290 2291 val = elfNN_aarch64_bfd_reloc_from_type (abfd, r_type); 2292 howto = elfNN_aarch64_howto_from_bfd_reloc (val); 2293 2294 if (howto != NULL) 2295 return howto; 2296 2297 bfd_set_error (bfd_error_bad_value); 2298 return NULL; 2299 } 2300 2301 static bool 2302 elfNN_aarch64_info_to_howto (bfd *abfd, arelent *bfd_reloc, 2303 Elf_Internal_Rela *elf_reloc) 2304 { 2305 unsigned int r_type; 2306 2307 r_type = ELFNN_R_TYPE (elf_reloc->r_info); 2308 bfd_reloc->howto = elfNN_aarch64_howto_from_type (abfd, r_type); 2309 2310 if (bfd_reloc->howto == NULL) 2311 { 2312 /* xgettext:c-format */ 2313 _bfd_error_handler (_("%pB: unsupported relocation type %#x"), abfd, r_type); 2314 return false; 2315 } 2316 return true; 2317 } 2318 2319 static reloc_howto_type * 2320 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED, 2321 bfd_reloc_code_real_type code) 2322 { 2323 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code); 2324 2325 if (howto != NULL) 2326 return howto; 2327 2328 bfd_set_error (bfd_error_bad_value); 2329 return NULL; 2330 } 2331 2332 static reloc_howto_type * 2333 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED, 2334 const char *r_name) 2335 { 2336 unsigned int i; 2337 2338 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i) 2339 if (elfNN_aarch64_howto_table[i].name != NULL 2340 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0) 2341 return &elfNN_aarch64_howto_table[i]; 2342 2343 return NULL; 2344 } 2345 2346 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec 2347 #define TARGET_LITTLE_NAME "elfNN-littleaarch64" 2348 #define TARGET_BIG_SYM aarch64_elfNN_be_vec 2349 #define TARGET_BIG_NAME "elfNN-bigaarch64" 2350 2351 /* The linker script knows the section names for placement. 2352 The entry_names are used to do simple name mangling on the stubs. 
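   For example, with the default pattern a veneer generated for a call
   to memcpy is given the symbol name __memcpy_veneer.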
2353 Given a function name, and its type, the stub can be found. The 2354 name can be changed. The only requirement is the %s be present. */ 2355 #define STUB_ENTRY_NAME "__%s_veneer" 2356 2357 /* The name of the dynamic interpreter. This is put in the .interp 2358 section. */ 2359 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1" 2360 2361 #define AARCH64_MAX_FWD_BRANCH_OFFSET \ 2362 (((1 << 25) - 1) << 2) 2363 #define AARCH64_MAX_BWD_BRANCH_OFFSET \ 2364 (-((1 << 25) << 2)) 2365 2366 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1) 2367 #define AARCH64_MIN_ADRP_IMM (-(1 << 20)) 2368 2369 static int 2370 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place) 2371 { 2372 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12; 2373 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM; 2374 } 2375 2376 static int 2377 aarch64_valid_branch_p (bfd_vma value, bfd_vma place) 2378 { 2379 bfd_signed_vma offset = (bfd_signed_vma) (value - place); 2380 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET 2381 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET); 2382 } 2383 2384 static const uint32_t aarch64_adrp_branch_stub [] = 2385 { 2386 0x90000010, /* adrp ip0, X */ 2387 /* R_AARCH64_ADR_HI21_PCREL(X) */ 2388 0x91000210, /* add ip0, ip0, :lo12:X */ 2389 /* R_AARCH64_ADD_ABS_LO12_NC(X) */ 2390 0xd61f0200, /* br ip0 */ 2391 }; 2392 2393 static const uint32_t aarch64_long_branch_stub[] = 2394 { 2395 #if ARCH_SIZE == 64 2396 0x58000090, /* ldr ip0, 1f */ 2397 #else 2398 0x18000090, /* ldr wip0, 1f */ 2399 #endif 2400 0x10000011, /* adr ip1, #0 */ 2401 0x8b110210, /* add ip0, ip0, ip1 */ 2402 0xd61f0200, /* br ip0 */ 2403 0x00000000, /* 1: .xword or .word 2404 R_AARCH64_PRELNN(X) + 12 2405 */ 2406 0x00000000, 2407 }; 2408 2409 static const uint32_t aarch64_erratum_835769_stub[] = 2410 { 2411 0x00000000, /* Placeholder for multiply accumulate. */ 2412 0x14000000, /* b <label> */ 2413 }; 2414 2415 static const uint32_t aarch64_erratum_843419_stub[] = 2416 { 2417 0x00000000, /* Placeholder for LDR instruction. */ 2418 0x14000000, /* b <label> */ 2419 }; 2420 2421 /* Section name for stubs is the associated section name plus this 2422 string. */ 2423 #define STUB_SUFFIX ".stub" 2424 2425 enum elf_aarch64_stub_type 2426 { 2427 aarch64_stub_none, 2428 aarch64_stub_adrp_branch, 2429 aarch64_stub_long_branch, 2430 aarch64_stub_erratum_835769_veneer, 2431 aarch64_stub_erratum_843419_veneer, 2432 }; 2433 2434 struct elf_aarch64_stub_hash_entry 2435 { 2436 /* Base hash table entry structure. */ 2437 struct bfd_hash_entry root; 2438 2439 /* The stub section. */ 2440 asection *stub_sec; 2441 2442 /* Offset within stub_sec of the beginning of this stub. */ 2443 bfd_vma stub_offset; 2444 2445 /* Given the symbol's value and its section we can determine its final 2446 value when building the stubs (so the stub knows where to jump). */ 2447 bfd_vma target_value; 2448 asection *target_section; 2449 2450 enum elf_aarch64_stub_type stub_type; 2451 2452 /* The symbol table entry, if any, that this was derived from. */ 2453 struct elf_aarch64_link_hash_entry *h; 2454 2455 /* Destination symbol type */ 2456 unsigned char st_type; 2457 2458 /* Where this stub is being called from, or, in the case of combined 2459 stub sections, the first input section in the group. */ 2460 asection *id_sec; 2461 2462 /* The name for the local symbol at the start of this stub. The 2463 stub name in the hash table has to be unique; this does not, so 2464 it can be friendlier. 
*/ 2465 char *output_name; 2466 2467 /* The instruction which caused this stub to be generated (only valid for 2468 erratum 835769 workaround stubs at present). */ 2469 uint32_t veneered_insn; 2470 2471 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */ 2472 bfd_vma adrp_offset; 2473 }; 2474 2475 /* Used to build a map of a section. This is required for mixed-endian 2476 code/data. */ 2477 2478 typedef struct elf_elf_section_map 2479 { 2480 bfd_vma vma; 2481 char type; 2482 } 2483 elf_aarch64_section_map; 2484 2485 2486 typedef struct _aarch64_elf_section_data 2487 { 2488 struct bfd_elf_section_data elf; 2489 unsigned int mapcount; 2490 unsigned int mapsize; 2491 elf_aarch64_section_map *map; 2492 } 2493 _aarch64_elf_section_data; 2494 2495 #define elf_aarch64_section_data(sec) \ 2496 ((_aarch64_elf_section_data *) elf_section_data (sec)) 2497 2498 /* The size of the thread control block which is defined to be two pointers. */ 2499 #define TCB_SIZE (ARCH_SIZE/8)*2 2500 2501 struct elf_aarch64_local_symbol 2502 { 2503 unsigned int got_type; 2504 bfd_signed_vma got_refcount; 2505 bfd_vma got_offset; 2506 2507 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The 2508 offset is from the end of the jump table and reserved entries 2509 within the PLTGOT. 2510 2511 The magic value (bfd_vma) -1 indicates that an offset has not be 2512 allocated. */ 2513 bfd_vma tlsdesc_got_jump_table_offset; 2514 }; 2515 2516 struct elf_aarch64_obj_tdata 2517 { 2518 struct elf_obj_tdata root; 2519 2520 /* local symbol descriptors */ 2521 struct elf_aarch64_local_symbol *locals; 2522 2523 /* Zero to warn when linking objects with incompatible enum sizes. */ 2524 int no_enum_size_warning; 2525 2526 /* Zero to warn when linking objects with incompatible wchar_t sizes. */ 2527 int no_wchar_size_warning; 2528 2529 /* All GNU_PROPERTY_AARCH64_FEATURE_1_AND properties. */ 2530 uint32_t gnu_and_prop; 2531 2532 /* Zero to warn when linking objects with incompatible 2533 GNU_PROPERTY_AARCH64_FEATURE_1_BTI. */ 2534 int no_bti_warn; 2535 2536 /* PLT type based on security. */ 2537 aarch64_plt_type plt_type; 2538 }; 2539 2540 #define elf_aarch64_tdata(bfd) \ 2541 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any) 2542 2543 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals) 2544 2545 #define is_aarch64_elf(bfd) \ 2546 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \ 2547 && elf_tdata (bfd) != NULL \ 2548 && elf_object_id (bfd) == AARCH64_ELF_DATA) 2549 2550 static bool 2551 elfNN_aarch64_mkobject (bfd *abfd) 2552 { 2553 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata), 2554 AARCH64_ELF_DATA); 2555 } 2556 2557 #define elf_aarch64_hash_entry(ent) \ 2558 ((struct elf_aarch64_link_hash_entry *)(ent)) 2559 2560 #define GOT_UNKNOWN 0 2561 #define GOT_NORMAL 1 2562 #define GOT_TLS_GD 2 2563 #define GOT_TLS_IE 4 2564 #define GOT_TLSDESC_GD 8 2565 2566 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD)) 2567 2568 /* AArch64 ELF linker hash entry. */ 2569 struct elf_aarch64_link_hash_entry 2570 { 2571 struct elf_link_hash_entry root; 2572 2573 /* Since PLT entries have variable size, we need to record the 2574 index into .got.plt instead of recomputing it from the PLT 2575 offset. */ 2576 bfd_signed_vma plt_got_offset; 2577 2578 /* Bit mask representing the type of GOT entry(s) if any required by 2579 this symbol. */ 2580 unsigned int got_type; 2581 2582 /* TRUE if symbol is defined as a protected symbol. 
*/ 2583 unsigned int def_protected : 1; 2584 2585 /* A pointer to the most recently used stub hash entry against this 2586 symbol. */ 2587 struct elf_aarch64_stub_hash_entry *stub_cache; 2588 2589 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset 2590 is from the end of the jump table and reserved entries within the PLTGOT. 2591 2592 The magic value (bfd_vma) -1 indicates that an offset has not 2593 be allocated. */ 2594 bfd_vma tlsdesc_got_jump_table_offset; 2595 }; 2596 2597 static unsigned int 2598 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h, 2599 bfd *abfd, 2600 unsigned long r_symndx) 2601 { 2602 if (h) 2603 return elf_aarch64_hash_entry (h)->got_type; 2604 2605 if (! elf_aarch64_locals (abfd)) 2606 return GOT_UNKNOWN; 2607 2608 return elf_aarch64_locals (abfd)[r_symndx].got_type; 2609 } 2610 2611 /* Get the AArch64 elf linker hash table from a link_info structure. */ 2612 #define elf_aarch64_hash_table(info) \ 2613 ((struct elf_aarch64_link_hash_table *) ((info)->hash)) 2614 2615 #define aarch64_stub_hash_lookup(table, string, create, copy) \ 2616 ((struct elf_aarch64_stub_hash_entry *) \ 2617 bfd_hash_lookup ((table), (string), (create), (copy))) 2618 2619 /* AArch64 ELF linker hash table. */ 2620 struct elf_aarch64_link_hash_table 2621 { 2622 /* The main hash table. */ 2623 struct elf_link_hash_table root; 2624 2625 /* Nonzero to force PIC branch veneers. */ 2626 int pic_veneer; 2627 2628 /* Fix erratum 835769. */ 2629 int fix_erratum_835769; 2630 2631 /* Fix erratum 843419. */ 2632 erratum_84319_opts fix_erratum_843419; 2633 2634 /* Don't apply link-time values for dynamic relocations. */ 2635 int no_apply_dynamic_relocs; 2636 2637 /* The number of bytes in the initial entry in the PLT. */ 2638 bfd_size_type plt_header_size; 2639 2640 /* The bytes of the initial PLT entry. */ 2641 const bfd_byte *plt0_entry; 2642 2643 /* The number of bytes in the subsequent PLT entries. */ 2644 bfd_size_type plt_entry_size; 2645 2646 /* The bytes of the subsequent PLT entry. */ 2647 const bfd_byte *plt_entry; 2648 2649 /* For convenience in allocate_dynrelocs. */ 2650 bfd *obfd; 2651 2652 /* The amount of space used by the reserved portion of the sgotplt 2653 section, plus whatever space is used by the jump slots. */ 2654 bfd_vma sgotplt_jump_table_size; 2655 2656 /* The stub hash table. */ 2657 struct bfd_hash_table stub_hash_table; 2658 2659 /* Linker stub bfd. */ 2660 bfd *stub_bfd; 2661 2662 /* Linker call-backs. */ 2663 asection *(*add_stub_section) (const char *, asection *); 2664 void (*layout_sections_again) (void); 2665 2666 /* Array to keep track of which stub sections have been created, and 2667 information on stub grouping. */ 2668 struct map_stub 2669 { 2670 /* This is the section to which stubs in the group will be 2671 attached. */ 2672 asection *link_sec; 2673 /* The stub section. */ 2674 asection *stub_sec; 2675 } *stub_group; 2676 2677 /* Assorted information used by elfNN_aarch64_size_stubs. */ 2678 unsigned int bfd_count; 2679 unsigned int top_index; 2680 asection **input_list; 2681 2682 /* JUMP_SLOT relocs for variant PCS symbols may be present. */ 2683 int variant_pcs; 2684 2685 /* The number of bytes in the PLT enty for the TLS descriptor. */ 2686 bfd_size_type tlsdesc_plt_entry_size; 2687 2688 /* Used by local STT_GNU_IFUNC symbols. */ 2689 htab_t loc_hash_table; 2690 void * loc_hash_memory; 2691 }; 2692 2693 /* Create an entry in an AArch64 ELF linker hash table. 
*/ 2694 2695 static struct bfd_hash_entry * 2696 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry, 2697 struct bfd_hash_table *table, 2698 const char *string) 2699 { 2700 struct elf_aarch64_link_hash_entry *ret = 2701 (struct elf_aarch64_link_hash_entry *) entry; 2702 2703 /* Allocate the structure if it has not already been allocated by a 2704 subclass. */ 2705 if (ret == NULL) 2706 ret = bfd_hash_allocate (table, 2707 sizeof (struct elf_aarch64_link_hash_entry)); 2708 if (ret == NULL) 2709 return (struct bfd_hash_entry *) ret; 2710 2711 /* Call the allocation method of the superclass. */ 2712 ret = ((struct elf_aarch64_link_hash_entry *) 2713 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret, 2714 table, string)); 2715 if (ret != NULL) 2716 { 2717 ret->got_type = GOT_UNKNOWN; 2718 ret->def_protected = 0; 2719 ret->plt_got_offset = (bfd_vma) - 1; 2720 ret->stub_cache = NULL; 2721 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1; 2722 } 2723 2724 return (struct bfd_hash_entry *) ret; 2725 } 2726 2727 /* Initialize an entry in the stub hash table. */ 2728 2729 static struct bfd_hash_entry * 2730 stub_hash_newfunc (struct bfd_hash_entry *entry, 2731 struct bfd_hash_table *table, const char *string) 2732 { 2733 /* Allocate the structure if it has not already been allocated by a 2734 subclass. */ 2735 if (entry == NULL) 2736 { 2737 entry = bfd_hash_allocate (table, 2738 sizeof (struct 2739 elf_aarch64_stub_hash_entry)); 2740 if (entry == NULL) 2741 return entry; 2742 } 2743 2744 /* Call the allocation method of the superclass. */ 2745 entry = bfd_hash_newfunc (entry, table, string); 2746 if (entry != NULL) 2747 { 2748 struct elf_aarch64_stub_hash_entry *eh; 2749 2750 /* Initialize the local fields. */ 2751 eh = (struct elf_aarch64_stub_hash_entry *) entry; 2752 eh->adrp_offset = 0; 2753 eh->stub_sec = NULL; 2754 eh->stub_offset = 0; 2755 eh->target_value = 0; 2756 eh->target_section = NULL; 2757 eh->stub_type = aarch64_stub_none; 2758 eh->h = NULL; 2759 eh->id_sec = NULL; 2760 } 2761 2762 return entry; 2763 } 2764 2765 /* Compute a hash of a local hash entry. We use elf_link_hash_entry 2766 for local symbol so that we can handle local STT_GNU_IFUNC symbols 2767 as global symbol. We reuse indx and dynstr_index for local symbol 2768 hash since they aren't used by global symbols in this backend. */ 2769 2770 static hashval_t 2771 elfNN_aarch64_local_htab_hash (const void *ptr) 2772 { 2773 struct elf_link_hash_entry *h 2774 = (struct elf_link_hash_entry *) ptr; 2775 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index); 2776 } 2777 2778 /* Compare local hash entries. */ 2779 2780 static int 2781 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2) 2782 { 2783 struct elf_link_hash_entry *h1 2784 = (struct elf_link_hash_entry *) ptr1; 2785 struct elf_link_hash_entry *h2 2786 = (struct elf_link_hash_entry *) ptr2; 2787 2788 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index; 2789 } 2790 2791 /* Find and/or create a hash entry for local symbol. 
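   The entry is keyed on the owning bfd's first section id together
   with the relocation's symbol index (via ELF_LOCAL_SYMBOL_HASH),
   which are stashed in the otherwise unused indx and dynstr_index
   fields as described above.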
*/ 2792 2793 static struct elf_link_hash_entry * 2794 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab, 2795 bfd *abfd, const Elf_Internal_Rela *rel, 2796 bool create) 2797 { 2798 struct elf_aarch64_link_hash_entry e, *ret; 2799 asection *sec = abfd->sections; 2800 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id, 2801 ELFNN_R_SYM (rel->r_info)); 2802 void **slot; 2803 2804 e.root.indx = sec->id; 2805 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info); 2806 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h, 2807 create ? INSERT : NO_INSERT); 2808 2809 if (!slot) 2810 return NULL; 2811 2812 if (*slot) 2813 { 2814 ret = (struct elf_aarch64_link_hash_entry *) *slot; 2815 return &ret->root; 2816 } 2817 2818 ret = (struct elf_aarch64_link_hash_entry *) 2819 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory, 2820 sizeof (struct elf_aarch64_link_hash_entry)); 2821 if (ret) 2822 { 2823 memset (ret, 0, sizeof (*ret)); 2824 ret->root.indx = sec->id; 2825 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info); 2826 ret->root.dynindx = -1; 2827 *slot = ret; 2828 } 2829 return &ret->root; 2830 } 2831 2832 /* Copy the extra info we tack onto an elf_link_hash_entry. */ 2833 2834 static void 2835 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info, 2836 struct elf_link_hash_entry *dir, 2837 struct elf_link_hash_entry *ind) 2838 { 2839 struct elf_aarch64_link_hash_entry *edir, *eind; 2840 2841 edir = (struct elf_aarch64_link_hash_entry *) dir; 2842 eind = (struct elf_aarch64_link_hash_entry *) ind; 2843 2844 if (ind->root.type == bfd_link_hash_indirect) 2845 { 2846 /* Copy over PLT info. */ 2847 if (dir->got.refcount <= 0) 2848 { 2849 edir->got_type = eind->got_type; 2850 eind->got_type = GOT_UNKNOWN; 2851 } 2852 } 2853 2854 _bfd_elf_link_hash_copy_indirect (info, dir, ind); 2855 } 2856 2857 /* Merge non-visibility st_other attributes. */ 2858 2859 static void 2860 elfNN_aarch64_merge_symbol_attribute (struct elf_link_hash_entry *h, 2861 unsigned int st_other, 2862 bool definition, 2863 bool dynamic ATTRIBUTE_UNUSED) 2864 { 2865 if (definition) 2866 { 2867 struct elf_aarch64_link_hash_entry *eh 2868 = (struct elf_aarch64_link_hash_entry *)h; 2869 eh->def_protected = ELF_ST_VISIBILITY (st_other) == STV_PROTECTED; 2870 } 2871 2872 unsigned int isym_sto = st_other & ~ELF_ST_VISIBILITY (-1); 2873 unsigned int h_sto = h->other & ~ELF_ST_VISIBILITY (-1); 2874 2875 if (isym_sto == h_sto) 2876 return; 2877 2878 if (isym_sto & ~STO_AARCH64_VARIANT_PCS) 2879 /* Not fatal, this callback cannot fail. */ 2880 _bfd_error_handler (_("unknown attribute for symbol `%s': 0x%02x"), 2881 h->root.root.string, isym_sto); 2882 2883 /* Note: Ideally we would warn about any attribute mismatch, but 2884 this api does not allow that without substantial changes. */ 2885 if (isym_sto & STO_AARCH64_VARIANT_PCS) 2886 h->other |= STO_AARCH64_VARIANT_PCS; 2887 } 2888 2889 /* Destroy an AArch64 elf linker hash table. */ 2890 2891 static void 2892 elfNN_aarch64_link_hash_table_free (bfd *obfd) 2893 { 2894 struct elf_aarch64_link_hash_table *ret 2895 = (struct elf_aarch64_link_hash_table *) obfd->link.hash; 2896 2897 if (ret->loc_hash_table) 2898 htab_delete (ret->loc_hash_table); 2899 if (ret->loc_hash_memory) 2900 objalloc_free ((struct objalloc *) ret->loc_hash_memory); 2901 2902 bfd_hash_table_free (&ret->stub_hash_table); 2903 _bfd_elf_link_hash_table_free (obfd); 2904 } 2905 2906 /* Create an AArch64 elf linker hash table. 
*/ 2907 2908 static struct bfd_link_hash_table * 2909 elfNN_aarch64_link_hash_table_create (bfd *abfd) 2910 { 2911 struct elf_aarch64_link_hash_table *ret; 2912 size_t amt = sizeof (struct elf_aarch64_link_hash_table); 2913 2914 ret = bfd_zmalloc (amt); 2915 if (ret == NULL) 2916 return NULL; 2917 2918 if (!_bfd_elf_link_hash_table_init 2919 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc, 2920 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA)) 2921 { 2922 free (ret); 2923 return NULL; 2924 } 2925 2926 ret->plt_header_size = PLT_ENTRY_SIZE; 2927 ret->plt0_entry = elfNN_aarch64_small_plt0_entry; 2928 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE; 2929 ret->plt_entry = elfNN_aarch64_small_plt_entry; 2930 ret->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE; 2931 ret->obfd = abfd; 2932 ret->root.tlsdesc_got = (bfd_vma) - 1; 2933 2934 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc, 2935 sizeof (struct elf_aarch64_stub_hash_entry))) 2936 { 2937 _bfd_elf_link_hash_table_free (abfd); 2938 return NULL; 2939 } 2940 2941 ret->loc_hash_table = htab_try_create (1024, 2942 elfNN_aarch64_local_htab_hash, 2943 elfNN_aarch64_local_htab_eq, 2944 NULL); 2945 ret->loc_hash_memory = objalloc_create (); 2946 if (!ret->loc_hash_table || !ret->loc_hash_memory) 2947 { 2948 elfNN_aarch64_link_hash_table_free (abfd); 2949 return NULL; 2950 } 2951 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free; 2952 2953 return &ret->root.root; 2954 } 2955 2956 /* Perform relocation R_TYPE. Returns TRUE upon success, FALSE otherwise. */ 2957 2958 static bool 2959 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section, 2960 bfd_vma offset, bfd_vma value) 2961 { 2962 reloc_howto_type *howto; 2963 bfd_vma place; 2964 2965 howto = elfNN_aarch64_howto_from_type (input_bfd, r_type); 2966 place = (input_section->output_section->vma + input_section->output_offset 2967 + offset); 2968 2969 r_type = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type); 2970 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, r_type, place, 2971 value, 0, false); 2972 return _bfd_aarch64_elf_put_addend (input_bfd, 2973 input_section->contents + offset, r_type, 2974 howto, value) == bfd_reloc_ok; 2975 } 2976 2977 static enum elf_aarch64_stub_type 2978 aarch64_select_branch_stub (bfd_vma value, bfd_vma place) 2979 { 2980 if (aarch64_valid_for_adrp_p (value, place)) 2981 return aarch64_stub_adrp_branch; 2982 return aarch64_stub_long_branch; 2983 } 2984 2985 /* Determine the type of stub needed, if any, for a call. */ 2986 2987 static enum elf_aarch64_stub_type 2988 aarch64_type_of_stub (asection *input_sec, 2989 const Elf_Internal_Rela *rel, 2990 asection *sym_sec, 2991 unsigned char st_type, 2992 bfd_vma destination) 2993 { 2994 bfd_vma location; 2995 bfd_signed_vma branch_offset; 2996 unsigned int r_type; 2997 enum elf_aarch64_stub_type stub_type = aarch64_stub_none; 2998 2999 if (st_type != STT_FUNC 3000 && (sym_sec == input_sec)) 3001 return stub_type; 3002 3003 /* Determine where the call point is. */ 3004 location = (input_sec->output_offset 3005 + input_sec->output_section->vma + rel->r_offset); 3006 3007 branch_offset = (bfd_signed_vma) (destination - location); 3008 3009 r_type = ELFNN_R_TYPE (rel->r_info); 3010 3011 /* We don't want to redirect any old unconditional jump in this way, 3012 only one which is being used for a sibcall, where it is 3013 acceptable for the IP0 and IP1 registers to be clobbered. 
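   A stub is only required when the branch offset cannot be encoded in
   the 26-bit immediate of a B or BL instruction, i.e. when it falls
   outside the +/-128MiB window bounded by AARCH64_MAX_FWD_BRANCH_OFFSET
   and AARCH64_MAX_BWD_BRANCH_OFFSET above.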
*/ 3014 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26)) 3015 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET 3016 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET)) 3017 { 3018 stub_type = aarch64_stub_long_branch; 3019 } 3020 3021 return stub_type; 3022 } 3023 3024 /* Build a name for an entry in the stub hash table. */ 3025 3026 static char * 3027 elfNN_aarch64_stub_name (const asection *input_section, 3028 const asection *sym_sec, 3029 const struct elf_aarch64_link_hash_entry *hash, 3030 const Elf_Internal_Rela *rel) 3031 { 3032 char *stub_name; 3033 bfd_size_type len; 3034 3035 if (hash) 3036 { 3037 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1; 3038 stub_name = bfd_malloc (len); 3039 if (stub_name != NULL) 3040 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x", 3041 (unsigned int) input_section->id, 3042 hash->root.root.root.string, 3043 rel->r_addend); 3044 } 3045 else 3046 { 3047 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1; 3048 stub_name = bfd_malloc (len); 3049 if (stub_name != NULL) 3050 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x", 3051 (unsigned int) input_section->id, 3052 (unsigned int) sym_sec->id, 3053 (unsigned int) ELFNN_R_SYM (rel->r_info), 3054 rel->r_addend); 3055 } 3056 3057 return stub_name; 3058 } 3059 3060 /* Return TRUE if symbol H should be hashed in the `.gnu.hash' section. For 3061 executable PLT slots where the executable never takes the address of those 3062 functions, the function symbols are not added to the hash table. */ 3063 3064 static bool 3065 elf_aarch64_hash_symbol (struct elf_link_hash_entry *h) 3066 { 3067 if (h->plt.offset != (bfd_vma) -1 3068 && !h->def_regular 3069 && !h->pointer_equality_needed) 3070 return false; 3071 3072 return _bfd_elf_hash_symbol (h); 3073 } 3074 3075 3076 /* Look up an entry in the stub hash. Stub entries are cached because 3077 creating the stub name takes a bit of time. */ 3078 3079 static struct elf_aarch64_stub_hash_entry * 3080 elfNN_aarch64_get_stub_entry (const asection *input_section, 3081 const asection *sym_sec, 3082 struct elf_link_hash_entry *hash, 3083 const Elf_Internal_Rela *rel, 3084 struct elf_aarch64_link_hash_table *htab) 3085 { 3086 struct elf_aarch64_stub_hash_entry *stub_entry; 3087 struct elf_aarch64_link_hash_entry *h = 3088 (struct elf_aarch64_link_hash_entry *) hash; 3089 const asection *id_sec; 3090 3091 if ((input_section->flags & SEC_CODE) == 0) 3092 return NULL; 3093 3094 /* If this input section is part of a group of sections sharing one 3095 stub section, then use the id of the first section in the group. 3096 Stub names need to include a section id, as there may well be 3097 more than one stub used to reach say, printf, and we need to 3098 distinguish between them. */ 3099 id_sec = htab->stub_group[input_section->id].link_sec; 3100 3101 if (h != NULL && h->stub_cache != NULL 3102 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec) 3103 { 3104 stub_entry = h->stub_cache; 3105 } 3106 else 3107 { 3108 char *stub_name; 3109 3110 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel); 3111 if (stub_name == NULL) 3112 return NULL; 3113 3114 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, 3115 stub_name, false, false); 3116 if (h != NULL) 3117 h->stub_cache = stub_entry; 3118 3119 free (stub_name); 3120 } 3121 3122 return stub_entry; 3123 } 3124 3125 3126 /* Create a stub section. 
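   The new section is named after the section it is attached to, with
   STUB_SUFFIX appended, so for instance stubs associated with .text
   are collected in a section called .text.stub.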
*/ 3127 3128 static asection * 3129 _bfd_aarch64_create_stub_section (asection *section, 3130 struct elf_aarch64_link_hash_table *htab) 3131 { 3132 size_t namelen; 3133 bfd_size_type len; 3134 char *s_name; 3135 3136 namelen = strlen (section->name); 3137 len = namelen + sizeof (STUB_SUFFIX); 3138 s_name = bfd_alloc (htab->stub_bfd, len); 3139 if (s_name == NULL) 3140 return NULL; 3141 3142 memcpy (s_name, section->name, namelen); 3143 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX)); 3144 return (*htab->add_stub_section) (s_name, section); 3145 } 3146 3147 3148 /* Find or create a stub section for a link section. 3149 3150 Fix or create the stub section used to collect stubs attached to 3151 the specified link section. */ 3152 3153 static asection * 3154 _bfd_aarch64_get_stub_for_link_section (asection *link_section, 3155 struct elf_aarch64_link_hash_table *htab) 3156 { 3157 if (htab->stub_group[link_section->id].stub_sec == NULL) 3158 htab->stub_group[link_section->id].stub_sec 3159 = _bfd_aarch64_create_stub_section (link_section, htab); 3160 return htab->stub_group[link_section->id].stub_sec; 3161 } 3162 3163 3164 /* Find or create a stub section in the stub group for an input 3165 section. */ 3166 3167 static asection * 3168 _bfd_aarch64_create_or_find_stub_sec (asection *section, 3169 struct elf_aarch64_link_hash_table *htab) 3170 { 3171 asection *link_sec = htab->stub_group[section->id].link_sec; 3172 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab); 3173 } 3174 3175 3176 /* Add a new stub entry in the stub group associated with an input 3177 section to the stub hash. Not all fields of the new stub entry are 3178 initialised. */ 3179 3180 static struct elf_aarch64_stub_hash_entry * 3181 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name, 3182 asection *section, 3183 struct elf_aarch64_link_hash_table *htab) 3184 { 3185 asection *link_sec; 3186 asection *stub_sec; 3187 struct elf_aarch64_stub_hash_entry *stub_entry; 3188 3189 link_sec = htab->stub_group[section->id].link_sec; 3190 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab); 3191 3192 /* Enter this entry into the linker stub hash table. */ 3193 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name, 3194 true, false); 3195 if (stub_entry == NULL) 3196 { 3197 /* xgettext:c-format */ 3198 _bfd_error_handler (_("%pB: cannot create stub entry %s"), 3199 section->owner, stub_name); 3200 return NULL; 3201 } 3202 3203 stub_entry->stub_sec = stub_sec; 3204 stub_entry->stub_offset = 0; 3205 stub_entry->id_sec = link_sec; 3206 3207 return stub_entry; 3208 } 3209 3210 /* Add a new stub entry in the final stub section to the stub hash. 3211 Not all fields of the new stub entry are initialised. */ 3212 3213 static struct elf_aarch64_stub_hash_entry * 3214 _bfd_aarch64_add_stub_entry_after (const char *stub_name, 3215 asection *link_section, 3216 struct elf_aarch64_link_hash_table *htab) 3217 { 3218 asection *stub_sec; 3219 struct elf_aarch64_stub_hash_entry *stub_entry; 3220 3221 stub_sec = NULL; 3222 /* Only create the actual stub if we will end up needing it. 
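   When the erratum 843419 workaround is limited to the ADR form of the
   fix, no branch veneer is emitted and stub_sec deliberately stays NULL.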
*/ 3223 if (htab->fix_erratum_843419 & ERRAT_ADRP) 3224 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab); 3225 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name, 3226 true, false); 3227 if (stub_entry == NULL) 3228 { 3229 _bfd_error_handler (_("cannot create stub entry %s"), stub_name); 3230 return NULL; 3231 } 3232 3233 stub_entry->stub_sec = stub_sec; 3234 stub_entry->stub_offset = 0; 3235 stub_entry->id_sec = link_section; 3236 3237 return stub_entry; 3238 } 3239 3240 3241 static bool 3242 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry, 3243 void *in_arg) 3244 { 3245 struct elf_aarch64_stub_hash_entry *stub_entry; 3246 asection *stub_sec; 3247 bfd *stub_bfd; 3248 bfd_byte *loc; 3249 bfd_vma sym_value; 3250 bfd_vma veneered_insn_loc; 3251 bfd_vma veneer_entry_loc; 3252 bfd_signed_vma branch_offset = 0; 3253 unsigned int template_size; 3254 const uint32_t *template; 3255 unsigned int i; 3256 struct bfd_link_info *info; 3257 3258 /* Massage our args to the form they really have. */ 3259 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry; 3260 3261 info = (struct bfd_link_info *) in_arg; 3262 3263 /* Fail if the target section could not be assigned to an output 3264 section. The user should fix his linker script. */ 3265 if (stub_entry->target_section->output_section == NULL 3266 && info->non_contiguous_regions) 3267 info->callbacks->einfo (_("%F%P: Could not assign '%pA' to an output section. " 3268 "Retry without " 3269 "--enable-non-contiguous-regions.\n"), 3270 stub_entry->target_section); 3271 3272 stub_sec = stub_entry->stub_sec; 3273 3274 /* Make a note of the offset within the stubs for this entry. */ 3275 stub_entry->stub_offset = stub_sec->size; 3276 loc = stub_sec->contents + stub_entry->stub_offset; 3277 3278 stub_bfd = stub_sec->owner; 3279 3280 /* This is the address of the stub destination. */ 3281 sym_value = (stub_entry->target_value 3282 + stub_entry->target_section->output_offset 3283 + stub_entry->target_section->output_section->vma); 3284 3285 if (stub_entry->stub_type == aarch64_stub_long_branch) 3286 { 3287 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma 3288 + stub_sec->output_offset); 3289 3290 /* See if we can relax the stub. 
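   If the destination turns out to be within ADRP range of the stub's
   own location, the literal-pool based long-branch sequence can be
   replaced with the shorter adrp/add/br form.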
*/ 3291 if (aarch64_valid_for_adrp_p (sym_value, place)) 3292 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place); 3293 } 3294 3295 switch (stub_entry->stub_type) 3296 { 3297 case aarch64_stub_adrp_branch: 3298 template = aarch64_adrp_branch_stub; 3299 template_size = sizeof (aarch64_adrp_branch_stub); 3300 break; 3301 case aarch64_stub_long_branch: 3302 template = aarch64_long_branch_stub; 3303 template_size = sizeof (aarch64_long_branch_stub); 3304 break; 3305 case aarch64_stub_erratum_835769_veneer: 3306 template = aarch64_erratum_835769_stub; 3307 template_size = sizeof (aarch64_erratum_835769_stub); 3308 break; 3309 case aarch64_stub_erratum_843419_veneer: 3310 template = aarch64_erratum_843419_stub; 3311 template_size = sizeof (aarch64_erratum_843419_stub); 3312 break; 3313 default: 3314 abort (); 3315 } 3316 3317 for (i = 0; i < (template_size / sizeof template[0]); i++) 3318 { 3319 bfd_putl32 (template[i], loc); 3320 loc += 4; 3321 } 3322 3323 template_size = (template_size + 7) & ~7; 3324 stub_sec->size += template_size; 3325 3326 switch (stub_entry->stub_type) 3327 { 3328 case aarch64_stub_adrp_branch: 3329 if (!aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec, 3330 stub_entry->stub_offset, sym_value)) 3331 /* The stub would not have been relaxed if the offset was out 3332 of range. */ 3333 BFD_FAIL (); 3334 3335 if (!aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec, 3336 stub_entry->stub_offset + 4, sym_value)) 3337 BFD_FAIL (); 3338 break; 3339 3340 case aarch64_stub_long_branch: 3341 /* We want the value relative to the address 12 bytes back from the 3342 value itself. */ 3343 if (!aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec, 3344 stub_entry->stub_offset + 16, sym_value + 12)) 3345 BFD_FAIL (); 3346 break; 3347 3348 case aarch64_stub_erratum_835769_veneer: 3349 veneered_insn_loc = stub_entry->target_section->output_section->vma 3350 + stub_entry->target_section->output_offset 3351 + stub_entry->target_value; 3352 veneer_entry_loc = stub_entry->stub_sec->output_section->vma 3353 + stub_entry->stub_sec->output_offset 3354 + stub_entry->stub_offset; 3355 branch_offset = veneered_insn_loc - veneer_entry_loc; 3356 branch_offset >>= 2; 3357 branch_offset &= 0x3ffffff; 3358 bfd_putl32 (stub_entry->veneered_insn, 3359 stub_sec->contents + stub_entry->stub_offset); 3360 bfd_putl32 (template[1] | branch_offset, 3361 stub_sec->contents + stub_entry->stub_offset + 4); 3362 break; 3363 3364 case aarch64_stub_erratum_843419_veneer: 3365 if (!aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec, 3366 stub_entry->stub_offset + 4, sym_value + 4)) 3367 BFD_FAIL (); 3368 break; 3369 3370 default: 3371 abort (); 3372 } 3373 3374 return true; 3375 } 3376 3377 /* As above, but don't actually build the stub. Just bump offset so 3378 we know stub section sizes. */ 3379 3380 static bool 3381 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg) 3382 { 3383 struct elf_aarch64_stub_hash_entry *stub_entry; 3384 struct elf_aarch64_link_hash_table *htab; 3385 int size; 3386 3387 /* Massage our args to the form they really have. 
*/ 3388 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry; 3389 htab = (struct elf_aarch64_link_hash_table *) in_arg; 3390 3391 switch (stub_entry->stub_type) 3392 { 3393 case aarch64_stub_adrp_branch: 3394 size = sizeof (aarch64_adrp_branch_stub); 3395 break; 3396 case aarch64_stub_long_branch: 3397 size = sizeof (aarch64_long_branch_stub); 3398 break; 3399 case aarch64_stub_erratum_835769_veneer: 3400 size = sizeof (aarch64_erratum_835769_stub); 3401 break; 3402 case aarch64_stub_erratum_843419_veneer: 3403 { 3404 if (htab->fix_erratum_843419 == ERRAT_ADR) 3405 return true; 3406 size = sizeof (aarch64_erratum_843419_stub); 3407 } 3408 break; 3409 default: 3410 abort (); 3411 } 3412 3413 size = (size + 7) & ~7; 3414 stub_entry->stub_sec->size += size; 3415 return true; 3416 } 3417 3418 /* External entry points for sizing and building linker stubs. */ 3419 3420 /* Set up various things so that we can make a list of input sections 3421 for each output section included in the link. Returns -1 on error, 3422 0 when no stubs will be needed, and 1 on success. */ 3423 3424 int 3425 elfNN_aarch64_setup_section_lists (bfd *output_bfd, 3426 struct bfd_link_info *info) 3427 { 3428 bfd *input_bfd; 3429 unsigned int bfd_count; 3430 unsigned int top_id, top_index; 3431 asection *section; 3432 asection **input_list, **list; 3433 size_t amt; 3434 struct elf_aarch64_link_hash_table *htab = 3435 elf_aarch64_hash_table (info); 3436 3437 if (!is_elf_hash_table (&htab->root.root)) 3438 return 0; 3439 3440 /* Count the number of input BFDs and find the top input section id. */ 3441 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0; 3442 input_bfd != NULL; input_bfd = input_bfd->link.next) 3443 { 3444 bfd_count += 1; 3445 for (section = input_bfd->sections; 3446 section != NULL; section = section->next) 3447 { 3448 if (top_id < section->id) 3449 top_id = section->id; 3450 } 3451 } 3452 htab->bfd_count = bfd_count; 3453 3454 amt = sizeof (struct map_stub) * (top_id + 1); 3455 htab->stub_group = bfd_zmalloc (amt); 3456 if (htab->stub_group == NULL) 3457 return -1; 3458 3459 /* We can't use output_bfd->section_count here to find the top output 3460 section index as some sections may have been removed, and 3461 _bfd_strip_section_from_output doesn't renumber the indices. */ 3462 for (section = output_bfd->sections, top_index = 0; 3463 section != NULL; section = section->next) 3464 { 3465 if (top_index < section->index) 3466 top_index = section->index; 3467 } 3468 3469 htab->top_index = top_index; 3470 amt = sizeof (asection *) * (top_index + 1); 3471 input_list = bfd_malloc (amt); 3472 htab->input_list = input_list; 3473 if (input_list == NULL) 3474 return -1; 3475 3476 /* For sections we aren't interested in, mark their entries with a 3477 value we can check later. */ 3478 list = input_list + top_index; 3479 do 3480 *list = bfd_abs_section_ptr; 3481 while (list-- != input_list); 3482 3483 for (section = output_bfd->sections; 3484 section != NULL; section = section->next) 3485 { 3486 if ((section->flags & SEC_CODE) != 0) 3487 input_list[section->index] = NULL; 3488 } 3489 3490 return 1; 3491 } 3492 3493 /* Used by elfNN_aarch64_next_input_section and group_sections. */ 3494 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec) 3495 3496 /* The linker repeatedly calls this function for each input section, 3497 in the order that input sections are linked into output sections. 3498 Build lists of input sections to determine groupings between which 3499 we may insert linker stubs. 
*/ 3500 3501 void 3502 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec) 3503 { 3504 struct elf_aarch64_link_hash_table *htab = 3505 elf_aarch64_hash_table (info); 3506 3507 if (isec->output_section->index <= htab->top_index) 3508 { 3509 asection **list = htab->input_list + isec->output_section->index; 3510 3511 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0) 3512 { 3513 /* Steal the link_sec pointer for our list. */ 3514 /* This happens to make the list in reverse order, 3515 which is what we want. */ 3516 PREV_SEC (isec) = *list; 3517 *list = isec; 3518 } 3519 } 3520 } 3521 3522 /* See whether we can group stub sections together. Grouping stub 3523 sections may result in fewer stubs. More importantly, we need to 3524 put all .init* and .fini* stubs at the beginning of the .init or 3525 .fini output sections respectively, because glibc splits the 3526 _init and _fini functions into multiple parts. Putting a stub in 3527 the middle of a function is not a good idea. */ 3528 3529 static void 3530 group_sections (struct elf_aarch64_link_hash_table *htab, 3531 bfd_size_type stub_group_size, 3532 bool stubs_always_after_branch) 3533 { 3534 asection **list = htab->input_list; 3535 3536 do 3537 { 3538 asection *tail = *list; 3539 asection *head; 3540 3541 if (tail == bfd_abs_section_ptr) 3542 continue; 3543 3544 /* Reverse the list: we must avoid placing stubs at the 3545 beginning of the section because the beginning of the text 3546 section may be required for an interrupt vector in bare metal 3547 code. */ 3548 #define NEXT_SEC PREV_SEC 3549 head = NULL; 3550 while (tail != NULL) 3551 { 3552 /* Pop from tail. */ 3553 asection *item = tail; 3554 tail = PREV_SEC (item); 3555 3556 /* Push on head. */ 3557 NEXT_SEC (item) = head; 3558 head = item; 3559 } 3560 3561 while (head != NULL) 3562 { 3563 asection *curr; 3564 asection *next; 3565 bfd_vma stub_group_start = head->output_offset; 3566 bfd_vma end_of_next; 3567 3568 curr = head; 3569 while (NEXT_SEC (curr) != NULL) 3570 { 3571 next = NEXT_SEC (curr); 3572 end_of_next = next->output_offset + next->size; 3573 if (end_of_next - stub_group_start >= stub_group_size) 3574 /* End of NEXT is too far from start, so stop. */ 3575 break; 3576 /* Add NEXT to the group. */ 3577 curr = next; 3578 } 3579 3580 /* OK, the size from the start to the start of CURR is less 3581 than stub_group_size and thus can be handled by one stub 3582 section. (Or the head section is itself larger than 3583 stub_group_size, in which case we may be toast.) 3584 We should really be keeping track of the total size of 3585 stubs added here, as stubs contribute to the final output 3586 section size. */ 3587 do 3588 { 3589 next = NEXT_SEC (head); 3590 /* Set up this stub group. */ 3591 htab->stub_group[head->id].link_sec = curr; 3592 } 3593 while (head != curr && (head = next) != NULL); 3594 3595 /* But wait, there's more! Input sections up to stub_group_size 3596 bytes after the stub section can be handled by it too. */ 3597 if (!stubs_always_after_branch) 3598 { 3599 stub_group_start = curr->output_offset + curr->size; 3600 3601 while (next != NULL) 3602 { 3603 end_of_next = next->output_offset + next->size; 3604 if (end_of_next - stub_group_start >= stub_group_size) 3605 /* End of NEXT is too far from stubs, so stop. */ 3606 break; 3607 /* Add NEXT to the stub group. 
*/ 3608 head = next; 3609 next = NEXT_SEC (head); 3610 htab->stub_group[head->id].link_sec = curr; 3611 } 3612 } 3613 head = next; 3614 } 3615 } 3616 while (list++ != htab->input_list + htab->top_index); 3617 3618 free (htab->input_list); 3619 } 3620 3621 #undef PREV_SEC 3622 #undef NEXT_SEC 3623 3624 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1)) 3625 3626 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5) 3627 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5) 3628 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5) 3629 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5) 3630 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5) 3631 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5) 3632 3633 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000) 3634 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1) 3635 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3) 3636 #define AARCH64_ZR 0x1f 3637 3638 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for 3639 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops. */ 3640 3641 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1) 3642 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000) 3643 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000) 3644 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000) 3645 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000) 3646 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000) 3647 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000) 3648 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000) 3649 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000) 3650 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400) 3651 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800) 3652 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00) 3653 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800) 3654 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000) 3655 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000) 3656 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000) 3657 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000) 3658 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000) 3659 3660 /* Classify an INSN if it is indeed a load/store. 3661 3662 Return TRUE if INSN is a LD/ST instruction, otherwise return FALSE. 3663 3664 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2 3665 is set equal to RT. 3666 3667 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned. */ 3668 3669 static bool 3670 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2, 3671 bool *pair, bool *load) 3672 { 3673 uint32_t opcode; 3674 unsigned int r; 3675 uint32_t opc = 0; 3676 uint32_t v = 0; 3677 uint32_t opc_v = 0; 3678 3679 /* Bail out quickly if INSN doesn't fall into the load-store 3680 encoding space.
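   As a worked illustration (an assumed example, not quoted from the ARM ARM):
   the word 0xf9400260 encodes ldr x0, [x19]; it falls inside the load-store
   encoding space, matches the AARCH64_LDST_UIMM form, and is classified as a
   single (non-pair) load with RT == RT2 == 0, since opc == 1 together with
   V == 0 gives opc_v == 1.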
*/ 3681 if (!AARCH64_LDST (insn)) 3682 return false; 3683 3684 *pair = false; 3685 *load = false; 3686 if (AARCH64_LDST_EX (insn)) 3687 { 3688 *rt = AARCH64_RT (insn); 3689 *rt2 = *rt; 3690 if (AARCH64_BIT (insn, 21) == 1) 3691 { 3692 *pair = true; 3693 *rt2 = AARCH64_RT2 (insn); 3694 } 3695 *load = AARCH64_LD (insn); 3696 return true; 3697 } 3698 else if (AARCH64_LDST_NAP (insn) 3699 || AARCH64_LDSTP_PI (insn) 3700 || AARCH64_LDSTP_O (insn) 3701 || AARCH64_LDSTP_PRE (insn)) 3702 { 3703 *pair = true; 3704 *rt = AARCH64_RT (insn); 3705 *rt2 = AARCH64_RT2 (insn); 3706 *load = AARCH64_LD (insn); 3707 return true; 3708 } 3709 else if (AARCH64_LDST_PCREL (insn) 3710 || AARCH64_LDST_UI (insn) 3711 || AARCH64_LDST_PIIMM (insn) 3712 || AARCH64_LDST_U (insn) 3713 || AARCH64_LDST_PREIMM (insn) 3714 || AARCH64_LDST_RO (insn) 3715 || AARCH64_LDST_UIMM (insn)) 3716 { 3717 *rt = AARCH64_RT (insn); 3718 *rt2 = *rt; 3719 if (AARCH64_LDST_PCREL (insn)) 3720 *load = true; 3721 opc = AARCH64_BITS (insn, 22, 2); 3722 v = AARCH64_BIT (insn, 26); 3723 opc_v = opc | (v << 2); 3724 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3 3725 || opc_v == 5 || opc_v == 7); 3726 return true; 3727 } 3728 else if (AARCH64_LDST_SIMD_M (insn) 3729 || AARCH64_LDST_SIMD_M_PI (insn)) 3730 { 3731 *rt = AARCH64_RT (insn); 3732 *load = AARCH64_BIT (insn, 22); 3733 opcode = (insn >> 12) & 0xf; 3734 switch (opcode) 3735 { 3736 case 0: 3737 case 2: 3738 *rt2 = *rt + 3; 3739 break; 3740 3741 case 4: 3742 case 6: 3743 *rt2 = *rt + 2; 3744 break; 3745 3746 case 7: 3747 *rt2 = *rt; 3748 break; 3749 3750 case 8: 3751 case 10: 3752 *rt2 = *rt + 1; 3753 break; 3754 3755 default: 3756 return false; 3757 } 3758 return true; 3759 } 3760 else if (AARCH64_LDST_SIMD_S (insn) 3761 || AARCH64_LDST_SIMD_S_PI (insn)) 3762 { 3763 *rt = AARCH64_RT (insn); 3764 r = (insn >> 21) & 1; 3765 *load = AARCH64_BIT (insn, 22); 3766 opcode = (insn >> 13) & 0x7; 3767 switch (opcode) 3768 { 3769 case 0: 3770 case 2: 3771 case 4: 3772 *rt2 = *rt + r; 3773 break; 3774 3775 case 1: 3776 case 3: 3777 case 5: 3778 *rt2 = *rt + (r == 0 ? 2 : 3); 3779 break; 3780 3781 case 6: 3782 *rt2 = *rt + r; 3783 break; 3784 3785 case 7: 3786 *rt2 = *rt + (r == 0 ? 2 : 3); 3787 break; 3788 3789 default: 3790 return false; 3791 } 3792 return true; 3793 } 3794 3795 return false; 3796 } 3797 3798 /* Return TRUE if INSN is multiply-accumulate. */ 3799 3800 static bool 3801 aarch64_mlxl_p (uint32_t insn) 3802 { 3803 uint32_t op31 = AARCH64_OP31 (insn); 3804 3805 if (AARCH64_MAC (insn) 3806 && (op31 == 0 || op31 == 1 || op31 == 5) 3807 /* Exclude MUL instructions which are encoded as a multiple accumulate 3808 with RA = XZR. */ 3809 && AARCH64_RA (insn) != AARCH64_ZR) 3810 return true; 3811 3812 return false; 3813 } 3814 3815 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby 3816 it is possible for a 64-bit multiply-accumulate instruction to generate an 3817 incorrect result. The details are quite complex and hard to 3818 determine statically, since branches in the code may exist in some 3819 circumstances, but all cases end with a memory (load, store, or 3820 prefetch) instruction followed immediately by the multiply-accumulate 3821 operation. We employ a linker patching technique, by moving the potentially 3822 affected multiply-accumulate instruction into a patch region and replacing 3823 the original instruction with a branch to the patch. This function checks 3824 if INSN_1 is the memory operation followed by a multiply-accumulate 3825 operation (INSN_2). 
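   A rough illustration (assumed instructions, not quoted from the erratum
   documentation):

     ldr  x10, [x5, #8]        <- memory operation (INSN_1)
     madd x0, x1, x2, x3       <- 64-bit multiply-accumulate (INSN_2)

   is treated as an erratum sequence and gets a veneer, whereas

     ldr  x1, [x5, #8]
     madd x0, x1, x2, x3

   is considered safe, because the loaded register feeds the
   multiply-accumulate (a true read-after-write dependency).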
Return TRUE if an erratum sequence is found, FALSE 3826 if INSN_1 and INSN_2 are safe. */ 3827 3828 static bool 3829 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2) 3830 { 3831 uint32_t rt; 3832 uint32_t rt2; 3833 uint32_t rn; 3834 uint32_t rm; 3835 uint32_t ra; 3836 bool pair; 3837 bool load; 3838 3839 if (aarch64_mlxl_p (insn_2) 3840 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load)) 3841 { 3842 /* Any SIMD memory op is independent of the subsequent MLA 3843 by definition of the erratum. */ 3844 if (AARCH64_BIT (insn_1, 26)) 3845 return true; 3846 3847 /* If not SIMD, check for integer memory ops and MLA relationship. */ 3848 rn = AARCH64_RN (insn_2); 3849 ra = AARCH64_RA (insn_2); 3850 rm = AARCH64_RM (insn_2); 3851 3852 /* If this is a load and there's a true(RAW) dependency, we are safe 3853 and this is not an erratum sequence. */ 3854 if (load && 3855 (rt == rn || rt == rm || rt == ra 3856 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra)))) 3857 return false; 3858 3859 /* We conservatively put out stubs for all other cases (including 3860 writebacks). */ 3861 return true; 3862 } 3863 3864 return false; 3865 } 3866 3867 /* Used to order a list of mapping symbols by address. */ 3868 3869 static int 3870 elf_aarch64_compare_mapping (const void *a, const void *b) 3871 { 3872 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a; 3873 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b; 3874 3875 if (amap->vma > bmap->vma) 3876 return 1; 3877 else if (amap->vma < bmap->vma) 3878 return -1; 3879 else if (amap->type > bmap->type) 3880 /* Ensure results do not depend on the host qsort for objects with 3881 multiple mapping symbols at the same address by sorting on type 3882 after vma. */ 3883 return 1; 3884 else if (amap->type < bmap->type) 3885 return -1; 3886 else 3887 return 0; 3888 } 3889 3890 3891 static char * 3892 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes) 3893 { 3894 char *stub_name = (char *) bfd_malloc 3895 (strlen ("__erratum_835769_veneer_") + 16); 3896 if (stub_name != NULL) 3897 sprintf (stub_name,"__erratum_835769_veneer_%d", num_fixes); 3898 return stub_name; 3899 } 3900 3901 /* Scan for Cortex-A53 erratum 835769 sequence. 3902 3903 Return TRUE else FALSE on abnormal termination. */ 3904 3905 static bool 3906 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd, 3907 struct bfd_link_info *info, 3908 unsigned int *num_fixes_p) 3909 { 3910 asection *section; 3911 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info); 3912 unsigned int num_fixes = *num_fixes_p; 3913 3914 if (htab == NULL) 3915 return true; 3916 3917 for (section = input_bfd->sections; 3918 section != NULL; 3919 section = section->next) 3920 { 3921 bfd_byte *contents = NULL; 3922 struct _aarch64_elf_section_data *sec_data; 3923 unsigned int span; 3924 3925 if (elf_section_type (section) != SHT_PROGBITS 3926 || (elf_section_flags (section) & SHF_EXECINSTR) == 0 3927 || (section->flags & SEC_EXCLUDE) != 0 3928 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS) 3929 || (section->output_section == bfd_abs_section_ptr)) 3930 continue; 3931 3932 if (elf_section_data (section)->this_hdr.contents != NULL) 3933 contents = elf_section_data (section)->this_hdr.contents; 3934 else if (! 
bfd_malloc_and_get_section (input_bfd, section, &contents)) 3935 return false; 3936 3937 sec_data = elf_aarch64_section_data (section); 3938 3939 if (sec_data->mapcount) 3940 qsort (sec_data->map, sec_data->mapcount, 3941 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping); 3942 3943 for (span = 0; span < sec_data->mapcount; span++) 3944 { 3945 unsigned int span_start = sec_data->map[span].vma; 3946 unsigned int span_end = ((span == sec_data->mapcount - 1) 3947 ? sec_data->map[0].vma + section->size 3948 : sec_data->map[span + 1].vma); 3949 unsigned int i; 3950 char span_type = sec_data->map[span].type; 3951 3952 if (span_type == 'd') 3953 continue; 3954 3955 for (i = span_start; i + 4 < span_end; i += 4) 3956 { 3957 uint32_t insn_1 = bfd_getl32 (contents + i); 3958 uint32_t insn_2 = bfd_getl32 (contents + i + 4); 3959 3960 if (aarch64_erratum_sequence (insn_1, insn_2)) 3961 { 3962 struct elf_aarch64_stub_hash_entry *stub_entry; 3963 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes); 3964 if (! stub_name) 3965 return false; 3966 3967 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name, 3968 section, 3969 htab); 3970 if (! stub_entry) 3971 return false; 3972 3973 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer; 3974 stub_entry->target_section = section; 3975 stub_entry->target_value = i + 4; 3976 stub_entry->veneered_insn = insn_2; 3977 stub_entry->output_name = stub_name; 3978 num_fixes++; 3979 } 3980 } 3981 } 3982 if (elf_section_data (section)->this_hdr.contents == NULL) 3983 free (contents); 3984 } 3985 3986 *num_fixes_p = num_fixes; 3987 3988 return true; 3989 } 3990 3991 3992 /* Test if instruction INSN is ADRP. */ 3993 3994 static bool 3995 _bfd_aarch64_adrp_p (uint32_t insn) 3996 { 3997 return ((insn & AARCH64_ADRP_OP_MASK) == AARCH64_ADRP_OP); 3998 } 3999 4000 4001 /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. */ 4002 4003 static bool 4004 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2, 4005 uint32_t insn_3) 4006 { 4007 uint32_t rt; 4008 uint32_t rt2; 4009 bool pair; 4010 bool load; 4011 4012 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load) 4013 && (!pair 4014 || (pair && !load)) 4015 && AARCH64_LDST_UIMM (insn_3) 4016 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1)); 4017 } 4018 4019 4020 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence. 4021 4022 Return TRUE if section CONTENTS at offset I contains one of the 4023 erratum 843419 sequences, otherwise return FALSE. If a sequence is 4024 seen set P_VENEER_I to the offset of the final LOAD/STORE 4025 instruction in the sequence. 
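   Illustrative shape of the shorter, three-instruction form (assumed
   instructions, using the usual page-addressing idiom):

     adrp x3, sym               <- at an address ending in 0xff8 or 0xffc
     ldr  x2, [x1, #16]         <- any load/store that is not a load pair
     ldr  x1, [x3, #:lo12:sym]  <- unsigned-offset form, base == ADRP result

   The four-instruction form checked here simply allows one intervening
   instruction before that final load/store.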
4026 */ 4027 4028 static bool 4029 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma, 4030 bfd_vma i, bfd_vma span_end, 4031 bfd_vma *p_veneer_i) 4032 { 4033 uint32_t insn_1 = bfd_getl32 (contents + i); 4034 4035 if (!_bfd_aarch64_adrp_p (insn_1)) 4036 return false; 4037 4038 if (span_end < i + 12) 4039 return false; 4040 4041 uint32_t insn_2 = bfd_getl32 (contents + i + 4); 4042 uint32_t insn_3 = bfd_getl32 (contents + i + 8); 4043 4044 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc) 4045 return false; 4046 4047 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3)) 4048 { 4049 *p_veneer_i = i + 8; 4050 return true; 4051 } 4052 4053 if (span_end < i + 16) 4054 return false; 4055 4056 uint32_t insn_4 = bfd_getl32 (contents + i + 12); 4057 4058 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4)) 4059 { 4060 *p_veneer_i = i + 12; 4061 return true; 4062 } 4063 4064 return false; 4065 } 4066 4067 4068 /* Resize all stub sections. */ 4069 4070 static void 4071 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab) 4072 { 4073 asection *section; 4074 4075 /* OK, we've added some stubs. Find out the new size of the 4076 stub sections. */ 4077 for (section = htab->stub_bfd->sections; 4078 section != NULL; section = section->next) 4079 { 4080 /* Ignore non-stub sections. */ 4081 if (!strstr (section->name, STUB_SUFFIX)) 4082 continue; 4083 section->size = 0; 4084 } 4085 4086 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab); 4087 4088 for (section = htab->stub_bfd->sections; 4089 section != NULL; section = section->next) 4090 { 4091 if (!strstr (section->name, STUB_SUFFIX)) 4092 continue; 4093 4094 /* Add space for a branch. Add 8 bytes to keep section 8 byte aligned, 4095 as long branch stubs contain a 64-bit address. */ 4096 if (section->size) 4097 section->size += 8; 4098 4099 /* Ensure all stub sections have a size which is a multiple of 4100 4096. This is important in order to ensure that the insertion 4101 of stub sections does not in itself move existing code around 4102 in such a way that new errata sequences are created. We only do this 4103 when the ADRP workaround is enabled. If only the ADR workaround is 4104 enabled then the stubs workaround won't ever be used. */ 4105 if (htab->fix_erratum_843419 & ERRAT_ADRP) 4106 if (section->size) 4107 section->size = BFD_ALIGN (section->size, 0x1000); 4108 } 4109 } 4110 4111 /* Construct an erratum 843419 workaround stub name. */ 4112 4113 static char * 4114 _bfd_aarch64_erratum_843419_stub_name (asection *input_section, 4115 bfd_vma offset) 4116 { 4117 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1; 4118 char *stub_name = bfd_malloc (len); 4119 4120 if (stub_name != NULL) 4121 snprintf (stub_name, len, "e843419@%04x_%08x_%" BFD_VMA_FMT "x", 4122 input_section->owner->id, 4123 input_section->id, 4124 offset); 4125 return stub_name; 4126 } 4127 4128 /* Build a stub_entry structure describing an 843419 fixup. 4129 4130 The stub_entry constructed is populated with the bit pattern INSN 4131 of the instruction located at OFFSET within input SECTION. 4132 4133 Returns TRUE on success. 
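   The veneer is identified by a name of the form e843419@0001_00000007_1ff8
   (owning BFD id, section id and load/store offset; the values here are
   purely illustrative), so scanning the same erratum site a second time
   finds the entry already present in the stub hash table and only frees the
   duplicate name.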
*/ 4134 4135 static bool 4136 _bfd_aarch64_erratum_843419_fixup (uint32_t insn, 4137 bfd_vma adrp_offset, 4138 bfd_vma ldst_offset, 4139 asection *section, 4140 struct bfd_link_info *info) 4141 { 4142 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info); 4143 char *stub_name; 4144 struct elf_aarch64_stub_hash_entry *stub_entry; 4145 4146 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset); 4147 if (stub_name == NULL) 4148 return false; 4149 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name, 4150 false, false); 4151 if (stub_entry) 4152 { 4153 free (stub_name); 4154 return true; 4155 } 4156 4157 /* We always place an 843419 workaround veneer in the stub section 4158 attached to the input section in which an erratum sequence has 4159 been found. This ensures that later in the link process (in 4160 elfNN_aarch64_write_section) when we copy the veneered 4161 instruction from the input section into the stub section the 4162 copied instruction will have had any relocations applied to it. 4163 If we placed workaround veneers in any other stub section then we 4164 could not assume that all relocations have been processed on the 4165 corresponding input section at the point we output the stub 4166 section. */ 4167 4168 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab); 4169 if (stub_entry == NULL) 4170 { 4171 free (stub_name); 4172 return false; 4173 } 4174 4175 stub_entry->adrp_offset = adrp_offset; 4176 stub_entry->target_value = ldst_offset; 4177 stub_entry->target_section = section; 4178 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer; 4179 stub_entry->veneered_insn = insn; 4180 stub_entry->output_name = stub_name; 4181 4182 return true; 4183 } 4184 4185 4186 /* Scan an input section looking for the signature of erratum 843419. 4187 4188 Scans input SECTION in INPUT_BFD looking for erratum 843419 4189 signatures, for each signature found a stub_entry is created 4190 describing the location of the erratum for subsequent fixup. 4191 4192 Return TRUE on successful scan, FALSE on failure to scan. 4193 */ 4194 4195 static bool 4196 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section, 4197 struct bfd_link_info *info) 4198 { 4199 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info); 4200 4201 if (htab == NULL) 4202 return true; 4203 4204 if (elf_section_type (section) != SHT_PROGBITS 4205 || (elf_section_flags (section) & SHF_EXECINSTR) == 0 4206 || (section->flags & SEC_EXCLUDE) != 0 4207 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS) 4208 || (section->output_section == bfd_abs_section_ptr)) 4209 return true; 4210 4211 do 4212 { 4213 bfd_byte *contents = NULL; 4214 struct _aarch64_elf_section_data *sec_data; 4215 unsigned int span; 4216 4217 if (elf_section_data (section)->this_hdr.contents != NULL) 4218 contents = elf_section_data (section)->this_hdr.contents; 4219 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents)) 4220 return false; 4221 4222 sec_data = elf_aarch64_section_data (section); 4223 4224 if (sec_data->mapcount) 4225 qsort (sec_data->map, sec_data->mapcount, 4226 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping); 4227 4228 for (span = 0; span < sec_data->mapcount; span++) 4229 { 4230 unsigned int span_start = sec_data->map[span].vma; 4231 unsigned int span_end = ((span == sec_data->mapcount - 1) 4232 ? 
sec_data->map[0].vma + section->size 4233 : sec_data->map[span + 1].vma); 4234 unsigned int i; 4235 char span_type = sec_data->map[span].type; 4236 4237 if (span_type == 'd') 4238 continue; 4239 4240 for (i = span_start; i + 8 < span_end; i += 4) 4241 { 4242 bfd_vma vma = (section->output_section->vma 4243 + section->output_offset 4244 + i); 4245 bfd_vma veneer_i; 4246 4247 if (_bfd_aarch64_erratum_843419_p 4248 (contents, vma, i, span_end, &veneer_i)) 4249 { 4250 uint32_t insn = bfd_getl32 (contents + veneer_i); 4251 4252 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i, 4253 section, info)) 4254 return false; 4255 } 4256 } 4257 } 4258 4259 if (elf_section_data (section)->this_hdr.contents == NULL) 4260 free (contents); 4261 } 4262 while (0); 4263 4264 return true; 4265 } 4266 4267 4268 /* Determine and set the size of the stub section for a final link. 4269 4270 The basic idea here is to examine all the relocations looking for 4271 PC-relative calls to a target that is unreachable with a "bl" 4272 instruction. */ 4273 4274 bool 4275 elfNN_aarch64_size_stubs (bfd *output_bfd, 4276 bfd *stub_bfd, 4277 struct bfd_link_info *info, 4278 bfd_signed_vma group_size, 4279 asection * (*add_stub_section) (const char *, 4280 asection *), 4281 void (*layout_sections_again) (void)) 4282 { 4283 bfd_size_type stub_group_size; 4284 bool stubs_always_before_branch; 4285 bool stub_changed = false; 4286 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info); 4287 unsigned int num_erratum_835769_fixes = 0; 4288 4289 /* Propagate mach to stub bfd, because it may not have been 4290 finalized when we created stub_bfd. */ 4291 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd), 4292 bfd_get_mach (output_bfd)); 4293 4294 /* Stash our params away. */ 4295 htab->stub_bfd = stub_bfd; 4296 htab->add_stub_section = add_stub_section; 4297 htab->layout_sections_again = layout_sections_again; 4298 stubs_always_before_branch = group_size < 0; 4299 if (group_size < 0) 4300 stub_group_size = -group_size; 4301 else 4302 stub_group_size = group_size; 4303 4304 if (stub_group_size == 1) 4305 { 4306 /* Default values. */ 4307 /* AArch64 branch range is +-128MB. The value used is 1MB less. 
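   The 1 MB of slack is assumed here to be headroom for the stub sections
   themselves, which are inserted after the groups are formed and therefore
   grow the distance a branch has to cover.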
*/ 4308 stub_group_size = 127 * 1024 * 1024; 4309 } 4310 4311 group_sections (htab, stub_group_size, stubs_always_before_branch); 4312 4313 (*htab->layout_sections_again) (); 4314 4315 if (htab->fix_erratum_835769) 4316 { 4317 bfd *input_bfd; 4318 4319 for (input_bfd = info->input_bfds; 4320 input_bfd != NULL; input_bfd = input_bfd->link.next) 4321 { 4322 if (!is_aarch64_elf (input_bfd) 4323 || (input_bfd->flags & BFD_LINKER_CREATED) != 0) 4324 continue; 4325 4326 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info, 4327 &num_erratum_835769_fixes)) 4328 return false; 4329 } 4330 4331 _bfd_aarch64_resize_stubs (htab); 4332 (*htab->layout_sections_again) (); 4333 } 4334 4335 if (htab->fix_erratum_843419 != ERRAT_NONE) 4336 { 4337 bfd *input_bfd; 4338 4339 for (input_bfd = info->input_bfds; 4340 input_bfd != NULL; 4341 input_bfd = input_bfd->link.next) 4342 { 4343 asection *section; 4344 4345 if (!is_aarch64_elf (input_bfd) 4346 || (input_bfd->flags & BFD_LINKER_CREATED) != 0) 4347 continue; 4348 4349 for (section = input_bfd->sections; 4350 section != NULL; 4351 section = section->next) 4352 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info)) 4353 return false; 4354 } 4355 4356 _bfd_aarch64_resize_stubs (htab); 4357 (*htab->layout_sections_again) (); 4358 } 4359 4360 while (1) 4361 { 4362 bfd *input_bfd; 4363 4364 for (input_bfd = info->input_bfds; 4365 input_bfd != NULL; input_bfd = input_bfd->link.next) 4366 { 4367 Elf_Internal_Shdr *symtab_hdr; 4368 asection *section; 4369 Elf_Internal_Sym *local_syms = NULL; 4370 4371 if (!is_aarch64_elf (input_bfd) 4372 || (input_bfd->flags & BFD_LINKER_CREATED) != 0) 4373 continue; 4374 4375 /* We'll need the symbol table in a second. */ 4376 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr; 4377 if (symtab_hdr->sh_info == 0) 4378 continue; 4379 4380 /* Walk over each section attached to the input bfd. */ 4381 for (section = input_bfd->sections; 4382 section != NULL; section = section->next) 4383 { 4384 Elf_Internal_Rela *internal_relocs, *irelaend, *irela; 4385 4386 /* If there aren't any relocs, then there's nothing more 4387 to do. */ 4388 if ((section->flags & SEC_RELOC) == 0 4389 || section->reloc_count == 0 4390 || (section->flags & SEC_CODE) == 0) 4391 continue; 4392 4393 /* If this section is a link-once section that will be 4394 discarded, then don't create any stubs. */ 4395 if (section->output_section == NULL 4396 || section->output_section->owner != output_bfd) 4397 continue; 4398 4399 /* Get the relocs. */ 4400 internal_relocs 4401 = _bfd_elf_link_read_relocs (input_bfd, section, NULL, 4402 NULL, info->keep_memory); 4403 if (internal_relocs == NULL) 4404 goto error_ret_free_local; 4405 4406 /* Now examine each relocation. 
*/ 4407 irela = internal_relocs; 4408 irelaend = irela + section->reloc_count; 4409 for (; irela < irelaend; irela++) 4410 { 4411 unsigned int r_type, r_indx; 4412 enum elf_aarch64_stub_type stub_type; 4413 struct elf_aarch64_stub_hash_entry *stub_entry; 4414 asection *sym_sec; 4415 bfd_vma sym_value; 4416 bfd_vma destination; 4417 struct elf_aarch64_link_hash_entry *hash; 4418 const char *sym_name; 4419 char *stub_name; 4420 const asection *id_sec; 4421 unsigned char st_type; 4422 bfd_size_type len; 4423 4424 r_type = ELFNN_R_TYPE (irela->r_info); 4425 r_indx = ELFNN_R_SYM (irela->r_info); 4426 4427 if (r_type >= (unsigned int) R_AARCH64_end) 4428 { 4429 bfd_set_error (bfd_error_bad_value); 4430 error_ret_free_internal: 4431 if (elf_section_data (section)->relocs == NULL) 4432 free (internal_relocs); 4433 goto error_ret_free_local; 4434 } 4435 4436 /* Only look for stubs on unconditional branch and 4437 branch and link instructions. */ 4438 if (r_type != (unsigned int) AARCH64_R (CALL26) 4439 && r_type != (unsigned int) AARCH64_R (JUMP26)) 4440 continue; 4441 4442 /* Now determine the call target, its name, value, 4443 section. */ 4444 sym_sec = NULL; 4445 sym_value = 0; 4446 destination = 0; 4447 hash = NULL; 4448 sym_name = NULL; 4449 if (r_indx < symtab_hdr->sh_info) 4450 { 4451 /* It's a local symbol. */ 4452 Elf_Internal_Sym *sym; 4453 Elf_Internal_Shdr *hdr; 4454 4455 if (local_syms == NULL) 4456 { 4457 local_syms 4458 = (Elf_Internal_Sym *) symtab_hdr->contents; 4459 if (local_syms == NULL) 4460 local_syms 4461 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr, 4462 symtab_hdr->sh_info, 0, 4463 NULL, NULL, NULL); 4464 if (local_syms == NULL) 4465 goto error_ret_free_internal; 4466 } 4467 4468 sym = local_syms + r_indx; 4469 hdr = elf_elfsections (input_bfd)[sym->st_shndx]; 4470 sym_sec = hdr->bfd_section; 4471 if (!sym_sec) 4472 /* This is an undefined symbol. It can never 4473 be resolved. */ 4474 continue; 4475 4476 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION) 4477 sym_value = sym->st_value; 4478 destination = (sym_value + irela->r_addend 4479 + sym_sec->output_offset 4480 + sym_sec->output_section->vma); 4481 st_type = ELF_ST_TYPE (sym->st_info); 4482 sym_name 4483 = bfd_elf_string_from_elf_section (input_bfd, 4484 symtab_hdr->sh_link, 4485 sym->st_name); 4486 } 4487 else 4488 { 4489 int e_indx; 4490 4491 e_indx = r_indx - symtab_hdr->sh_info; 4492 hash = ((struct elf_aarch64_link_hash_entry *) 4493 elf_sym_hashes (input_bfd)[e_indx]); 4494 4495 while (hash->root.root.type == bfd_link_hash_indirect 4496 || hash->root.root.type == bfd_link_hash_warning) 4497 hash = ((struct elf_aarch64_link_hash_entry *) 4498 hash->root.root.u.i.link); 4499 4500 if (hash->root.root.type == bfd_link_hash_defined 4501 || hash->root.root.type == bfd_link_hash_defweak) 4502 { 4503 struct elf_aarch64_link_hash_table *globals = 4504 elf_aarch64_hash_table (info); 4505 sym_sec = hash->root.root.u.def.section; 4506 sym_value = hash->root.root.u.def.value; 4507 /* For a destination in a shared library, 4508 use the PLT stub as target address to 4509 decide whether a branch stub is 4510 needed. 
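   Spelled out: a call that will be routed through the PLT branches to the
   local PLT entry rather than to the symbol's eventual definition, so it is
   the PLT entry's address that has to be reachable from the branch site.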
*/ 4511 if (globals->root.splt != NULL && hash != NULL 4512 && hash->root.plt.offset != (bfd_vma) - 1) 4513 { 4514 sym_sec = globals->root.splt; 4515 sym_value = hash->root.plt.offset; 4516 if (sym_sec->output_section != NULL) 4517 destination = (sym_value 4518 + sym_sec->output_offset 4519 + 4520 sym_sec->output_section->vma); 4521 } 4522 else if (sym_sec->output_section != NULL) 4523 destination = (sym_value + irela->r_addend 4524 + sym_sec->output_offset 4525 + sym_sec->output_section->vma); 4526 } 4527 else if (hash->root.root.type == bfd_link_hash_undefined 4528 || (hash->root.root.type 4529 == bfd_link_hash_undefweak)) 4530 { 4531 /* For a shared library, use the PLT stub as 4532 target address to decide whether a long 4533 branch stub is needed. 4534 For absolute code, they cannot be handled. */ 4535 struct elf_aarch64_link_hash_table *globals = 4536 elf_aarch64_hash_table (info); 4537 4538 if (globals->root.splt != NULL && hash != NULL 4539 && hash->root.plt.offset != (bfd_vma) - 1) 4540 { 4541 sym_sec = globals->root.splt; 4542 sym_value = hash->root.plt.offset; 4543 if (sym_sec->output_section != NULL) 4544 destination = (sym_value 4545 + sym_sec->output_offset 4546 + 4547 sym_sec->output_section->vma); 4548 } 4549 else 4550 continue; 4551 } 4552 else 4553 { 4554 bfd_set_error (bfd_error_bad_value); 4555 goto error_ret_free_internal; 4556 } 4557 st_type = ELF_ST_TYPE (hash->root.type); 4558 sym_name = hash->root.root.root.string; 4559 } 4560 4561 /* Determine what (if any) linker stub is needed. */ 4562 stub_type = aarch64_type_of_stub (section, irela, sym_sec, 4563 st_type, destination); 4564 if (stub_type == aarch64_stub_none) 4565 continue; 4566 4567 /* Support for grouping stub sections. */ 4568 id_sec = htab->stub_group[section->id].link_sec; 4569 4570 /* Get the name of this stub. */ 4571 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash, 4572 irela); 4573 if (!stub_name) 4574 goto error_ret_free_internal; 4575 4576 stub_entry = 4577 aarch64_stub_hash_lookup (&htab->stub_hash_table, 4578 stub_name, false, false); 4579 if (stub_entry != NULL) 4580 { 4581 /* The proper stub has already been created. */ 4582 free (stub_name); 4583 /* Always update this stub's target since it may have 4584 changed after layout. */ 4585 stub_entry->target_value = sym_value + irela->r_addend; 4586 continue; 4587 } 4588 4589 stub_entry = _bfd_aarch64_add_stub_entry_in_group 4590 (stub_name, section, htab); 4591 if (stub_entry == NULL) 4592 { 4593 free (stub_name); 4594 goto error_ret_free_internal; 4595 } 4596 4597 stub_entry->target_value = sym_value + irela->r_addend; 4598 stub_entry->target_section = sym_sec; 4599 stub_entry->stub_type = stub_type; 4600 stub_entry->h = hash; 4601 stub_entry->st_type = st_type; 4602 4603 if (sym_name == NULL) 4604 sym_name = "unnamed"; 4605 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name); 4606 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len); 4607 if (stub_entry->output_name == NULL) 4608 { 4609 free (stub_name); 4610 goto error_ret_free_internal; 4611 } 4612 4613 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME, 4614 sym_name); 4615 4616 stub_changed = true; 4617 } 4618 4619 /* We're done with the internal relocs, free them. */ 4620 if (elf_section_data (section)->relocs == NULL) 4621 free (internal_relocs); 4622 } 4623 } 4624 4625 if (!stub_changed) 4626 break; 4627 4628 _bfd_aarch64_resize_stubs (htab); 4629 4630 /* Ask the linker to do its stuff. 
*/ 4631 (*htab->layout_sections_again) (); 4632 stub_changed = false; 4633 } 4634 4635 return true; 4636 4637 error_ret_free_local: 4638 return false; 4639 } 4640 4641 /* Build all the stubs associated with the current output file. The 4642 stubs are kept in a hash table attached to the main linker hash 4643 table. We also set up the .plt entries for statically linked PIC 4644 functions here. This function is called via aarch64_elf_finish in the 4645 linker. */ 4646 4647 bool 4648 elfNN_aarch64_build_stubs (struct bfd_link_info *info) 4649 { 4650 asection *stub_sec; 4651 struct bfd_hash_table *table; 4652 struct elf_aarch64_link_hash_table *htab; 4653 4654 htab = elf_aarch64_hash_table (info); 4655 4656 for (stub_sec = htab->stub_bfd->sections; 4657 stub_sec != NULL; stub_sec = stub_sec->next) 4658 { 4659 bfd_size_type size; 4660 4661 /* Ignore non-stub sections. */ 4662 if (!strstr (stub_sec->name, STUB_SUFFIX)) 4663 continue; 4664 4665 /* Allocate memory to hold the linker stubs. */ 4666 size = stub_sec->size; 4667 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size); 4668 if (stub_sec->contents == NULL && size != 0) 4669 return false; 4670 stub_sec->size = 0; 4671 4672 /* Add a branch around the stub section, and a nop, to keep it 8 byte 4673 aligned, as long branch stubs contain a 64-bit address. */ 4674 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents); 4675 bfd_putl32 (INSN_NOP, stub_sec->contents + 4); 4676 stub_sec->size += 8; 4677 } 4678 4679 /* Build the stubs as directed by the stub hash table. */ 4680 table = &htab->stub_hash_table; 4681 bfd_hash_traverse (table, aarch64_build_one_stub, info); 4682 4683 return true; 4684 } 4685 4686 4687 /* Add an entry to the code/data map for section SEC. */ 4688 4689 static void 4690 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma) 4691 { 4692 struct _aarch64_elf_section_data *sec_data = 4693 elf_aarch64_section_data (sec); 4694 unsigned int newidx; 4695 4696 if (sec_data->map == NULL) 4697 { 4698 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map)); 4699 sec_data->mapcount = 0; 4700 sec_data->mapsize = 1; 4701 } 4702 4703 newidx = sec_data->mapcount++; 4704 4705 if (sec_data->mapcount > sec_data->mapsize) 4706 { 4707 sec_data->mapsize *= 2; 4708 sec_data->map = bfd_realloc_or_free 4709 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map)); 4710 } 4711 4712 if (sec_data->map) 4713 { 4714 sec_data->map[newidx].vma = vma; 4715 sec_data->map[newidx].type = type; 4716 } 4717 } 4718 4719 4720 /* Initialise maps of insn/data for input BFDs. */ 4721 void 4722 bfd_elfNN_aarch64_init_maps (bfd *abfd) 4723 { 4724 Elf_Internal_Sym *isymbuf; 4725 Elf_Internal_Shdr *hdr; 4726 unsigned int i, localsyms; 4727 4728 /* Make sure that we are dealing with an AArch64 elf binary. */ 4729 if (!is_aarch64_elf (abfd)) 4730 return; 4731 4732 if ((abfd->flags & DYNAMIC) != 0) 4733 return; 4734 4735 hdr = &elf_symtab_hdr (abfd); 4736 localsyms = hdr->sh_info; 4737 4738 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field 4739 should contain the number of local symbols, which should come before any 4740 global symbols. Mapping symbols are always local. */ 4741 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL); 4742 4743 /* No internal symbols read? Skip this BFD. 
*/ 4744 if (isymbuf == NULL) 4745 return; 4746 4747 for (i = 0; i < localsyms; i++) 4748 { 4749 Elf_Internal_Sym *isym = &isymbuf[i]; 4750 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx); 4751 const char *name; 4752 4753 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL) 4754 { 4755 name = bfd_elf_string_from_elf_section (abfd, 4756 hdr->sh_link, 4757 isym->st_name); 4758 4759 if (bfd_is_aarch64_special_symbol_name 4760 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP)) 4761 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value); 4762 } 4763 } 4764 } 4765 4766 static void 4767 setup_plt_values (struct bfd_link_info *link_info, 4768 aarch64_plt_type plt_type) 4769 { 4770 struct elf_aarch64_link_hash_table *globals; 4771 globals = elf_aarch64_hash_table (link_info); 4772 4773 if (plt_type == PLT_BTI_PAC) 4774 { 4775 globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry; 4776 4777 /* Only in ET_EXEC we need PLTn with BTI. */ 4778 if (bfd_link_pde (link_info)) 4779 { 4780 globals->plt_entry_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE; 4781 globals->plt_entry = elfNN_aarch64_small_plt_bti_pac_entry; 4782 } 4783 else 4784 { 4785 globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE; 4786 globals->plt_entry = elfNN_aarch64_small_plt_pac_entry; 4787 } 4788 } 4789 else if (plt_type == PLT_BTI) 4790 { 4791 globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry; 4792 4793 /* Only in ET_EXEC we need PLTn with BTI. */ 4794 if (bfd_link_pde (link_info)) 4795 { 4796 globals->plt_entry_size = PLT_BTI_SMALL_ENTRY_SIZE; 4797 globals->plt_entry = elfNN_aarch64_small_plt_bti_entry; 4798 } 4799 } 4800 else if (plt_type == PLT_PAC) 4801 { 4802 globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE; 4803 globals->plt_entry = elfNN_aarch64_small_plt_pac_entry; 4804 } 4805 } 4806 4807 /* Set option values needed during linking. */ 4808 void 4809 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd, 4810 struct bfd_link_info *link_info, 4811 int no_enum_warn, 4812 int no_wchar_warn, int pic_veneer, 4813 int fix_erratum_835769, 4814 erratum_84319_opts fix_erratum_843419, 4815 int no_apply_dynamic_relocs, 4816 aarch64_bti_pac_info bp_info) 4817 { 4818 struct elf_aarch64_link_hash_table *globals; 4819 4820 globals = elf_aarch64_hash_table (link_info); 4821 globals->pic_veneer = pic_veneer; 4822 globals->fix_erratum_835769 = fix_erratum_835769; 4823 /* If the default options are used, then ERRAT_ADR will be set by default 4824 which will enable the ADRP->ADR workaround for the erratum 843419 4825 workaround. 
*/ 4826 globals->fix_erratum_843419 = fix_erratum_843419; 4827 globals->no_apply_dynamic_relocs = no_apply_dynamic_relocs; 4828 4829 BFD_ASSERT (is_aarch64_elf (output_bfd)); 4830 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn; 4831 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn; 4832 4833 switch (bp_info.bti_type) 4834 { 4835 case BTI_WARN: 4836 elf_aarch64_tdata (output_bfd)->no_bti_warn = 0; 4837 elf_aarch64_tdata (output_bfd)->gnu_and_prop 4838 |= GNU_PROPERTY_AARCH64_FEATURE_1_BTI; 4839 break; 4840 4841 default: 4842 break; 4843 } 4844 elf_aarch64_tdata (output_bfd)->plt_type = bp_info.plt_type; 4845 setup_plt_values (link_info, bp_info.plt_type); 4846 } 4847 4848 static bfd_vma 4849 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h, 4850 struct elf_aarch64_link_hash_table 4851 *globals, struct bfd_link_info *info, 4852 bfd_vma value, bfd *output_bfd, 4853 bool *unresolved_reloc_p) 4854 { 4855 bfd_vma off = (bfd_vma) - 1; 4856 asection *basegot = globals->root.sgot; 4857 bool dyn = globals->root.dynamic_sections_created; 4858 4859 if (h != NULL) 4860 { 4861 BFD_ASSERT (basegot != NULL); 4862 off = h->got.offset; 4863 BFD_ASSERT (off != (bfd_vma) - 1); 4864 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h) 4865 || (bfd_link_pic (info) 4866 && SYMBOL_REFERENCES_LOCAL (info, h)) 4867 || (ELF_ST_VISIBILITY (h->other) 4868 && h->root.type == bfd_link_hash_undefweak)) 4869 { 4870 /* This is actually a static link, or it is a -Bsymbolic link 4871 and the symbol is defined locally. We must initialize this 4872 entry in the global offset table. Since the offset must 4873 always be a multiple of 8 (4 in the case of ILP32), we use 4874 the least significant bit to record whether we have 4875 initialized it already. 4876 When doing a dynamic link, we create a .rel(a).got relocation 4877 entry to initialize the value. This is done in the 4878 finish_dynamic_symbol routine. */ 4879 if ((off & 1) != 0) 4880 off &= ~1; 4881 else 4882 { 4883 bfd_put_NN (output_bfd, value, basegot->contents + off); 4884 h->got.offset |= 1; 4885 } 4886 } 4887 else 4888 *unresolved_reloc_p = false; 4889 4890 off = off + basegot->output_section->vma + basegot->output_offset; 4891 } 4892 4893 return off; 4894 } 4895 4896 /* Change R_TYPE to a more efficient access model where possible, 4897 return the new reloc type. */ 4898 4899 static bfd_reloc_code_real_type 4900 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type, 4901 struct elf_link_hash_entry *h) 4902 { 4903 bool is_local = h == NULL; 4904 4905 switch (r_type) 4906 { 4907 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21: 4908 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21: 4909 return (is_local 4910 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 4911 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21); 4912 4913 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21: 4914 return (is_local 4915 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC 4916 : r_type); 4917 4918 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19: 4919 return (is_local 4920 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 4921 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19); 4922 4923 case BFD_RELOC_AARCH64_TLSDESC_LDR: 4924 return (is_local 4925 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC 4926 : BFD_RELOC_AARCH64_NONE); 4927 4928 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC: 4929 return (is_local 4930 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC 4931 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC); 4932 4933 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1: 4934 return (is_local 4935 ? 
BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 4936 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1); 4937 4938 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC: 4939 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC: 4940 return (is_local 4941 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC 4942 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC); 4943 4944 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 4945 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type; 4946 4947 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC: 4948 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type; 4949 4950 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19: 4951 return r_type; 4952 4953 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21: 4954 return (is_local 4955 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 4956 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19); 4957 4958 case BFD_RELOC_AARCH64_TLSDESC_ADD: 4959 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12: 4960 case BFD_RELOC_AARCH64_TLSDESC_CALL: 4961 /* Instructions with these relocations will become NOPs. */ 4962 return BFD_RELOC_AARCH64_NONE; 4963 4964 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC: 4965 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21: 4966 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21: 4967 return is_local ? BFD_RELOC_AARCH64_NONE : r_type; 4968 4969 #if ARCH_SIZE == 64 4970 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC: 4971 return is_local 4972 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC 4973 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC; 4974 4975 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1: 4976 return is_local 4977 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 4978 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1; 4979 #endif 4980 4981 default: 4982 break; 4983 } 4984 4985 return r_type; 4986 } 4987 4988 static unsigned int 4989 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type) 4990 { 4991 switch (r_type) 4992 { 4993 case BFD_RELOC_AARCH64_ADR_GOT_PAGE: 4994 case BFD_RELOC_AARCH64_GOT_LD_PREL19: 4995 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14: 4996 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC: 4997 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15: 4998 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15: 4999 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC: 5000 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC: 5001 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1: 5002 return GOT_NORMAL; 5003 5004 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC: 5005 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21: 5006 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21: 5007 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC: 5008 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1: 5009 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC: 5010 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21: 5011 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21: 5012 return GOT_TLS_GD; 5013 5014 case BFD_RELOC_AARCH64_TLSDESC_ADD: 5015 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12: 5016 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21: 5017 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21: 5018 case BFD_RELOC_AARCH64_TLSDESC_CALL: 5019 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC: 5020 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12: 5021 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19: 5022 case BFD_RELOC_AARCH64_TLSDESC_LDR: 5023 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC: 5024 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1: 5025 return GOT_TLSDESC_GD; 5026 5027 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 5028 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC: 5029 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 5030 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19: 5031 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: 5032 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1: 5033 return 
GOT_TLS_IE; 5034 5035 default: 5036 break; 5037 } 5038 return GOT_UNKNOWN; 5039 } 5040 5041 static bool 5042 aarch64_can_relax_tls (bfd *input_bfd, 5043 struct bfd_link_info *info, 5044 bfd_reloc_code_real_type r_type, 5045 struct elf_link_hash_entry *h, 5046 unsigned long r_symndx) 5047 { 5048 unsigned int symbol_got_type; 5049 unsigned int reloc_got_type; 5050 5051 if (! IS_AARCH64_TLS_RELAX_RELOC (r_type)) 5052 return false; 5053 5054 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx); 5055 reloc_got_type = aarch64_reloc_got_type (r_type); 5056 5057 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type)) 5058 return true; 5059 5060 if (!bfd_link_executable (info)) 5061 return false; 5062 5063 if (h && h->root.type == bfd_link_hash_undefweak) 5064 return false; 5065 5066 return true; 5067 } 5068 5069 /* Given the relocation code R_TYPE, return the relaxed bfd reloc 5070 enumerator. */ 5071 5072 static bfd_reloc_code_real_type 5073 aarch64_tls_transition (bfd *input_bfd, 5074 struct bfd_link_info *info, 5075 unsigned int r_type, 5076 struct elf_link_hash_entry *h, 5077 unsigned long r_symndx) 5078 { 5079 bfd_reloc_code_real_type bfd_r_type 5080 = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type); 5081 5082 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx)) 5083 return bfd_r_type; 5084 5085 return aarch64_tls_transition_without_check (bfd_r_type, h); 5086 } 5087 5088 /* Return the base VMA address which should be subtracted from real addresses 5089 when resolving R_AARCH64_TLS_DTPREL relocation. */ 5090 5091 static bfd_vma 5092 dtpoff_base (struct bfd_link_info *info) 5093 { 5094 /* If tls_sec is NULL, we should have signalled an error already. */ 5095 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL); 5096 return elf_hash_table (info)->tls_sec->vma; 5097 } 5098 5099 /* Return the base VMA address which should be subtracted from real addresses 5100 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */ 5101 5102 static bfd_vma 5103 tpoff_base (struct bfd_link_info *info) 5104 { 5105 struct elf_link_hash_table *htab = elf_hash_table (info); 5106 5107 /* If tls_sec is NULL, we should have signalled an error already. */ 5108 BFD_ASSERT (htab->tls_sec != NULL); 5109 5110 bfd_vma base = align_power ((bfd_vma) TCB_SIZE, 5111 htab->tls_sec->alignment_power); 5112 return htab->tls_sec->vma - base; 5113 } 5114 5115 static bfd_vma * 5116 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h, 5117 unsigned long r_symndx) 5118 { 5119 /* Calculate the address of the GOT entry for symbol 5120 referred to in h. 
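   Strictly, what is returned is a pointer to the recorded GOT offset (the
   generic h->got.offset for a global, or the backend-local copy otherwise);
   bit 0 of the stored value is used by the *_mark helpers below as the
   "already processed" flag, which is why symbol_got_offset masks it off
   before use.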
*/ 5121 if (h != NULL) 5122 return &h->got.offset; 5123 else 5124 { 5125 /* local symbol */ 5126 struct elf_aarch64_local_symbol *l; 5127 5128 l = elf_aarch64_locals (input_bfd); 5129 return &l[r_symndx].got_offset; 5130 } 5131 } 5132 5133 static void 5134 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h, 5135 unsigned long r_symndx) 5136 { 5137 bfd_vma *p; 5138 p = symbol_got_offset_ref (input_bfd, h, r_symndx); 5139 *p |= 1; 5140 } 5141 5142 static int 5143 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h, 5144 unsigned long r_symndx) 5145 { 5146 bfd_vma value; 5147 value = * symbol_got_offset_ref (input_bfd, h, r_symndx); 5148 return value & 1; 5149 } 5150 5151 static bfd_vma 5152 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h, 5153 unsigned long r_symndx) 5154 { 5155 bfd_vma value; 5156 value = * symbol_got_offset_ref (input_bfd, h, r_symndx); 5157 value &= ~1; 5158 return value; 5159 } 5160 5161 static bfd_vma * 5162 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h, 5163 unsigned long r_symndx) 5164 { 5165 /* Calculate the address of the GOT entry for symbol 5166 referred to in h. */ 5167 if (h != NULL) 5168 { 5169 struct elf_aarch64_link_hash_entry *eh; 5170 eh = (struct elf_aarch64_link_hash_entry *) h; 5171 return &eh->tlsdesc_got_jump_table_offset; 5172 } 5173 else 5174 { 5175 /* local symbol */ 5176 struct elf_aarch64_local_symbol *l; 5177 5178 l = elf_aarch64_locals (input_bfd); 5179 return &l[r_symndx].tlsdesc_got_jump_table_offset; 5180 } 5181 } 5182 5183 static void 5184 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h, 5185 unsigned long r_symndx) 5186 { 5187 bfd_vma *p; 5188 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx); 5189 *p |= 1; 5190 } 5191 5192 static int 5193 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd, 5194 struct elf_link_hash_entry *h, 5195 unsigned long r_symndx) 5196 { 5197 bfd_vma value; 5198 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx); 5199 return value & 1; 5200 } 5201 5202 static bfd_vma 5203 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h, 5204 unsigned long r_symndx) 5205 { 5206 bfd_vma value; 5207 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx); 5208 value &= ~1; 5209 return value; 5210 } 5211 5212 /* Data for make_branch_to_erratum_835769_stub(). */ 5213 5214 struct erratum_835769_branch_to_stub_data 5215 { 5216 struct bfd_link_info *info; 5217 asection *output_section; 5218 bfd_byte *contents; 5219 }; 5220 5221 /* Helper to insert branches to erratum 835769 stubs in the right 5222 places for a particular section. 
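   The veneered multiply-accumulate is copied into the stub by
   aarch64_build_one_stub (from the instruction word captured at scan time);
   here its original slot is overwritten with an unconditional branch to that
   stub, encoded as 0x14000000 | ((branch_offset >> 2) & 0x3ffffff).  For
   instance (an assumed offset, purely for illustration), a veneer 0x1000
   bytes ahead of the patched slot yields the word 0x14000400.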
*/ 5223 5224 static bool 5225 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry, 5226 void *in_arg) 5227 { 5228 struct elf_aarch64_stub_hash_entry *stub_entry; 5229 struct erratum_835769_branch_to_stub_data *data; 5230 bfd_byte *contents; 5231 unsigned long branch_insn = 0; 5232 bfd_vma veneered_insn_loc, veneer_entry_loc; 5233 bfd_signed_vma branch_offset; 5234 unsigned int target; 5235 bfd *abfd; 5236 5237 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry; 5238 data = (struct erratum_835769_branch_to_stub_data *) in_arg; 5239 5240 if (stub_entry->target_section != data->output_section 5241 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer) 5242 return true; 5243 5244 contents = data->contents; 5245 veneered_insn_loc = stub_entry->target_section->output_section->vma 5246 + stub_entry->target_section->output_offset 5247 + stub_entry->target_value; 5248 veneer_entry_loc = stub_entry->stub_sec->output_section->vma 5249 + stub_entry->stub_sec->output_offset 5250 + stub_entry->stub_offset; 5251 branch_offset = veneer_entry_loc - veneered_insn_loc; 5252 5253 abfd = stub_entry->target_section->owner; 5254 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc)) 5255 _bfd_error_handler 5256 (_("%pB: error: erratum 835769 stub out " 5257 "of range (input file too large)"), abfd); 5258 5259 target = stub_entry->target_value; 5260 branch_insn = 0x14000000; 5261 branch_offset >>= 2; 5262 branch_offset &= 0x3ffffff; 5263 branch_insn |= branch_offset; 5264 bfd_putl32 (branch_insn, &contents[target]); 5265 5266 return true; 5267 } 5268 5269 5270 static bool 5271 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry, 5272 void *in_arg) 5273 { 5274 struct elf_aarch64_stub_hash_entry *stub_entry 5275 = (struct elf_aarch64_stub_hash_entry *) gen_entry; 5276 struct erratum_835769_branch_to_stub_data *data 5277 = (struct erratum_835769_branch_to_stub_data *) in_arg; 5278 struct bfd_link_info *info; 5279 struct elf_aarch64_link_hash_table *htab; 5280 bfd_byte *contents; 5281 asection *section; 5282 bfd *abfd; 5283 bfd_vma place; 5284 uint32_t insn; 5285 5286 info = data->info; 5287 contents = data->contents; 5288 section = data->output_section; 5289 5290 htab = elf_aarch64_hash_table (info); 5291 5292 if (stub_entry->target_section != section 5293 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer) 5294 return true; 5295 5296 BFD_ASSERT (((htab->fix_erratum_843419 & ERRAT_ADRP) && stub_entry->stub_sec) 5297 || (htab->fix_erratum_843419 & ERRAT_ADR)); 5298 5299 /* Only update the stub section if we have one. We should always have one if 5300 we're allowed to use the ADRP errata workaround, otherwise it is not 5301 required. 
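   Two outcomes are possible below: when --fix-cortex-a53-843419=adr is in
   effect and the computed immediate still fits the ADR range (roughly
   +/-1MB), the ADRP is rewritten in place as an ADR of the same register and
   the stub is dropped; otherwise, under the full/adrp workaround, the
   veneered load/store is replaced by a branch to the stub section, much as
   for erratum 835769.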
*/ 5302 if (stub_entry->stub_sec) 5303 { 5304 insn = bfd_getl32 (contents + stub_entry->target_value); 5305 bfd_putl32 (insn, 5306 stub_entry->stub_sec->contents + stub_entry->stub_offset); 5307 } 5308 5309 place = (section->output_section->vma + section->output_offset 5310 + stub_entry->adrp_offset); 5311 insn = bfd_getl32 (contents + stub_entry->adrp_offset); 5312 5313 if (!_bfd_aarch64_adrp_p (insn)) 5314 abort (); 5315 5316 bfd_signed_vma imm = 5317 (_bfd_aarch64_sign_extend 5318 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33) 5319 - (place & 0xfff)); 5320 5321 if ((htab->fix_erratum_843419 & ERRAT_ADR) 5322 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM)) 5323 { 5324 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm) 5325 | AARCH64_RT (insn)); 5326 bfd_putl32 (insn, contents + stub_entry->adrp_offset); 5327 /* Stub is not needed, don't map it out. */ 5328 stub_entry->stub_type = aarch64_stub_none; 5329 } 5330 else if (htab->fix_erratum_843419 & ERRAT_ADRP) 5331 { 5332 bfd_vma veneered_insn_loc; 5333 bfd_vma veneer_entry_loc; 5334 bfd_signed_vma branch_offset; 5335 uint32_t branch_insn; 5336 5337 veneered_insn_loc = stub_entry->target_section->output_section->vma 5338 + stub_entry->target_section->output_offset 5339 + stub_entry->target_value; 5340 veneer_entry_loc = stub_entry->stub_sec->output_section->vma 5341 + stub_entry->stub_sec->output_offset 5342 + stub_entry->stub_offset; 5343 branch_offset = veneer_entry_loc - veneered_insn_loc; 5344 5345 abfd = stub_entry->target_section->owner; 5346 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc)) 5347 _bfd_error_handler 5348 (_("%pB: error: erratum 843419 stub out " 5349 "of range (input file too large)"), abfd); 5350 5351 branch_insn = 0x14000000; 5352 branch_offset >>= 2; 5353 branch_offset &= 0x3ffffff; 5354 branch_insn |= branch_offset; 5355 bfd_putl32 (branch_insn, contents + stub_entry->target_value); 5356 } 5357 else 5358 { 5359 char imm_buf[128]; 5360 5361 sprintf (imm_buf, "%" BFD_VMA_FMT "x", imm); 5362 abfd = stub_entry->target_section->owner; 5363 _bfd_error_handler 5364 (_("%pB: error: erratum 843419 immediate 0x%s " 5365 "out of range for ADR (input file too large) and " 5366 "--fix-cortex-a53-843419=adr used. Run the linker with " 5367 "--fix-cortex-a53-843419=full instead"), abfd, imm_buf); 5368 bfd_set_error (bfd_error_bad_value); 5369 /* This function is called inside a hashtable traversal and the error 5370 handlers called above turn into non-fatal errors. Which means this 5371 case ld returns an exit code 0 and also produces a broken object file. 5372 To prevent this, issue a hard abort. */ 5373 BFD_FAIL (); 5374 } 5375 return true; 5376 } 5377 5378 5379 static bool 5380 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED, 5381 struct bfd_link_info *link_info, 5382 asection *sec, 5383 bfd_byte *contents) 5384 5385 { 5386 struct elf_aarch64_link_hash_table *globals = 5387 elf_aarch64_hash_table (link_info); 5388 5389 if (globals == NULL) 5390 return false; 5391 5392 /* Fix code to point to erratum 835769 stubs. 
*/ 5393 if (globals->fix_erratum_835769) 5394 { 5395 struct erratum_835769_branch_to_stub_data data; 5396 5397 data.info = link_info; 5398 data.output_section = sec; 5399 data.contents = contents; 5400 bfd_hash_traverse (&globals->stub_hash_table, 5401 make_branch_to_erratum_835769_stub, &data); 5402 } 5403 5404 if (globals->fix_erratum_843419) 5405 { 5406 struct erratum_835769_branch_to_stub_data data; 5407 5408 data.info = link_info; 5409 data.output_section = sec; 5410 data.contents = contents; 5411 bfd_hash_traverse (&globals->stub_hash_table, 5412 _bfd_aarch64_erratum_843419_branch_to_stub, &data); 5413 } 5414 5415 return false; 5416 } 5417 5418 /* Return TRUE if RELOC is a relocation against the base of GOT table. */ 5419 5420 static bool 5421 aarch64_relocation_aginst_gp_p (bfd_reloc_code_real_type reloc) 5422 { 5423 return (reloc == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14 5424 || reloc == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15 5425 || reloc == BFD_RELOC_AARCH64_LD64_GOTOFF_LO15 5426 || reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC 5427 || reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G1); 5428 } 5429 5430 /* Perform a relocation as part of a final link. The input relocation type 5431 should be TLS relaxed. */ 5432 5433 static bfd_reloc_status_type 5434 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto, 5435 bfd *input_bfd, 5436 bfd *output_bfd, 5437 asection *input_section, 5438 bfd_byte *contents, 5439 Elf_Internal_Rela *rel, 5440 bfd_vma value, 5441 struct bfd_link_info *info, 5442 asection *sym_sec, 5443 struct elf_link_hash_entry *h, 5444 bool *unresolved_reloc_p, 5445 bool save_addend, 5446 bfd_vma *saved_addend, 5447 Elf_Internal_Sym *sym) 5448 { 5449 Elf_Internal_Shdr *symtab_hdr; 5450 unsigned int r_type = howto->type; 5451 bfd_reloc_code_real_type bfd_r_type 5452 = elfNN_aarch64_bfd_reloc_from_howto (howto); 5453 unsigned long r_symndx; 5454 bfd_byte *hit_data = contents + rel->r_offset; 5455 bfd_vma place, off, got_entry_addr = 0; 5456 bfd_signed_vma signed_addend; 5457 struct elf_aarch64_link_hash_table *globals; 5458 bool weak_undef_p; 5459 bool relative_reloc; 5460 asection *base_got; 5461 bfd_vma orig_value = value; 5462 bool resolved_to_zero; 5463 bool abs_symbol_p; 5464 5465 globals = elf_aarch64_hash_table (info); 5466 5467 symtab_hdr = &elf_symtab_hdr (input_bfd); 5468 5469 BFD_ASSERT (is_aarch64_elf (input_bfd)); 5470 5471 r_symndx = ELFNN_R_SYM (rel->r_info); 5472 5473 place = input_section->output_section->vma 5474 + input_section->output_offset + rel->r_offset; 5475 5476 /* Get addend, accumulating the addend for consecutive relocs 5477 which refer to the same offset. */ 5478 signed_addend = saved_addend ? *saved_addend : 0; 5479 signed_addend += rel->r_addend; 5480 5481 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak 5482 : bfd_is_und_section (sym_sec)); 5483 abs_symbol_p = h != NULL && bfd_is_abs_symbol (&h->root); 5484 5485 5486 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle 5487 it here if it is defined in a non-shared object. */ 5488 if (h != NULL 5489 && h->type == STT_GNU_IFUNC 5490 && h->def_regular) 5491 { 5492 asection *plt; 5493 const char *name; 5494 bfd_vma addend = 0; 5495 5496 if ((input_section->flags & SEC_ALLOC) == 0) 5497 { 5498 /* If this is a SHT_NOTE section without SHF_ALLOC, treat 5499 STT_GNU_IFUNC symbol as STT_FUNC. 
*/ 5500 if (elf_section_type (input_section) == SHT_NOTE) 5501 goto skip_ifunc; 5502 5503 /* Dynamic relocs are not propagated for SEC_DEBUGGING 5504 sections because such sections are not SEC_ALLOC and 5505 thus ld.so will not process them. */ 5506 if ((input_section->flags & SEC_DEBUGGING) != 0) 5507 return bfd_reloc_ok; 5508 5509 if (h->root.root.string) 5510 name = h->root.root.string; 5511 else 5512 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, NULL); 5513 _bfd_error_handler 5514 /* xgettext:c-format */ 5515 (_("%pB(%pA+%#" PRIx64 "): " 5516 "unresolvable %s relocation against symbol `%s'"), 5517 input_bfd, input_section, (uint64_t) rel->r_offset, 5518 howto->name, name); 5519 bfd_set_error (bfd_error_bad_value); 5520 return bfd_reloc_notsupported; 5521 } 5522 else if (h->plt.offset == (bfd_vma) -1) 5523 goto bad_ifunc_reloc; 5524 5525 /* STT_GNU_IFUNC symbol must go through PLT. */ 5526 plt = globals->root.splt ? globals->root.splt : globals->root.iplt; 5527 value = (plt->output_section->vma + plt->output_offset + h->plt.offset); 5528 5529 switch (bfd_r_type) 5530 { 5531 default: 5532 bad_ifunc_reloc: 5533 if (h->root.root.string) 5534 name = h->root.root.string; 5535 else 5536 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, 5537 NULL); 5538 _bfd_error_handler 5539 /* xgettext:c-format */ 5540 (_("%pB: relocation %s against STT_GNU_IFUNC " 5541 "symbol `%s' isn't handled by %s"), input_bfd, 5542 howto->name, name, __FUNCTION__); 5543 bfd_set_error (bfd_error_bad_value); 5544 return bfd_reloc_notsupported; 5545 5546 case BFD_RELOC_AARCH64_NN: 5547 if (rel->r_addend != 0) 5548 { 5549 if (h->root.root.string) 5550 name = h->root.root.string; 5551 else 5552 name = bfd_elf_sym_name (input_bfd, symtab_hdr, 5553 sym, NULL); 5554 _bfd_error_handler 5555 /* xgettext:c-format */ 5556 (_("%pB: relocation %s against STT_GNU_IFUNC " 5557 "symbol `%s' has non-zero addend: %" PRId64), 5558 input_bfd, howto->name, name, (int64_t) rel->r_addend); 5559 bfd_set_error (bfd_error_bad_value); 5560 return bfd_reloc_notsupported; 5561 } 5562 5563 /* Generate dynamic relocation only when there is a 5564 non-GOT reference in a shared object. */ 5565 if (bfd_link_pic (info) && h->non_got_ref) 5566 { 5567 Elf_Internal_Rela outrel; 5568 asection *sreloc; 5569 5570 /* Need a dynamic relocation to get the real function 5571 address. */ 5572 outrel.r_offset = _bfd_elf_section_offset (output_bfd, 5573 info, 5574 input_section, 5575 rel->r_offset); 5576 if (outrel.r_offset == (bfd_vma) -1 5577 || outrel.r_offset == (bfd_vma) -2) 5578 abort (); 5579 5580 outrel.r_offset += (input_section->output_section->vma 5581 + input_section->output_offset); 5582 5583 if (h->dynindx == -1 5584 || h->forced_local 5585 || bfd_link_executable (info)) 5586 { 5587 /* This symbol is resolved locally. */ 5588 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE)); 5589 outrel.r_addend = (h->root.u.def.value 5590 + h->root.u.def.section->output_section->vma 5591 + h->root.u.def.section->output_offset); 5592 } 5593 else 5594 { 5595 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type); 5596 outrel.r_addend = 0; 5597 } 5598 5599 sreloc = globals->root.irelifunc; 5600 elf_append_rela (output_bfd, sreloc, &outrel); 5601 5602 /* If this reloc is against an external symbol, we 5603 do not want to fiddle with the addend. Otherwise, 5604 we need to include the symbol value so that it 5605 becomes an addend for the dynamic reloc. For an 5606 internal symbol, we have updated addend. 
*/ 5607 return bfd_reloc_ok; 5608 } 5609 /* FALLTHROUGH */ 5610 case BFD_RELOC_AARCH64_CALL26: 5611 case BFD_RELOC_AARCH64_JUMP26: 5612 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 5613 place, value, 5614 signed_addend, 5615 weak_undef_p); 5616 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, 5617 howto, value); 5618 case BFD_RELOC_AARCH64_ADR_GOT_PAGE: 5619 case BFD_RELOC_AARCH64_GOT_LD_PREL19: 5620 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14: 5621 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC: 5622 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15: 5623 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC: 5624 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1: 5625 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15: 5626 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC: 5627 base_got = globals->root.sgot; 5628 off = h->got.offset; 5629 5630 if (base_got == NULL) 5631 abort (); 5632 5633 if (off == (bfd_vma) -1) 5634 { 5635 bfd_vma plt_index; 5636 5637 /* We can't use h->got.offset here to save state, or 5638 even just remember the offset, as finish_dynamic_symbol 5639 would use that as offset into .got. */ 5640 5641 if (globals->root.splt != NULL) 5642 { 5643 plt_index = ((h->plt.offset - globals->plt_header_size) / 5644 globals->plt_entry_size); 5645 off = (plt_index + 3) * GOT_ENTRY_SIZE; 5646 base_got = globals->root.sgotplt; 5647 } 5648 else 5649 { 5650 plt_index = h->plt.offset / globals->plt_entry_size; 5651 off = plt_index * GOT_ENTRY_SIZE; 5652 base_got = globals->root.igotplt; 5653 } 5654 5655 if (h->dynindx == -1 5656 || h->forced_local 5657 || info->symbolic) 5658 { 5659 /* This references the local definition. We must 5660 initialize this entry in the global offset table. 5661 Since the offset must always be a multiple of 8, 5662 we use the least significant bit to record 5663 whether we have initialized it already. 5664 5665 When doing a dynamic link, we create a .rela.got 5666 relocation entry to initialize the value. This 5667 is done in the finish_dynamic_symbol routine. */ 5668 if ((off & 1) != 0) 5669 off &= ~1; 5670 else 5671 { 5672 bfd_put_NN (output_bfd, value, 5673 base_got->contents + off); 5674 /* Note that this is harmless as -1 | 1 still is -1. */ 5675 h->got.offset |= 1; 5676 } 5677 } 5678 value = (base_got->output_section->vma 5679 + base_got->output_offset + off); 5680 } 5681 else 5682 value = aarch64_calculate_got_entry_vma (h, globals, info, 5683 value, output_bfd, 5684 unresolved_reloc_p); 5685 5686 if (aarch64_relocation_aginst_gp_p (bfd_r_type)) 5687 addend = (globals->root.sgot->output_section->vma 5688 + globals->root.sgot->output_offset); 5689 5690 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 5691 place, value, 5692 addend, weak_undef_p); 5693 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value); 5694 case BFD_RELOC_AARCH64_ADD_LO12: 5695 case BFD_RELOC_AARCH64_ADR_HI21_PCREL: 5696 break; 5697 } 5698 } 5699 5700 skip_ifunc: 5701 resolved_to_zero = (h != NULL 5702 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h)); 5703 5704 switch (bfd_r_type) 5705 { 5706 case BFD_RELOC_AARCH64_NONE: 5707 case BFD_RELOC_AARCH64_TLSDESC_ADD: 5708 case BFD_RELOC_AARCH64_TLSDESC_CALL: 5709 case BFD_RELOC_AARCH64_TLSDESC_LDR: 5710 *unresolved_reloc_p = false; 5711 return bfd_reloc_ok; 5712 5713 case BFD_RELOC_AARCH64_NN: 5714 5715 /* When generating a shared object or relocatable executable, these 5716 relocations are copied into the output file to be resolved at 5717 run time. 
*/ 5718 if (((bfd_link_pic (info) 5719 || globals->root.is_relocatable_executable) 5720 && (input_section->flags & SEC_ALLOC) 5721 && (h == NULL 5722 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 5723 && !resolved_to_zero) 5724 || h->root.type != bfd_link_hash_undefweak)) 5725 /* Or we are creating an executable, we may need to keep relocations 5726 for symbols satisfied by a dynamic library if we manage to avoid 5727 copy relocs for the symbol. */ 5728 || (ELIMINATE_COPY_RELOCS 5729 && !bfd_link_pic (info) 5730 && h != NULL 5731 && (input_section->flags & SEC_ALLOC) 5732 && h->dynindx != -1 5733 && !h->non_got_ref 5734 && ((h->def_dynamic 5735 && !h->def_regular) 5736 || h->root.type == bfd_link_hash_undefweak 5737 || h->root.type == bfd_link_hash_undefined))) 5738 { 5739 Elf_Internal_Rela outrel; 5740 bfd_byte *loc; 5741 bool skip, relocate; 5742 asection *sreloc; 5743 5744 *unresolved_reloc_p = false; 5745 5746 skip = false; 5747 relocate = false; 5748 5749 outrel.r_addend = signed_addend; 5750 outrel.r_offset = 5751 _bfd_elf_section_offset (output_bfd, info, input_section, 5752 rel->r_offset); 5753 if (outrel.r_offset == (bfd_vma) - 1) 5754 skip = true; 5755 else if (outrel.r_offset == (bfd_vma) - 2) 5756 { 5757 skip = true; 5758 relocate = true; 5759 } 5760 else if (abs_symbol_p) 5761 { 5762 /* Local absolute symbol. */ 5763 skip = (h->forced_local || (h->dynindx == -1)); 5764 relocate = skip; 5765 } 5766 5767 outrel.r_offset += (input_section->output_section->vma 5768 + input_section->output_offset); 5769 5770 if (skip) 5771 memset (&outrel, 0, sizeof outrel); 5772 else if (h != NULL 5773 && h->dynindx != -1 5774 && (!bfd_link_pic (info) 5775 || !(bfd_link_pie (info) || SYMBOLIC_BIND (info, h)) 5776 || !h->def_regular)) 5777 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type); 5778 else 5779 { 5780 int symbol; 5781 5782 /* On SVR4-ish systems, the dynamic loader cannot 5783 relocate the text and data segments independently, 5784 so the symbol does not matter. */ 5785 symbol = 0; 5786 relocate = !globals->no_apply_dynamic_relocs; 5787 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE)); 5788 outrel.r_addend += value; 5789 } 5790 5791 sreloc = elf_section_data (input_section)->sreloc; 5792 if (sreloc == NULL || sreloc->contents == NULL) 5793 return bfd_reloc_notsupported; 5794 5795 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals); 5796 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc); 5797 5798 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size) 5799 { 5800 /* Sanity to check that we have previously allocated 5801 sufficient space in the relocation section for the 5802 number of relocations we actually want to emit. */ 5803 abort (); 5804 } 5805 5806 /* If this reloc is against an external symbol, we do not want to 5807 fiddle with the addend. Otherwise, we need to include the symbol 5808 value so that it becomes an addend for the dynamic reloc. */ 5809 if (!relocate) 5810 return bfd_reloc_ok; 5811 5812 return _bfd_final_link_relocate (howto, input_bfd, input_section, 5813 contents, rel->r_offset, value, 5814 signed_addend); 5815 } 5816 else 5817 value += signed_addend; 5818 break; 5819 5820 case BFD_RELOC_AARCH64_CALL26: 5821 case BFD_RELOC_AARCH64_JUMP26: 5822 { 5823 asection *splt = globals->root.splt; 5824 bool via_plt_p = 5825 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1; 5826 5827 /* A call to an undefined weak symbol is converted to a jump to 5828 the next instruction unless a PLT entry will be created. 
5829 The jump to the next instruction is optimized as a NOP. 5830 Do the same for local undefined symbols. */ 5831 if (weak_undef_p && ! via_plt_p) 5832 { 5833 bfd_putl32 (INSN_NOP, hit_data); 5834 return bfd_reloc_ok; 5835 } 5836 5837 /* If the call goes through a PLT entry, make sure to 5838 check distance to the right destination address. */ 5839 if (via_plt_p) 5840 value = (splt->output_section->vma 5841 + splt->output_offset + h->plt.offset); 5842 5843 /* Check if a stub has to be inserted because the destination 5844 is too far away. */ 5845 struct elf_aarch64_stub_hash_entry *stub_entry = NULL; 5846 5847 /* If the branch destination is directed to plt stub, "value" will be 5848 the final destination, otherwise we should plus signed_addend, it may 5849 contain non-zero value, for example call to local function symbol 5850 which are turned into "sec_sym + sec_off", and sec_off is kept in 5851 signed_addend. */ 5852 if (! aarch64_valid_branch_p (via_plt_p ? value : value + signed_addend, 5853 place)) 5854 /* The target is out of reach, so redirect the branch to 5855 the local stub for this function. */ 5856 stub_entry = elfNN_aarch64_get_stub_entry (input_section, sym_sec, h, 5857 rel, globals); 5858 if (stub_entry != NULL) 5859 { 5860 value = (stub_entry->stub_offset 5861 + stub_entry->stub_sec->output_offset 5862 + stub_entry->stub_sec->output_section->vma); 5863 5864 /* We have redirected the destination to stub entry address, 5865 so ignore any addend record in the original rela entry. */ 5866 signed_addend = 0; 5867 } 5868 } 5869 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 5870 place, value, 5871 signed_addend, weak_undef_p); 5872 *unresolved_reloc_p = false; 5873 break; 5874 5875 case BFD_RELOC_AARCH64_16_PCREL: 5876 case BFD_RELOC_AARCH64_32_PCREL: 5877 case BFD_RELOC_AARCH64_64_PCREL: 5878 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL: 5879 case BFD_RELOC_AARCH64_ADR_HI21_PCREL: 5880 case BFD_RELOC_AARCH64_ADR_LO21_PCREL: 5881 case BFD_RELOC_AARCH64_LD_LO19_PCREL: 5882 case BFD_RELOC_AARCH64_MOVW_PREL_G0: 5883 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC: 5884 case BFD_RELOC_AARCH64_MOVW_PREL_G1: 5885 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC: 5886 case BFD_RELOC_AARCH64_MOVW_PREL_G2: 5887 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC: 5888 case BFD_RELOC_AARCH64_MOVW_PREL_G3: 5889 if (bfd_link_pic (info) 5890 && (input_section->flags & SEC_ALLOC) != 0 5891 && (input_section->flags & SEC_READONLY) != 0 5892 && !_bfd_elf_symbol_refs_local_p (h, info, 1)) 5893 { 5894 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; 5895 5896 _bfd_error_handler 5897 /* xgettext:c-format */ 5898 (_("%pB: relocation %s against symbol `%s' which may bind " 5899 "externally can not be used when making a shared object; " 5900 "recompile with -fPIC"), 5901 input_bfd, elfNN_aarch64_howto_table[howto_index].name, 5902 h->root.root.string); 5903 bfd_set_error (bfd_error_bad_value); 5904 return bfd_reloc_notsupported; 5905 } 5906 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 5907 place, value, 5908 signed_addend, 5909 weak_undef_p); 5910 break; 5911 5912 case BFD_RELOC_AARCH64_BRANCH19: 5913 case BFD_RELOC_AARCH64_TSTBR14: 5914 if (h && h->root.type == bfd_link_hash_undefined) 5915 { 5916 _bfd_error_handler 5917 /* xgettext:c-format */ 5918 (_("%pB: conditional branch to undefined symbol `%s' " 5919 "not allowed"), input_bfd, h->root.root.string); 5920 bfd_set_error (bfd_error_bad_value); 5921 return bfd_reloc_notsupported; 5922 } 5923 /* Fall through. 
*/ 5924 5925 case BFD_RELOC_AARCH64_16: 5926 #if ARCH_SIZE == 64 5927 case BFD_RELOC_AARCH64_32: 5928 #endif 5929 case BFD_RELOC_AARCH64_ADD_LO12: 5930 case BFD_RELOC_AARCH64_LDST128_LO12: 5931 case BFD_RELOC_AARCH64_LDST16_LO12: 5932 case BFD_RELOC_AARCH64_LDST32_LO12: 5933 case BFD_RELOC_AARCH64_LDST64_LO12: 5934 case BFD_RELOC_AARCH64_LDST8_LO12: 5935 case BFD_RELOC_AARCH64_MOVW_G0: 5936 case BFD_RELOC_AARCH64_MOVW_G0_NC: 5937 case BFD_RELOC_AARCH64_MOVW_G0_S: 5938 case BFD_RELOC_AARCH64_MOVW_G1: 5939 case BFD_RELOC_AARCH64_MOVW_G1_NC: 5940 case BFD_RELOC_AARCH64_MOVW_G1_S: 5941 case BFD_RELOC_AARCH64_MOVW_G2: 5942 case BFD_RELOC_AARCH64_MOVW_G2_NC: 5943 case BFD_RELOC_AARCH64_MOVW_G2_S: 5944 case BFD_RELOC_AARCH64_MOVW_G3: 5945 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 5946 place, value, 5947 signed_addend, weak_undef_p); 5948 break; 5949 5950 case BFD_RELOC_AARCH64_ADR_GOT_PAGE: 5951 case BFD_RELOC_AARCH64_GOT_LD_PREL19: 5952 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14: 5953 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC: 5954 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15: 5955 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC: 5956 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15: 5957 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC: 5958 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1: 5959 if (globals->root.sgot == NULL) 5960 BFD_ASSERT (h != NULL); 5961 5962 relative_reloc = false; 5963 if (h != NULL) 5964 { 5965 bfd_vma addend = 0; 5966 5967 /* If a symbol is not dynamic and is not undefined weak, bind it 5968 locally and generate a RELATIVE relocation under PIC mode. 5969 5970 NOTE: one symbol may be referenced by several relocations, we 5971 should only generate one RELATIVE relocation for that symbol. 5972 Therefore, check GOT offset mark first. */ 5973 if (h->dynindx == -1 5974 && !h->forced_local 5975 && h->root.type != bfd_link_hash_undefweak 5976 && bfd_link_pic (info) 5977 && !symbol_got_offset_mark_p (input_bfd, h, r_symndx)) 5978 relative_reloc = true; 5979 5980 value = aarch64_calculate_got_entry_vma (h, globals, info, value, 5981 output_bfd, 5982 unresolved_reloc_p); 5983 /* Record the GOT entry address which will be used when generating 5984 RELATIVE relocation. */ 5985 if (relative_reloc) 5986 got_entry_addr = value; 5987 5988 if (aarch64_relocation_aginst_gp_p (bfd_r_type)) 5989 addend = (globals->root.sgot->output_section->vma 5990 + globals->root.sgot->output_offset); 5991 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 5992 place, value, 5993 addend, weak_undef_p); 5994 } 5995 else 5996 { 5997 bfd_vma addend = 0; 5998 struct elf_aarch64_local_symbol *locals 5999 = elf_aarch64_locals (input_bfd); 6000 6001 if (locals == NULL) 6002 { 6003 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; 6004 _bfd_error_handler 6005 /* xgettext:c-format */ 6006 (_("%pB: local symbol descriptor table be NULL when applying " 6007 "relocation %s against local symbol"), 6008 input_bfd, elfNN_aarch64_howto_table[howto_index].name); 6009 abort (); 6010 } 6011 6012 off = symbol_got_offset (input_bfd, h, r_symndx); 6013 base_got = globals->root.sgot; 6014 got_entry_addr = (base_got->output_section->vma 6015 + base_got->output_offset + off); 6016 6017 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx)) 6018 { 6019 bfd_put_64 (output_bfd, value, base_got->contents + off); 6020 6021 /* For local symbol, we have done absolute relocation in static 6022 linking stage. 
While for shared library, we need to update the 6023 content of GOT entry according to the shared object's runtime 6024 base address. So, we need to generate a R_AARCH64_RELATIVE reloc 6025 for dynamic linker. */ 6026 if (bfd_link_pic (info)) 6027 relative_reloc = true; 6028 6029 symbol_got_offset_mark (input_bfd, h, r_symndx); 6030 } 6031 6032 /* Update the relocation value to GOT entry addr as we have transformed 6033 the direct data access into indirect data access through GOT. */ 6034 value = got_entry_addr; 6035 6036 if (aarch64_relocation_aginst_gp_p (bfd_r_type)) 6037 addend = base_got->output_section->vma + base_got->output_offset; 6038 6039 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6040 place, value, 6041 addend, weak_undef_p); 6042 } 6043 6044 if (relative_reloc) 6045 { 6046 asection *s; 6047 Elf_Internal_Rela outrel; 6048 6049 s = globals->root.srelgot; 6050 if (s == NULL) 6051 abort (); 6052 6053 outrel.r_offset = got_entry_addr; 6054 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE)); 6055 outrel.r_addend = orig_value; 6056 elf_append_rela (output_bfd, s, &outrel); 6057 } 6058 break; 6059 6060 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC: 6061 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21: 6062 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21: 6063 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 6064 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC: 6065 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 6066 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19: 6067 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC: 6068 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21: 6069 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21: 6070 if (globals->root.sgot == NULL) 6071 return bfd_reloc_notsupported; 6072 6073 value = (symbol_got_offset (input_bfd, h, r_symndx) 6074 + globals->root.sgot->output_section->vma 6075 + globals->root.sgot->output_offset); 6076 6077 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6078 place, value, 6079 0, weak_undef_p); 6080 *unresolved_reloc_p = false; 6081 break; 6082 6083 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC: 6084 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1: 6085 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: 6086 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1: 6087 if (globals->root.sgot == NULL) 6088 return bfd_reloc_notsupported; 6089 6090 value = symbol_got_offset (input_bfd, h, r_symndx); 6091 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6092 place, value, 6093 0, weak_undef_p); 6094 *unresolved_reloc_p = false; 6095 break; 6096 6097 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12: 6098 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12: 6099 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: 6100 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12: 6101 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC: 6102 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12: 6103 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC: 6104 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12: 6105 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC: 6106 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12: 6107 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC: 6108 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0: 6109 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: 6110 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1: 6111 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC: 6112 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2: 6113 { 6114 if (!(weak_undef_p || elf_hash_table (info)->tls_sec)) 6115 { 6116 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; 6117 
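/* The symbol is not an undefined weak and the output has no TLS segment,
   so there is no DTP base against which to compute the offset; report the
   relocation and bail out.  */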
_bfd_error_handler 6118 /* xgettext:c-format */ 6119 (_("%pB: TLS relocation %s against undefined symbol `%s'"), 6120 input_bfd, elfNN_aarch64_howto_table[howto_index].name, 6121 h->root.root.string); 6122 bfd_set_error (bfd_error_bad_value); 6123 return bfd_reloc_notsupported; 6124 } 6125 6126 bfd_vma def_value 6127 = weak_undef_p ? 0 : signed_addend - dtpoff_base (info); 6128 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6129 place, value, 6130 def_value, weak_undef_p); 6131 break; 6132 } 6133 6134 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12: 6135 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12: 6136 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC: 6137 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12: 6138 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC: 6139 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12: 6140 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC: 6141 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12: 6142 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC: 6143 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12: 6144 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC: 6145 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0: 6146 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC: 6147 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1: 6148 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC: 6149 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2: 6150 { 6151 if (!(weak_undef_p || elf_hash_table (info)->tls_sec)) 6152 { 6153 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; 6154 _bfd_error_handler 6155 /* xgettext:c-format */ 6156 (_("%pB: TLS relocation %s against undefined symbol `%s'"), 6157 input_bfd, elfNN_aarch64_howto_table[howto_index].name, 6158 h->root.root.string); 6159 bfd_set_error (bfd_error_bad_value); 6160 return bfd_reloc_notsupported; 6161 } 6162 6163 bfd_vma def_value 6164 = weak_undef_p ? 
0 : signed_addend - tpoff_base (info); 6165 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6166 place, value, 6167 def_value, weak_undef_p); 6168 *unresolved_reloc_p = false; 6169 break; 6170 } 6171 6172 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12: 6173 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21: 6174 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21: 6175 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC: 6176 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12: 6177 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19: 6178 if (globals->root.sgot == NULL) 6179 return bfd_reloc_notsupported; 6180 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx) 6181 + globals->root.sgotplt->output_section->vma 6182 + globals->root.sgotplt->output_offset 6183 + globals->sgotplt_jump_table_size); 6184 6185 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6186 place, value, 6187 0, weak_undef_p); 6188 *unresolved_reloc_p = false; 6189 break; 6190 6191 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC: 6192 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1: 6193 if (globals->root.sgot == NULL) 6194 return bfd_reloc_notsupported; 6195 6196 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx) 6197 + globals->root.sgotplt->output_section->vma 6198 + globals->root.sgotplt->output_offset 6199 + globals->sgotplt_jump_table_size); 6200 6201 value -= (globals->root.sgot->output_section->vma 6202 + globals->root.sgot->output_offset); 6203 6204 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6205 place, value, 6206 0, weak_undef_p); 6207 *unresolved_reloc_p = false; 6208 break; 6209 6210 default: 6211 return bfd_reloc_notsupported; 6212 } 6213 6214 if (saved_addend) 6215 *saved_addend = value; 6216 6217 /* Only apply the final relocation in a sequence. */ 6218 if (save_addend) 6219 return bfd_reloc_continue; 6220 6221 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, 6222 howto, value); 6223 } 6224 6225 /* LP64 and ILP32 operates on x- and w-registers respectively. 6226 Next definitions take into account the difference between 6227 corresponding machine codes. R means x-register if the target 6228 arch is LP64, and w-register if the target is ILP32. */ 6229 6230 #if ARCH_SIZE == 64 6231 # define add_R0_R0 (0x91000000) 6232 # define add_R0_R0_R1 (0x8b000020) 6233 # define add_R0_R1 (0x91400020) 6234 # define ldr_R0 (0x58000000) 6235 # define ldr_R0_mask(i) (i & 0xffffffe0) 6236 # define ldr_R0_x0 (0xf9400000) 6237 # define ldr_hw_R0 (0xf2a00000) 6238 # define movk_R0 (0xf2800000) 6239 # define movz_R0 (0xd2a00000) 6240 # define movz_hw_R0 (0xd2c00000) 6241 #else /*ARCH_SIZE == 32 */ 6242 # define add_R0_R0 (0x11000000) 6243 # define add_R0_R0_R1 (0x0b000020) 6244 # define add_R0_R1 (0x11400020) 6245 # define ldr_R0 (0x18000000) 6246 # define ldr_R0_mask(i) (i & 0xbfffffe0) 6247 # define ldr_R0_x0 (0xb9400000) 6248 # define ldr_hw_R0 (0x72a00000) 6249 # define movk_R0 (0x72800000) 6250 # define movz_R0 (0x52a00000) 6251 # define movz_hw_R0 (0x52c00000) 6252 #endif 6253 6254 /* Structure to hold payload for _bfd_aarch64_erratum_843419_clear_stub, 6255 it is used to identify the stub information to reset. */ 6256 6257 struct erratum_843419_branch_to_stub_clear_data 6258 { 6259 bfd_vma adrp_offset; 6260 asection *output_section; 6261 }; 6262 6263 /* Clear the erratum information for GEN_ENTRY if the ADRP_OFFSET and 6264 section inside IN_ARG matches. The clearing is done by setting the 6265 stub_type to none. 
*/

static bool
_bfd_aarch64_erratum_843419_clear_stub (struct bfd_hash_entry *gen_entry,
                                        void *in_arg)
{
  struct elf_aarch64_stub_hash_entry *stub_entry
    = (struct elf_aarch64_stub_hash_entry *) gen_entry;
  struct erratum_843419_branch_to_stub_clear_data *data
    = (struct erratum_843419_branch_to_stub_clear_data *) in_arg;

  if (stub_entry->target_section != data->output_section
      || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer
      || stub_entry->adrp_offset != data->adrp_offset)
    return true;

  /* Change the stub type instead of removing the entry; removing it from the
     hash table would be slower and we have already reserved the memory for
     the entry, so there wouldn't be much gain.  Changing the stub also keeps
     around a record of what was there before.  */
  stub_entry->stub_type = aarch64_stub_none;

  /* We're done and there could have been only one matching stub at that
     particular offset, so abort further traversal.  */
  return false;
}

/* A TLS relaxation may rewrite an adrp sequence that matches the erratum
   843419 sequence.  In this case the erratum no longer applies and we need to
   remove the entry from the pending stub generation.  This clears the entry
   for the matching adrp insn at ADRP_OFFSET in INPUT_SECTION from the stub
   table defined in GLOBALS.  */

static void
clear_erratum_843419_entry (struct elf_aarch64_link_hash_table *globals,
                            bfd_vma adrp_offset, asection *input_section)
{
  if (globals->fix_erratum_843419 & ERRAT_ADRP)
    {
      struct erratum_843419_branch_to_stub_clear_data data;
      data.adrp_offset = adrp_offset;
      data.output_section = input_section;

      bfd_hash_traverse (&globals->stub_hash_table,
                         _bfd_aarch64_erratum_843419_clear_stub, &data);
    }
}

/* Handle TLS relaxations.  Relaxing is possible for symbols that use
   R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12_NC, ADD_LO12_NC} during a static
   link.

   Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
   is to then call final_link_relocate.  Return other values in the
   case of error.  */

static bfd_reloc_status_type
elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
                         bfd *input_bfd, asection *input_section,
                         bfd_byte *contents, Elf_Internal_Rela *rel,
                         struct elf_link_hash_entry *h)
{
  bool is_local = h == NULL;
  unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
  unsigned long insn;

  BFD_ASSERT (globals && input_bfd && contents && rel);

  switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type))
    {
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
      if (is_local)
        {
          /* GD->LE relaxation:
             adrp x0, :tlsgd:var    =>   movz R0, :tprel_g1:var
             or
             adrp x0, :tlsdesc:var  =>   movz R0, :tprel_g1:var

             Where R is x for LP64, and w for ILP32.  */
          bfd_putl32 (movz_R0, contents + rel->r_offset);
          /* We have relaxed the adrp into a mov, we may have to clear any
             pending erratum fixes.  */
          clear_erratum_843419_entry (globals, rel->r_offset, input_section);
          return bfd_reloc_continue;
        }
      else
        {
          /* GD->IE relaxation:
             adrp x0, :tlsgd:var    =>   adrp x0, :gottprel:var
             or
             adrp x0, :tlsdesc:var  =>   adrp x0, :gottprel:var
           */
          return bfd_reloc_continue;
        }

    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
      BFD_ASSERT (0);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
      if (is_local)
        {
          /* Tiny TLSDESC->LE relaxation:
             ldr x1, :tlsdesc:var   =>  movz R0, #:tprel_g1:var
             adr x0, :tlsdesc:var   =>  movk R0, #:tprel_g0_nc:var
             .tlsdesccall var
             blr x1                 =>  nop

             Where R is x for LP64, and w for ILP32.  */
          BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
          BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));

          rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
                                        AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
          rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);

          bfd_putl32 (movz_R0, contents + rel->r_offset);
          bfd_putl32 (movk_R0, contents + rel->r_offset + 4);
          bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
          return bfd_reloc_continue;
        }
      else
        {
          /* Tiny TLSDESC->IE relaxation:
             ldr x1, :tlsdesc:var   =>  ldr x0, :gottprel:var
             adr x0, :tlsdesc:var   =>  nop
             .tlsdesccall var
             blr x1                 =>  nop
           */
          BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
          BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));

          rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
          rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);

          bfd_putl32 (ldr_R0, contents + rel->r_offset);
          bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
          bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
          return bfd_reloc_continue;
        }

    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
      if (is_local)
        {
          /* Tiny GD->LE relaxation:
             adr x0, :tlsgd:var   =>   mrs x1, tpidr_el0
             bl __tls_get_addr    =>   add R0, R1, #:tprel_hi12:var, lsl #12
             nop                  =>   add R0, R0, #:tprel_lo12_nc:var

             Where R is x for LP64, and w for ILP32.  */

          /* First kill the tls_get_addr reloc on the bl instruction.  */
          BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);

          bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
          bfd_putl32 (add_R0_R1, contents + rel->r_offset + 4);
          bfd_putl32 (add_R0_R0, contents + rel->r_offset + 8);

          rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
                                        AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
          rel[1].r_offset = rel->r_offset + 8;

          /* Move the current relocation to the second instruction in
             the sequence.  */
          rel->r_offset += 4;
          rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
                                      AARCH64_R (TLSLE_ADD_TPREL_HI12));
          return bfd_reloc_continue;
        }
      else
        {
          /* Tiny GD->IE relaxation:
             adr x0, :tlsgd:var   =>   ldr R0, :gottprel:var
             bl __tls_get_addr    =>   mrs x1, tpidr_el0
             nop                  =>   add R0, R0, R1

             Where R is x for LP64, and w for ILP32.  */

          /* First kill the tls_get_addr reloc on the bl instruction.
*/ 6444 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset); 6445 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE); 6446 6447 bfd_putl32 (ldr_R0, contents + rel->r_offset); 6448 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4); 6449 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8); 6450 return bfd_reloc_continue; 6451 } 6452 6453 #if ARCH_SIZE == 64 6454 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1: 6455 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSGD_MOVW_G0_NC)); 6456 BFD_ASSERT (rel->r_offset + 12 == rel[2].r_offset); 6457 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (CALL26)); 6458 6459 if (is_local) 6460 { 6461 /* Large GD->LE relaxation: 6462 movz x0, #:tlsgd_g1:var => movz x0, #:tprel_g2:var, lsl #32 6463 movk x0, #:tlsgd_g0_nc:var => movk x0, #:tprel_g1_nc:var, lsl #16 6464 add x0, gp, x0 => movk x0, #:tprel_g0_nc:var 6465 bl __tls_get_addr => mrs x1, tpidr_el0 6466 nop => add x0, x0, x1 6467 */ 6468 rel[2].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info), 6469 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC)); 6470 rel[2].r_offset = rel->r_offset + 8; 6471 6472 bfd_putl32 (movz_hw_R0, contents + rel->r_offset + 0); 6473 bfd_putl32 (ldr_hw_R0, contents + rel->r_offset + 4); 6474 bfd_putl32 (movk_R0, contents + rel->r_offset + 8); 6475 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12); 6476 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16); 6477 } 6478 else 6479 { 6480 /* Large GD->IE relaxation: 6481 movz x0, #:tlsgd_g1:var => movz x0, #:gottprel_g1:var, lsl #16 6482 movk x0, #:tlsgd_g0_nc:var => movk x0, #:gottprel_g0_nc:var 6483 add x0, gp, x0 => ldr x0, [gp, x0] 6484 bl __tls_get_addr => mrs x1, tpidr_el0 6485 nop => add x0, x0, x1 6486 */ 6487 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE); 6488 bfd_putl32 (0xd2a80000, contents + rel->r_offset + 0); 6489 bfd_putl32 (ldr_R0, contents + rel->r_offset + 8); 6490 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12); 6491 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16); 6492 } 6493 return bfd_reloc_continue; 6494 6495 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC: 6496 return bfd_reloc_continue; 6497 #endif 6498 6499 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19: 6500 return bfd_reloc_continue; 6501 6502 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC: 6503 if (is_local) 6504 { 6505 /* GD->LE relaxation: 6506 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var 6507 6508 Where R is x for lp64 mode, and w for ILP32 mode. */ 6509 bfd_putl32 (movk_R0, contents + rel->r_offset); 6510 return bfd_reloc_continue; 6511 } 6512 else 6513 { 6514 /* GD->IE relaxation: 6515 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr R0, [x0, #:gottprel_lo12:var] 6516 6517 Where R is x for lp64 mode, and w for ILP32 mode. */ 6518 insn = bfd_getl32 (contents + rel->r_offset); 6519 bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset); 6520 return bfd_reloc_continue; 6521 } 6522 6523 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC: 6524 if (is_local) 6525 { 6526 /* GD->LE relaxation 6527 add x0, #:tlsgd_lo12:var => movk R0, :tprel_g0_nc:var 6528 bl __tls_get_addr => mrs x1, tpidr_el0 6529 nop => add R0, R1, R0 6530 6531 Where R is x for lp64 mode, and w for ILP32 mode. */ 6532 6533 /* First kill the tls_get_addr reloc on the bl instruction. 
*/ 6534 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset); 6535 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE); 6536 6537 bfd_putl32 (movk_R0, contents + rel->r_offset); 6538 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4); 6539 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8); 6540 return bfd_reloc_continue; 6541 } 6542 else 6543 { 6544 /* GD->IE relaxation 6545 ADD x0, #:tlsgd_lo12:var => ldr R0, [x0, #:gottprel_lo12:var] 6546 BL __tls_get_addr => mrs x1, tpidr_el0 6547 R_AARCH64_CALL26 6548 NOP => add R0, R1, R0 6549 6550 Where R is x for lp64 mode, and w for ilp32 mode. */ 6551 6552 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26)); 6553 6554 /* Remove the relocation on the BL instruction. */ 6555 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE); 6556 6557 /* We choose to fixup the BL and NOP instructions using the 6558 offset from the second relocation to allow flexibility in 6559 scheduling instructions between the ADD and BL. */ 6560 bfd_putl32 (ldr_R0_x0, contents + rel->r_offset); 6561 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset); 6562 bfd_putl32 (add_R0_R0_R1, contents + rel[1].r_offset + 4); 6563 return bfd_reloc_continue; 6564 } 6565 6566 case BFD_RELOC_AARCH64_TLSDESC_ADD: 6567 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12: 6568 case BFD_RELOC_AARCH64_TLSDESC_CALL: 6569 /* GD->IE/LE relaxation: 6570 add x0, x0, #:tlsdesc_lo12:var => nop 6571 blr xd => nop 6572 */ 6573 bfd_putl32 (INSN_NOP, contents + rel->r_offset); 6574 return bfd_reloc_ok; 6575 6576 case BFD_RELOC_AARCH64_TLSDESC_LDR: 6577 if (is_local) 6578 { 6579 /* GD->LE relaxation: 6580 ldr xd, [gp, xn] => movk R0, #:tprel_g0_nc:var 6581 6582 Where R is x for lp64 mode, and w for ILP32 mode. */ 6583 bfd_putl32 (movk_R0, contents + rel->r_offset); 6584 return bfd_reloc_continue; 6585 } 6586 else 6587 { 6588 /* GD->IE relaxation: 6589 ldr xd, [gp, xn] => ldr R0, [gp, xn] 6590 6591 Where R is x for lp64 mode, and w for ILP32 mode. */ 6592 insn = bfd_getl32 (contents + rel->r_offset); 6593 bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset); 6594 return bfd_reloc_ok; 6595 } 6596 6597 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC: 6598 /* GD->LE relaxation: 6599 movk xd, #:tlsdesc_off_g0_nc:var => movk R0, #:tprel_g1_nc:var, lsl #16 6600 GD->IE relaxation: 6601 movk xd, #:tlsdesc_off_g0_nc:var => movk Rd, #:gottprel_g0_nc:var 6602 6603 Where R is x for lp64 mode, and w for ILP32 mode. */ 6604 if (is_local) 6605 bfd_putl32 (ldr_hw_R0, contents + rel->r_offset); 6606 return bfd_reloc_continue; 6607 6608 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1: 6609 if (is_local) 6610 { 6611 /* GD->LE relaxation: 6612 movz xd, #:tlsdesc_off_g1:var => movz R0, #:tprel_g2:var, lsl #32 6613 6614 Where R is x for lp64 mode, and w for ILP32 mode. */ 6615 bfd_putl32 (movz_hw_R0, contents + rel->r_offset); 6616 return bfd_reloc_continue; 6617 } 6618 else 6619 { 6620 /* GD->IE relaxation: 6621 movz xd, #:tlsdesc_off_g1:var => movz Rd, #:gottprel_g1:var, lsl #16 6622 6623 Where R is x for lp64 mode, and w for ILP32 mode. */ 6624 insn = bfd_getl32 (contents + rel->r_offset); 6625 bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset); 6626 return bfd_reloc_continue; 6627 } 6628 6629 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 6630 /* IE->LE relaxation: 6631 adrp xd, :gottprel:var => movz Rd, :tprel_g1:var 6632 6633 Where R is x for lp64 mode, and w for ILP32 mode. 
*/ 6634 if (is_local) 6635 { 6636 insn = bfd_getl32 (contents + rel->r_offset); 6637 bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset); 6638 /* We have relaxed the adrp into a mov, we may have to clear any 6639 pending erratum fixes. */ 6640 clear_erratum_843419_entry (globals, rel->r_offset, input_section); 6641 } 6642 return bfd_reloc_continue; 6643 6644 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC: 6645 /* IE->LE relaxation: 6646 ldr xd, [xm, #:gottprel_lo12:var] => movk Rd, :tprel_g0_nc:var 6647 6648 Where R is x for lp64 mode, and w for ILP32 mode. */ 6649 if (is_local) 6650 { 6651 insn = bfd_getl32 (contents + rel->r_offset); 6652 bfd_putl32 (movk_R0 | (insn & 0x1f), contents + rel->r_offset); 6653 } 6654 return bfd_reloc_continue; 6655 6656 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21: 6657 /* LD->LE relaxation (tiny): 6658 adr x0, :tlsldm:x => mrs x0, tpidr_el0 6659 bl __tls_get_addr => add R0, R0, TCB_SIZE 6660 6661 Where R is x for lp64 mode, and w for ilp32 mode. */ 6662 if (is_local) 6663 { 6664 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset); 6665 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26)); 6666 /* No need of CALL26 relocation for tls_get_addr. */ 6667 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE); 6668 bfd_putl32 (0xd53bd040, contents + rel->r_offset + 0); 6669 bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10), 6670 contents + rel->r_offset + 4); 6671 return bfd_reloc_ok; 6672 } 6673 return bfd_reloc_continue; 6674 6675 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21: 6676 /* LD->LE relaxation (small): 6677 adrp x0, :tlsldm:x => mrs x0, tpidr_el0 6678 */ 6679 if (is_local) 6680 { 6681 bfd_putl32 (0xd53bd040, contents + rel->r_offset); 6682 return bfd_reloc_ok; 6683 } 6684 return bfd_reloc_continue; 6685 6686 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC: 6687 /* LD->LE relaxation (small): 6688 add x0, #:tlsldm_lo12:x => add R0, R0, TCB_SIZE 6689 bl __tls_get_addr => nop 6690 6691 Where R is x for lp64 mode, and w for ilp32 mode. */ 6692 if (is_local) 6693 { 6694 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset); 6695 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26)); 6696 /* No need of CALL26 relocation for tls_get_addr. */ 6697 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE); 6698 bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10), 6699 contents + rel->r_offset + 0); 6700 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4); 6701 return bfd_reloc_ok; 6702 } 6703 return bfd_reloc_continue; 6704 6705 default: 6706 return bfd_reloc_continue; 6707 } 6708 6709 return bfd_reloc_ok; 6710 } 6711 6712 /* Relocate an AArch64 ELF section. 
*/ 6713 6714 static int 6715 elfNN_aarch64_relocate_section (bfd *output_bfd, 6716 struct bfd_link_info *info, 6717 bfd *input_bfd, 6718 asection *input_section, 6719 bfd_byte *contents, 6720 Elf_Internal_Rela *relocs, 6721 Elf_Internal_Sym *local_syms, 6722 asection **local_sections) 6723 { 6724 Elf_Internal_Shdr *symtab_hdr; 6725 struct elf_link_hash_entry **sym_hashes; 6726 Elf_Internal_Rela *rel; 6727 Elf_Internal_Rela *relend; 6728 const char *name; 6729 struct elf_aarch64_link_hash_table *globals; 6730 bool save_addend = false; 6731 bfd_vma addend = 0; 6732 6733 globals = elf_aarch64_hash_table (info); 6734 6735 symtab_hdr = &elf_symtab_hdr (input_bfd); 6736 sym_hashes = elf_sym_hashes (input_bfd); 6737 6738 rel = relocs; 6739 relend = relocs + input_section->reloc_count; 6740 for (; rel < relend; rel++) 6741 { 6742 unsigned int r_type; 6743 bfd_reloc_code_real_type bfd_r_type; 6744 bfd_reloc_code_real_type relaxed_bfd_r_type; 6745 reloc_howto_type *howto; 6746 unsigned long r_symndx; 6747 Elf_Internal_Sym *sym; 6748 asection *sec; 6749 struct elf_link_hash_entry *h; 6750 bfd_vma relocation; 6751 bfd_reloc_status_type r; 6752 arelent bfd_reloc; 6753 char sym_type; 6754 bool unresolved_reloc = false; 6755 char *error_message = NULL; 6756 6757 r_symndx = ELFNN_R_SYM (rel->r_info); 6758 r_type = ELFNN_R_TYPE (rel->r_info); 6759 6760 bfd_reloc.howto = elfNN_aarch64_howto_from_type (input_bfd, r_type); 6761 howto = bfd_reloc.howto; 6762 6763 if (howto == NULL) 6764 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type); 6765 6766 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto); 6767 6768 h = NULL; 6769 sym = NULL; 6770 sec = NULL; 6771 6772 if (r_symndx < symtab_hdr->sh_info) 6773 { 6774 sym = local_syms + r_symndx; 6775 sym_type = ELFNN_ST_TYPE (sym->st_info); 6776 sec = local_sections[r_symndx]; 6777 6778 /* An object file might have a reference to a local 6779 undefined symbol. This is a daft object file, but we 6780 should at least do something about it. */ 6781 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL 6782 && bfd_is_und_section (sec) 6783 && ELF_ST_BIND (sym->st_info) != STB_WEAK) 6784 (*info->callbacks->undefined_symbol) 6785 (info, bfd_elf_string_from_elf_section 6786 (input_bfd, symtab_hdr->sh_link, sym->st_name), 6787 input_bfd, input_section, rel->r_offset, true); 6788 6789 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel); 6790 6791 /* Relocate against local STT_GNU_IFUNC symbol. */ 6792 if (!bfd_link_relocatable (info) 6793 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC) 6794 { 6795 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd, 6796 rel, false); 6797 if (h == NULL) 6798 abort (); 6799 6800 /* Set STT_GNU_IFUNC symbol value. 
*/ 6801 h->root.u.def.value = sym->st_value; 6802 h->root.u.def.section = sec; 6803 } 6804 } 6805 else 6806 { 6807 bool warned, ignored; 6808 6809 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel, 6810 r_symndx, symtab_hdr, sym_hashes, 6811 h, sec, relocation, 6812 unresolved_reloc, warned, ignored); 6813 6814 sym_type = h->type; 6815 } 6816 6817 if (sec != NULL && discarded_section (sec)) 6818 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section, 6819 rel, 1, relend, howto, 0, contents); 6820 6821 if (bfd_link_relocatable (info)) 6822 continue; 6823 6824 if (h != NULL) 6825 name = h->root.root.string; 6826 else 6827 { 6828 name = (bfd_elf_string_from_elf_section 6829 (input_bfd, symtab_hdr->sh_link, sym->st_name)); 6830 if (name == NULL || *name == '\0') 6831 name = bfd_section_name (sec); 6832 } 6833 6834 if (r_symndx != 0 6835 && r_type != R_AARCH64_NONE 6836 && r_type != R_AARCH64_NULL 6837 && (h == NULL 6838 || h->root.type == bfd_link_hash_defined 6839 || h->root.type == bfd_link_hash_defweak) 6840 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS)) 6841 { 6842 _bfd_error_handler 6843 ((sym_type == STT_TLS 6844 /* xgettext:c-format */ 6845 ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s") 6846 /* xgettext:c-format */ 6847 : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")), 6848 input_bfd, 6849 input_section, (uint64_t) rel->r_offset, howto->name, name); 6850 } 6851 6852 /* We relax only if we can see that there can be a valid transition 6853 from a reloc type to another. 6854 We call elfNN_aarch64_final_link_relocate unless we're completely 6855 done, i.e., the relaxation produced the final output we want. */ 6856 6857 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, 6858 h, r_symndx); 6859 if (relaxed_bfd_r_type != bfd_r_type) 6860 { 6861 bfd_r_type = relaxed_bfd_r_type; 6862 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type); 6863 BFD_ASSERT (howto != NULL); 6864 r_type = howto->type; 6865 r = elfNN_aarch64_tls_relax (globals, input_bfd, input_section, 6866 contents, rel, h); 6867 unresolved_reloc = 0; 6868 } 6869 else 6870 r = bfd_reloc_continue; 6871 6872 /* There may be multiple consecutive relocations for the 6873 same offset. In that case we are supposed to treat the 6874 output of each relocation as the addend for the next. */ 6875 if (rel + 1 < relend 6876 && rel->r_offset == rel[1].r_offset 6877 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE 6878 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL) 6879 save_addend = true; 6880 else 6881 save_addend = false; 6882 6883 if (r == bfd_reloc_continue) 6884 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd, 6885 input_section, contents, rel, 6886 relocation, info, sec, 6887 h, &unresolved_reloc, 6888 save_addend, &addend, sym); 6889 6890 switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type)) 6891 { 6892 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC: 6893 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21: 6894 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21: 6895 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC: 6896 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1: 6897 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC: 6898 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21: 6899 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21: 6900 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx)) 6901 { 6902 bool need_relocs = false; 6903 bfd_byte *loc; 6904 int indx; 6905 bfd_vma off; 6906 6907 off = symbol_got_offset (input_bfd, h, r_symndx); 6908 indx = h && h->dynindx != -1 ? 
h->dynindx : 0; 6909 6910 need_relocs = 6911 (!bfd_link_executable (info) || indx != 0) && 6912 (h == NULL 6913 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 6914 || h->root.type != bfd_link_hash_undefweak); 6915 6916 BFD_ASSERT (globals->root.srelgot != NULL); 6917 6918 if (need_relocs) 6919 { 6920 Elf_Internal_Rela rela; 6921 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD)); 6922 rela.r_addend = 0; 6923 rela.r_offset = globals->root.sgot->output_section->vma + 6924 globals->root.sgot->output_offset + off; 6925 6926 6927 loc = globals->root.srelgot->contents; 6928 loc += globals->root.srelgot->reloc_count++ 6929 * RELOC_SIZE (htab); 6930 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc); 6931 6932 bfd_reloc_code_real_type real_type = 6933 elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type); 6934 6935 if (real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 6936 || real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 6937 || real_type == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC) 6938 { 6939 /* For local dynamic, don't generate DTPREL in any case. 6940 Initialize the DTPREL slot into zero, so we get module 6941 base address when invoke runtime TLS resolver. */ 6942 bfd_put_NN (output_bfd, 0, 6943 globals->root.sgot->contents + off 6944 + GOT_ENTRY_SIZE); 6945 } 6946 else if (indx == 0) 6947 { 6948 bfd_put_NN (output_bfd, 6949 relocation - dtpoff_base (info), 6950 globals->root.sgot->contents + off 6951 + GOT_ENTRY_SIZE); 6952 } 6953 else 6954 { 6955 /* This TLS symbol is global. We emit a 6956 relocation to fixup the tls offset at load 6957 time. */ 6958 rela.r_info = 6959 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL)); 6960 rela.r_addend = 0; 6961 rela.r_offset = 6962 (globals->root.sgot->output_section->vma 6963 + globals->root.sgot->output_offset + off 6964 + GOT_ENTRY_SIZE); 6965 6966 loc = globals->root.srelgot->contents; 6967 loc += globals->root.srelgot->reloc_count++ 6968 * RELOC_SIZE (globals); 6969 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc); 6970 bfd_put_NN (output_bfd, (bfd_vma) 0, 6971 globals->root.sgot->contents + off 6972 + GOT_ENTRY_SIZE); 6973 } 6974 } 6975 else 6976 { 6977 bfd_put_NN (output_bfd, (bfd_vma) 1, 6978 globals->root.sgot->contents + off); 6979 bfd_put_NN (output_bfd, 6980 relocation - dtpoff_base (info), 6981 globals->root.sgot->contents + off 6982 + GOT_ENTRY_SIZE); 6983 } 6984 6985 symbol_got_offset_mark (input_bfd, h, r_symndx); 6986 } 6987 break; 6988 6989 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 6990 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC: 6991 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19: 6992 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: 6993 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1: 6994 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx)) 6995 { 6996 bool need_relocs = false; 6997 bfd_byte *loc; 6998 int indx; 6999 bfd_vma off; 7000 7001 off = symbol_got_offset (input_bfd, h, r_symndx); 7002 7003 indx = h && h->dynindx != -1 ? 
h->dynindx : 0; 7004 7005 need_relocs = 7006 (!bfd_link_executable (info) || indx != 0) && 7007 (h == NULL 7008 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 7009 || h->root.type != bfd_link_hash_undefweak); 7010 7011 BFD_ASSERT (globals->root.srelgot != NULL); 7012 7013 if (need_relocs) 7014 { 7015 Elf_Internal_Rela rela; 7016 7017 if (indx == 0) 7018 rela.r_addend = relocation - dtpoff_base (info); 7019 else 7020 rela.r_addend = 0; 7021 7022 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL)); 7023 rela.r_offset = globals->root.sgot->output_section->vma + 7024 globals->root.sgot->output_offset + off; 7025 7026 loc = globals->root.srelgot->contents; 7027 loc += globals->root.srelgot->reloc_count++ 7028 * RELOC_SIZE (htab); 7029 7030 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc); 7031 7032 bfd_put_NN (output_bfd, rela.r_addend, 7033 globals->root.sgot->contents + off); 7034 } 7035 else 7036 bfd_put_NN (output_bfd, relocation - tpoff_base (info), 7037 globals->root.sgot->contents + off); 7038 7039 symbol_got_offset_mark (input_bfd, h, r_symndx); 7040 } 7041 break; 7042 7043 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12: 7044 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21: 7045 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21: 7046 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC: 7047 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19: 7048 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC: 7049 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1: 7050 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx)) 7051 { 7052 bool need_relocs = false; 7053 int indx = h && h->dynindx != -1 ? h->dynindx : 0; 7054 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx); 7055 7056 need_relocs = (h == NULL 7057 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 7058 || h->root.type != bfd_link_hash_undefweak); 7059 7060 BFD_ASSERT (globals->root.srelgot != NULL); 7061 BFD_ASSERT (globals->root.sgot != NULL); 7062 7063 if (need_relocs) 7064 { 7065 bfd_byte *loc; 7066 Elf_Internal_Rela rela; 7067 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC)); 7068 7069 rela.r_addend = 0; 7070 rela.r_offset = (globals->root.sgotplt->output_section->vma 7071 + globals->root.sgotplt->output_offset 7072 + off + globals->sgotplt_jump_table_size); 7073 7074 if (indx == 0) 7075 rela.r_addend = relocation - dtpoff_base (info); 7076 7077 /* Allocate the next available slot in the PLT reloc 7078 section to hold our R_AARCH64_TLSDESC, the next 7079 available slot is determined from reloc_count, 7080 which we step. But note, reloc_count was 7081 artifically moved down while allocating slots for 7082 real PLT relocs such that all of the PLT relocs 7083 will fit above the initial reloc_count and the 7084 extra stuff will fit below. */ 7085 loc = globals->root.srelplt->contents; 7086 loc += globals->root.srelplt->reloc_count++ 7087 * RELOC_SIZE (globals); 7088 7089 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc); 7090 7091 bfd_put_NN (output_bfd, (bfd_vma) 0, 7092 globals->root.sgotplt->contents + off + 7093 globals->sgotplt_jump_table_size); 7094 bfd_put_NN (output_bfd, (bfd_vma) 0, 7095 globals->root.sgotplt->contents + off + 7096 globals->sgotplt_jump_table_size + 7097 GOT_ENTRY_SIZE); 7098 } 7099 7100 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx); 7101 } 7102 break; 7103 default: 7104 break; 7105 } 7106 7107 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections 7108 because such sections are not SEC_ALLOC and thus ld.so will 7109 not process them. 
*/ 7110 if (unresolved_reloc 7111 && !((input_section->flags & SEC_DEBUGGING) != 0 7112 && h->def_dynamic) 7113 && _bfd_elf_section_offset (output_bfd, info, input_section, 7114 +rel->r_offset) != (bfd_vma) - 1) 7115 { 7116 _bfd_error_handler 7117 /* xgettext:c-format */ 7118 (_("%pB(%pA+%#" PRIx64 "): " 7119 "unresolvable %s relocation against symbol `%s'"), 7120 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name, 7121 h->root.root.string); 7122 return false; 7123 } 7124 7125 if (r != bfd_reloc_ok && r != bfd_reloc_continue) 7126 { 7127 bfd_reloc_code_real_type real_r_type 7128 = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type); 7129 7130 switch (r) 7131 { 7132 case bfd_reloc_overflow: 7133 (*info->callbacks->reloc_overflow) 7134 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0, 7135 input_bfd, input_section, rel->r_offset); 7136 if (real_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15 7137 || real_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14) 7138 { 7139 (*info->callbacks->warning) 7140 (info, 7141 _("too many GOT entries for -fpic, " 7142 "please recompile with -fPIC"), 7143 name, input_bfd, input_section, rel->r_offset); 7144 return false; 7145 } 7146 /* Overflow can occur when a variable is referenced with a type 7147 that has a larger alignment than the type with which it was 7148 declared. eg: 7149 file1.c: extern int foo; int a (void) { return foo; } 7150 file2.c: char bar, foo, baz; 7151 If the variable is placed into a data section at an offset 7152 that is incompatible with the larger alignment requirement 7153 overflow will occur. (Strictly speaking this is not overflow 7154 but rather an alignment problem, but the bfd_reloc_ error 7155 enum does not have a value to cover that situation). 7156 7157 Try to catch this situation here and provide a more helpful 7158 error message to the user. */ 7159 if (addend & (((bfd_vma) 1 << howto->rightshift) - 1) 7160 /* FIXME: Are we testing all of the appropriate reloc 7161 types here ? */ 7162 && (real_r_type == BFD_RELOC_AARCH64_LD_LO19_PCREL 7163 || real_r_type == BFD_RELOC_AARCH64_LDST16_LO12 7164 || real_r_type == BFD_RELOC_AARCH64_LDST32_LO12 7165 || real_r_type == BFD_RELOC_AARCH64_LDST64_LO12 7166 || real_r_type == BFD_RELOC_AARCH64_LDST128_LO12)) 7167 { 7168 info->callbacks->warning 7169 (info, _("one possible cause of this error is that the \ 7170 symbol is being referenced in the indicated code as if it had a larger \ 7171 alignment than was declared where it was defined"), 7172 name, input_bfd, input_section, rel->r_offset); 7173 } 7174 break; 7175 7176 case bfd_reloc_undefined: 7177 (*info->callbacks->undefined_symbol) 7178 (info, name, input_bfd, input_section, rel->r_offset, true); 7179 break; 7180 7181 case bfd_reloc_outofrange: 7182 error_message = _("out of range"); 7183 goto common_error; 7184 7185 case bfd_reloc_notsupported: 7186 error_message = _("unsupported relocation"); 7187 goto common_error; 7188 7189 case bfd_reloc_dangerous: 7190 /* error_message should already be set. */ 7191 goto common_error; 7192 7193 default: 7194 error_message = _("unknown error"); 7195 /* Fall through. */ 7196 7197 common_error: 7198 BFD_ASSERT (error_message != NULL); 7199 (*info->callbacks->reloc_dangerous) 7200 (info, error_message, input_bfd, input_section, rel->r_offset); 7201 break; 7202 } 7203 } 7204 7205 if (!save_addend) 7206 addend = 0; 7207 } 7208 7209 return true; 7210 } 7211 7212 /* Set the right machine number. 
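   For LP64 this selects bfd_mach_aarch64; when ARCH_SIZE is 32 the
   ILP32 variant is chosen instead, so that consumers of the BFD can
   tell the two ABI variants of an input object apart.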
*/ 7213
7214 static bool
7215 elfNN_aarch64_object_p (bfd *abfd)
7216 {
7217 #if ARCH_SIZE == 32
7218 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
7219 #else
7220 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
7221 #endif
7222 return true;
7223 }
7224
7225 /* Function to keep AArch64 specific flags in the ELF header. */
7226
7227 static bool
7228 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
7229 {
7230 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
7231 {
7232 }
7233 else
7234 {
7235 elf_elfheader (abfd)->e_flags = flags;
7236 elf_flags_init (abfd) = true;
7237 }
7238
7239 return true;
7240 }
7241
7242 /* Merge backend specific data from an object file to the output
7243 object file when linking. */
7244
7245 static bool
7246 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
7247 {
7248 bfd *obfd = info->output_bfd;
7249 flagword out_flags;
7250 flagword in_flags;
7251 bool flags_compatible = true;
7252 asection *sec;
7253
7254 /* Check if we have the same endianness. */
7255 if (!_bfd_generic_verify_endian_match (ibfd, info))
7256 return false;
7257
7258 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
7259 return true;
7260
7261 /* The input BFD must have had its flags initialised. */
7262 /* The following seems bogus to me -- The flags are initialized in
7263 the assembler but I don't think an elf_flags_init field is
7264 written into the object. */
7265 /* BFD_ASSERT (elf_flags_init (ibfd)); */
7266
7267 in_flags = elf_elfheader (ibfd)->e_flags;
7268 out_flags = elf_elfheader (obfd)->e_flags;
7269
7270 if (!elf_flags_init (obfd))
7271 {
7272 /* If the input is the default architecture and had the default
7273 flags then do not bother setting the flags for the output
7274 architecture; instead allow future merges to do this. If no
7275 future merges ever set these flags then they will retain their
7276 uninitialised values, which, surprise surprise, correspond
7277 to the default values. */
7278 if (bfd_get_arch_info (ibfd)->the_default
7279 && elf_elfheader (ibfd)->e_flags == 0)
7280 return true;
7281
7282 elf_flags_init (obfd) = true;
7283 elf_elfheader (obfd)->e_flags = in_flags;
7284
7285 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
7286 && bfd_get_arch_info (obfd)->the_default)
7287 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
7288 bfd_get_mach (ibfd));
7289
7290 return true;
7291 }
7292
7293 /* Identical flags must be compatible. */
7294 if (in_flags == out_flags)
7295 return true;
7296
7297 /* Check to see if the input BFD actually contains any sections. If
7298 not, its flags may not have been initialised either, but it
7299 cannot actually cause any incompatibility. Do not short-circuit
7300 dynamic objects; their section list may be emptied by
7301 elf_link_add_object_symbols.
7302
7303 Also check to see if there are no code sections in the input.
7304 In this case there is no need to check for code specific flags.
7305 XXX - do we need to worry about floating-point format compatibility
7306 in data sections ?
*/ 7307 if (!(ibfd->flags & DYNAMIC)) 7308 { 7309 bool null_input_bfd = true; 7310 bool only_data_sections = true; 7311 7312 for (sec = ibfd->sections; sec != NULL; sec = sec->next) 7313 { 7314 if ((bfd_section_flags (sec) 7315 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS)) 7316 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS)) 7317 only_data_sections = false; 7318 7319 null_input_bfd = false; 7320 break; 7321 } 7322 7323 if (null_input_bfd || only_data_sections) 7324 return true; 7325 } 7326 7327 return flags_compatible; 7328 } 7329 7330 /* Display the flags field. */ 7331 7332 static bool 7333 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr) 7334 { 7335 FILE *file = (FILE *) ptr; 7336 unsigned long flags; 7337 7338 BFD_ASSERT (abfd != NULL && ptr != NULL); 7339 7340 /* Print normal ELF private data. */ 7341 _bfd_elf_print_private_bfd_data (abfd, ptr); 7342 7343 flags = elf_elfheader (abfd)->e_flags; 7344 /* Ignore init flag - it may not be set, despite the flags field 7345 containing valid data. */ 7346 7347 /* xgettext:c-format */ 7348 fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags); 7349 7350 if (flags) 7351 fprintf (file, _(" <Unrecognised flag bits set>")); 7352 7353 fputc ('\n', file); 7354 7355 return true; 7356 } 7357 7358 /* Return true if we need copy relocation against EH. */ 7359 7360 static bool 7361 need_copy_relocation_p (struct elf_aarch64_link_hash_entry *eh) 7362 { 7363 struct elf_dyn_relocs *p; 7364 asection *s; 7365 7366 for (p = eh->root.dyn_relocs; p != NULL; p = p->next) 7367 { 7368 /* If there is any pc-relative reference, we need to keep copy relocation 7369 to avoid propagating the relocation into runtime that current glibc 7370 does not support. */ 7371 if (p->pc_count) 7372 return true; 7373 7374 s = p->sec->output_section; 7375 /* Need copy relocation if it's against read-only section. */ 7376 if (s != NULL && (s->flags & SEC_READONLY) != 0) 7377 return true; 7378 } 7379 7380 return false; 7381 } 7382 7383 /* Adjust a symbol defined by a dynamic object and referenced by a 7384 regular object. The current definition is in some section of the 7385 dynamic object, but we're not including those sections. We have to 7386 change the definition to something the rest of the link can 7387 understand. */ 7388 7389 static bool 7390 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info, 7391 struct elf_link_hash_entry *h) 7392 { 7393 struct elf_aarch64_link_hash_table *htab; 7394 asection *s, *srel; 7395 7396 /* If this is a function, put it in the procedure linkage table. We 7397 will fill in the contents of the procedure linkage table later, 7398 when we know the address of the .got section. */ 7399 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt) 7400 { 7401 if (h->plt.refcount <= 0 7402 || (h->type != STT_GNU_IFUNC 7403 && (SYMBOL_CALLS_LOCAL (info, h) 7404 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT 7405 && h->root.type == bfd_link_hash_undefweak)))) 7406 { 7407 /* This case can occur if we saw a CALL26 reloc in 7408 an input file, but the symbol wasn't referred to 7409 by a dynamic object or all references were 7410 garbage collected. In which case we can end up 7411 resolving. */ 7412 h->plt.offset = (bfd_vma) - 1; 7413 h->needs_plt = 0; 7414 } 7415 7416 return true; 7417 } 7418 else 7419 /* Otherwise, reset to -1. 
*/ 7420 h->plt.offset = (bfd_vma) - 1; 7421 7422 7423 /* If this is a weak symbol, and there is a real definition, the 7424 processor independent code will have arranged for us to see the 7425 real definition first, and we can just use the same value. */ 7426 if (h->is_weakalias) 7427 { 7428 struct elf_link_hash_entry *def = weakdef (h); 7429 BFD_ASSERT (def->root.type == bfd_link_hash_defined); 7430 h->root.u.def.section = def->root.u.def.section; 7431 h->root.u.def.value = def->root.u.def.value; 7432 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc) 7433 h->non_got_ref = def->non_got_ref; 7434 return true; 7435 } 7436 7437 /* If we are creating a shared library, we must presume that the 7438 only references to the symbol are via the global offset table. 7439 For such cases we need not do anything here; the relocations will 7440 be handled correctly by relocate_section. */ 7441 if (bfd_link_pic (info)) 7442 return true; 7443 7444 /* If there are no references to this symbol that do not use the 7445 GOT, we don't need to generate a copy reloc. */ 7446 if (!h->non_got_ref) 7447 return true; 7448 7449 /* If -z nocopyreloc was given, we won't generate them either. */ 7450 if (info->nocopyreloc) 7451 { 7452 h->non_got_ref = 0; 7453 return true; 7454 } 7455 7456 if (ELIMINATE_COPY_RELOCS) 7457 { 7458 struct elf_aarch64_link_hash_entry *eh; 7459 /* If we don't find any dynamic relocs in read-only sections, then 7460 we'll be keeping the dynamic relocs and avoiding the copy reloc. */ 7461 eh = (struct elf_aarch64_link_hash_entry *) h; 7462 if (!need_copy_relocation_p (eh)) 7463 { 7464 h->non_got_ref = 0; 7465 return true; 7466 } 7467 } 7468 7469 /* We must allocate the symbol in our .dynbss section, which will 7470 become part of the .bss section of the executable. There will be 7471 an entry for this symbol in the .dynsym section. The dynamic 7472 object will contain position independent code, so all references 7473 from the dynamic object to this symbol will go through the global 7474 offset table. The dynamic linker will use the .dynsym entry to 7475 determine the address it must put in the global offset table, so 7476 both the dynamic object and the regular object will refer to the 7477 same memory location for the variable. */ 7478 7479 htab = elf_aarch64_hash_table (info); 7480 7481 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker 7482 to copy the initial value out of the dynamic object and into the 7483 runtime process image. */ 7484 if ((h->root.u.def.section->flags & SEC_READONLY) != 0) 7485 { 7486 s = htab->root.sdynrelro; 7487 srel = htab->root.sreldynrelro; 7488 } 7489 else 7490 { 7491 s = htab->root.sdynbss; 7492 srel = htab->root.srelbss; 7493 } 7494 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0) 7495 { 7496 srel->size += RELOC_SIZE (htab); 7497 h->needs_copy = 1; 7498 } 7499 7500 return _bfd_elf_adjust_dynamic_copy (info, h, s); 7501 7502 } 7503 7504 static bool 7505 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number) 7506 { 7507 struct elf_aarch64_local_symbol *locals; 7508 locals = elf_aarch64_locals (abfd); 7509 if (locals == NULL) 7510 { 7511 locals = (struct elf_aarch64_local_symbol *) 7512 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol)); 7513 if (locals == NULL) 7514 return false; 7515 elf_aarch64_locals (abfd) = locals; 7516 } 7517 return true; 7518 } 7519 7520 /* Create the .got section to hold the global offset table. 
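   Besides .got itself this creates the .rela.got (or .rel.got)
   relocation section, reserves one GOT_ENTRY_SIZE slot at the start of
   .got, defines _GLOBAL_OFFSET_TABLE_ if the backend wants the symbol,
   creates .got.plt when one is wanted, and reserves got_header_size
   bytes of header in the last section created.  Purely as an
   illustrative sketch of the space reserved here:

     .got     : [ reserved slot ][ entries allocated later ... ]
     .got.plt : [ header (got_header_size) ][ PLT GOT slots ... ]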
*/ 7521 7522 static bool 7523 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info) 7524 { 7525 const struct elf_backend_data *bed = get_elf_backend_data (abfd); 7526 flagword flags; 7527 asection *s; 7528 struct elf_link_hash_entry *h; 7529 struct elf_link_hash_table *htab = elf_hash_table (info); 7530 7531 /* This function may be called more than once. */ 7532 if (htab->sgot != NULL) 7533 return true; 7534 7535 flags = bed->dynamic_sec_flags; 7536 7537 s = bfd_make_section_anyway_with_flags (abfd, 7538 (bed->rela_plts_and_copies_p 7539 ? ".rela.got" : ".rel.got"), 7540 (bed->dynamic_sec_flags 7541 | SEC_READONLY)); 7542 if (s == NULL 7543 || !bfd_set_section_alignment (s, bed->s->log_file_align)) 7544 return false; 7545 htab->srelgot = s; 7546 7547 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags); 7548 if (s == NULL 7549 || !bfd_set_section_alignment (s, bed->s->log_file_align)) 7550 return false; 7551 htab->sgot = s; 7552 htab->sgot->size += GOT_ENTRY_SIZE; 7553 7554 if (bed->want_got_sym) 7555 { 7556 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got 7557 (or .got.plt) section. We don't do this in the linker script 7558 because we don't want to define the symbol if we are not creating 7559 a global offset table. */ 7560 h = _bfd_elf_define_linkage_sym (abfd, info, s, 7561 "_GLOBAL_OFFSET_TABLE_"); 7562 elf_hash_table (info)->hgot = h; 7563 if (h == NULL) 7564 return false; 7565 } 7566 7567 if (bed->want_got_plt) 7568 { 7569 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags); 7570 if (s == NULL 7571 || !bfd_set_section_alignment (s, bed->s->log_file_align)) 7572 return false; 7573 htab->sgotplt = s; 7574 } 7575 7576 /* The first bit of the global offset table is the header. */ 7577 s->size += bed->got_header_size; 7578 7579 return true; 7580 } 7581 7582 /* Look through the relocs for a section during the first phase. */ 7583 7584 static bool 7585 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info, 7586 asection *sec, const Elf_Internal_Rela *relocs) 7587 { 7588 Elf_Internal_Shdr *symtab_hdr; 7589 struct elf_link_hash_entry **sym_hashes; 7590 const Elf_Internal_Rela *rel; 7591 const Elf_Internal_Rela *rel_end; 7592 asection *sreloc; 7593 7594 struct elf_aarch64_link_hash_table *htab; 7595 7596 if (bfd_link_relocatable (info)) 7597 return true; 7598 7599 BFD_ASSERT (is_aarch64_elf (abfd)); 7600 7601 htab = elf_aarch64_hash_table (info); 7602 sreloc = NULL; 7603 7604 symtab_hdr = &elf_symtab_hdr (abfd); 7605 sym_hashes = elf_sym_hashes (abfd); 7606 7607 rel_end = relocs + sec->reloc_count; 7608 for (rel = relocs; rel < rel_end; rel++) 7609 { 7610 struct elf_link_hash_entry *h; 7611 unsigned int r_symndx; 7612 unsigned int r_type; 7613 bfd_reloc_code_real_type bfd_r_type; 7614 Elf_Internal_Sym *isym; 7615 7616 r_symndx = ELFNN_R_SYM (rel->r_info); 7617 r_type = ELFNN_R_TYPE (rel->r_info); 7618 7619 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr)) 7620 { 7621 /* xgettext:c-format */ 7622 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd, r_symndx); 7623 return false; 7624 } 7625 7626 if (r_symndx < symtab_hdr->sh_info) 7627 { 7628 /* A local symbol. */ 7629 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache, 7630 abfd, r_symndx); 7631 if (isym == NULL) 7632 return false; 7633 7634 /* Check relocation against local STT_GNU_IFUNC symbol. 
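   Local ifunc symbols have no entry in the global symbol hash table,
   so a hash entry is faked up for them here (via
   elfNN_aarch64_get_local_sym_hash) and marked as a defined
   STT_GNU_IFUNC; later passes can then treat local and global ifunc
   symbols through the same code paths.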
*/ 7635 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC) 7636 { 7637 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, 7638 true); 7639 if (h == NULL) 7640 return false; 7641 7642 /* Fake a STT_GNU_IFUNC symbol. */ 7643 h->type = STT_GNU_IFUNC; 7644 h->def_regular = 1; 7645 h->ref_regular = 1; 7646 h->forced_local = 1; 7647 h->root.type = bfd_link_hash_defined; 7648 } 7649 else 7650 h = NULL; 7651 } 7652 else 7653 { 7654 h = sym_hashes[r_symndx - symtab_hdr->sh_info]; 7655 while (h->root.type == bfd_link_hash_indirect 7656 || h->root.type == bfd_link_hash_warning) 7657 h = (struct elf_link_hash_entry *) h->root.u.i.link; 7658 } 7659 7660 /* Could be done earlier, if h were already available. */ 7661 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx); 7662 7663 if (h != NULL) 7664 { 7665 /* If a relocation refers to _GLOBAL_OFFSET_TABLE_, create the .got. 7666 This shows up in particular in an R_AARCH64_PREL64 in large model 7667 when calculating the pc-relative address to .got section which is 7668 used to initialize the gp register. */ 7669 if (h->root.root.string 7670 && strcmp (h->root.root.string, "_GLOBAL_OFFSET_TABLE_") == 0) 7671 { 7672 if (htab->root.dynobj == NULL) 7673 htab->root.dynobj = abfd; 7674 7675 if (! aarch64_elf_create_got_section (htab->root.dynobj, info)) 7676 return false; 7677 7678 BFD_ASSERT (h == htab->root.hgot); 7679 } 7680 7681 /* Create the ifunc sections for static executables. If we 7682 never see an indirect function symbol nor we are building 7683 a static executable, those sections will be empty and 7684 won't appear in output. */ 7685 switch (bfd_r_type) 7686 { 7687 default: 7688 break; 7689 7690 case BFD_RELOC_AARCH64_ADD_LO12: 7691 case BFD_RELOC_AARCH64_ADR_GOT_PAGE: 7692 case BFD_RELOC_AARCH64_ADR_HI21_PCREL: 7693 case BFD_RELOC_AARCH64_CALL26: 7694 case BFD_RELOC_AARCH64_GOT_LD_PREL19: 7695 case BFD_RELOC_AARCH64_JUMP26: 7696 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14: 7697 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC: 7698 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15: 7699 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15: 7700 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC: 7701 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC: 7702 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1: 7703 case BFD_RELOC_AARCH64_NN: 7704 if (htab->root.dynobj == NULL) 7705 htab->root.dynobj = abfd; 7706 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info)) 7707 return false; 7708 break; 7709 } 7710 7711 /* It is referenced by a non-shared object. */ 7712 h->ref_regular = 1; 7713 } 7714 7715 switch (bfd_r_type) 7716 { 7717 case BFD_RELOC_AARCH64_16: 7718 #if ARCH_SIZE == 64 7719 case BFD_RELOC_AARCH64_32: 7720 #endif 7721 if (bfd_link_pic (info) && (sec->flags & SEC_ALLOC) != 0) 7722 { 7723 if (h != NULL 7724 /* This is an absolute symbol. It represents a value instead 7725 of an address. */ 7726 && (bfd_is_abs_symbol (&h->root) 7727 /* This is an undefined symbol. */ 7728 || h->root.type == bfd_link_hash_undefined)) 7729 break; 7730 7731 /* For local symbols, defined global symbols in a non-ABS section, 7732 it is assumed that the value is an address. */ 7733 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; 7734 _bfd_error_handler 7735 /* xgettext:c-format */ 7736 (_("%pB: relocation %s against `%s' can not be used when making " 7737 "a shared object"), 7738 abfd, elfNN_aarch64_howto_table[howto_index].name, 7739 (h) ? 
h->root.root.string : "a local symbol"); 7740 bfd_set_error (bfd_error_bad_value); 7741 return false; 7742 } 7743 else 7744 break; 7745 7746 case BFD_RELOC_AARCH64_MOVW_G0_NC: 7747 case BFD_RELOC_AARCH64_MOVW_G1_NC: 7748 case BFD_RELOC_AARCH64_MOVW_G2_NC: 7749 case BFD_RELOC_AARCH64_MOVW_G3: 7750 if (bfd_link_pic (info)) 7751 { 7752 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; 7753 _bfd_error_handler 7754 /* xgettext:c-format */ 7755 (_("%pB: relocation %s against `%s' can not be used when making " 7756 "a shared object; recompile with -fPIC"), 7757 abfd, elfNN_aarch64_howto_table[howto_index].name, 7758 (h) ? h->root.root.string : "a local symbol"); 7759 bfd_set_error (bfd_error_bad_value); 7760 return false; 7761 } 7762 /* Fall through. */ 7763 7764 case BFD_RELOC_AARCH64_16_PCREL: 7765 case BFD_RELOC_AARCH64_32_PCREL: 7766 case BFD_RELOC_AARCH64_64_PCREL: 7767 case BFD_RELOC_AARCH64_ADD_LO12: 7768 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL: 7769 case BFD_RELOC_AARCH64_ADR_HI21_PCREL: 7770 case BFD_RELOC_AARCH64_ADR_LO21_PCREL: 7771 case BFD_RELOC_AARCH64_LDST128_LO12: 7772 case BFD_RELOC_AARCH64_LDST16_LO12: 7773 case BFD_RELOC_AARCH64_LDST32_LO12: 7774 case BFD_RELOC_AARCH64_LDST64_LO12: 7775 case BFD_RELOC_AARCH64_LDST8_LO12: 7776 case BFD_RELOC_AARCH64_LD_LO19_PCREL: 7777 if (h == NULL || bfd_link_pic (info)) 7778 break; 7779 /* Fall through. */ 7780 7781 case BFD_RELOC_AARCH64_NN: 7782 7783 /* We don't need to handle relocs into sections not going into 7784 the "real" output. */ 7785 if ((sec->flags & SEC_ALLOC) == 0) 7786 break; 7787 7788 if (h != NULL) 7789 { 7790 if (!bfd_link_pic (info)) 7791 h->non_got_ref = 1; 7792 7793 h->plt.refcount += 1; 7794 h->pointer_equality_needed = 1; 7795 } 7796 7797 /* No need to do anything if we're not creating a shared 7798 object. */ 7799 if (!(bfd_link_pic (info) 7800 /* If on the other hand, we are creating an executable, we 7801 may need to keep relocations for symbols satisfied by a 7802 dynamic library if we manage to avoid copy relocs for the 7803 symbol. 7804 7805 NOTE: Currently, there is no support of copy relocs 7806 elimination on pc-relative relocation types, because there is 7807 no dynamic relocation support for them in glibc. We still 7808 record the dynamic symbol reference for them. This is 7809 because one symbol may be referenced by both absolute 7810 relocation (for example, BFD_RELOC_AARCH64_NN) and 7811 pc-relative relocation. We need full symbol reference 7812 information to make correct decision later in 7813 elfNN_aarch64_adjust_dynamic_symbol. */ 7814 || (ELIMINATE_COPY_RELOCS 7815 && !bfd_link_pic (info) 7816 && h != NULL 7817 && (h->root.type == bfd_link_hash_defweak 7818 || !h->def_regular)))) 7819 break; 7820 7821 { 7822 struct elf_dyn_relocs *p; 7823 struct elf_dyn_relocs **head; 7824 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; 7825 7826 /* We must copy these reloc types into the output file. 7827 Create a reloc section in dynobj and make room for 7828 this reloc. */ 7829 if (sreloc == NULL) 7830 { 7831 if (htab->root.dynobj == NULL) 7832 htab->root.dynobj = abfd; 7833 7834 sreloc = _bfd_elf_make_dynamic_reloc_section 7835 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ true); 7836 7837 if (sreloc == NULL) 7838 return false; 7839 } 7840 7841 /* If this is a global symbol, we count the number of 7842 relocations we need for this symbol. */ 7843 if (h != NULL) 7844 { 7845 head = &h->dyn_relocs; 7846 } 7847 else 7848 { 7849 /* Track dynamic relocs needed for local syms too. 
7850 We really need local syms available to do this 7851 easily. Oh well. */ 7852 7853 asection *s; 7854 void **vpp; 7855 7856 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache, 7857 abfd, r_symndx); 7858 if (isym == NULL) 7859 return false; 7860 7861 s = bfd_section_from_elf_index (abfd, isym->st_shndx); 7862 if (s == NULL) 7863 s = sec; 7864 7865 /* Beware of type punned pointers vs strict aliasing 7866 rules. */ 7867 vpp = &(elf_section_data (s)->local_dynrel); 7868 head = (struct elf_dyn_relocs **) vpp; 7869 } 7870 7871 p = *head; 7872 if (p == NULL || p->sec != sec) 7873 { 7874 size_t amt = sizeof *p; 7875 p = ((struct elf_dyn_relocs *) 7876 bfd_zalloc (htab->root.dynobj, amt)); 7877 if (p == NULL) 7878 return false; 7879 p->next = *head; 7880 *head = p; 7881 p->sec = sec; 7882 } 7883 7884 p->count += 1; 7885 7886 if (elfNN_aarch64_howto_table[howto_index].pc_relative) 7887 p->pc_count += 1; 7888 } 7889 break; 7890 7891 /* RR: We probably want to keep a consistency check that 7892 there are no dangling GOT_PAGE relocs. */ 7893 case BFD_RELOC_AARCH64_ADR_GOT_PAGE: 7894 case BFD_RELOC_AARCH64_GOT_LD_PREL19: 7895 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14: 7896 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC: 7897 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15: 7898 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15: 7899 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC: 7900 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC: 7901 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1: 7902 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12: 7903 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21: 7904 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21: 7905 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC: 7906 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12: 7907 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19: 7908 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC: 7909 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1: 7910 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC: 7911 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21: 7912 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21: 7913 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC: 7914 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1: 7915 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 7916 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC: 7917 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 7918 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19: 7919 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: 7920 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1: 7921 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC: 7922 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21: 7923 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21: 7924 { 7925 unsigned got_type; 7926 unsigned old_got_type; 7927 7928 got_type = aarch64_reloc_got_type (bfd_r_type); 7929 7930 if (h) 7931 { 7932 h->got.refcount += 1; 7933 old_got_type = elf_aarch64_hash_entry (h)->got_type; 7934 } 7935 else 7936 { 7937 struct elf_aarch64_local_symbol *locals; 7938 7939 if (!elfNN_aarch64_allocate_local_symbols 7940 (abfd, symtab_hdr->sh_info)) 7941 return false; 7942 7943 locals = elf_aarch64_locals (abfd); 7944 BFD_ASSERT (r_symndx < symtab_hdr->sh_info); 7945 locals[r_symndx].got_refcount += 1; 7946 old_got_type = locals[r_symndx].got_type; 7947 } 7948 7949 /* If a variable is accessed with both general dynamic TLS 7950 methods, two slots may be created. */ 7951 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type)) 7952 got_type |= old_got_type; 7953 7954 /* We will already have issued an error message if there 7955 is a TLS/non-TLS mismatch, based on the symbol type. 7956 So just combine any TLS types needed. 
*/ 7957 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL 7958 && got_type != GOT_NORMAL) 7959 got_type |= old_got_type; 7960 7961 /* If the symbol is accessed by both IE and GD methods, we 7962 are able to relax. Turn off the GD flag, without 7963 messing up with any other kind of TLS types that may be 7964 involved. */ 7965 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type)) 7966 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD); 7967 7968 if (old_got_type != got_type) 7969 { 7970 if (h != NULL) 7971 elf_aarch64_hash_entry (h)->got_type = got_type; 7972 else 7973 { 7974 struct elf_aarch64_local_symbol *locals; 7975 locals = elf_aarch64_locals (abfd); 7976 BFD_ASSERT (r_symndx < symtab_hdr->sh_info); 7977 locals[r_symndx].got_type = got_type; 7978 } 7979 } 7980 7981 if (htab->root.dynobj == NULL) 7982 htab->root.dynobj = abfd; 7983 if (! aarch64_elf_create_got_section (htab->root.dynobj, info)) 7984 return false; 7985 break; 7986 } 7987 7988 case BFD_RELOC_AARCH64_CALL26: 7989 case BFD_RELOC_AARCH64_JUMP26: 7990 /* If this is a local symbol then we resolve it 7991 directly without creating a PLT entry. */ 7992 if (h == NULL) 7993 continue; 7994 7995 h->needs_plt = 1; 7996 if (h->plt.refcount <= 0) 7997 h->plt.refcount = 1; 7998 else 7999 h->plt.refcount += 1; 8000 break; 8001 8002 default: 8003 break; 8004 } 8005 } 8006 8007 return true; 8008 } 8009 8010 /* Treat mapping symbols as special target symbols. */ 8011 8012 static bool 8013 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED, 8014 asymbol *sym) 8015 { 8016 return bfd_is_aarch64_special_symbol_name (sym->name, 8017 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY); 8018 } 8019 8020 /* If the ELF symbol SYM might be a function in SEC, return the 8021 function size and set *CODE_OFF to the function's entry point, 8022 otherwise return zero. */ 8023 8024 static bfd_size_type 8025 elfNN_aarch64_maybe_function_sym (const asymbol *sym, asection *sec, 8026 bfd_vma *code_off) 8027 { 8028 bfd_size_type size; 8029 elf_symbol_type * elf_sym = (elf_symbol_type *) sym; 8030 8031 if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT 8032 | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0 8033 || sym->section != sec) 8034 return 0; 8035 8036 size = (sym->flags & BSF_SYNTHETIC) ? 0 : elf_sym->internal_elf_sym.st_size; 8037 8038 if (!(sym->flags & BSF_SYNTHETIC)) 8039 switch (ELF_ST_TYPE (elf_sym->internal_elf_sym.st_info)) 8040 { 8041 case STT_NOTYPE: 8042 /* Ignore symbols created by the annobin plugin for gcc and clang. 8043 These symbols are hidden, local, notype and have a size of 0. */ 8044 if (size == 0 8045 && sym->flags & BSF_LOCAL 8046 && ELF_ST_VISIBILITY (elf_sym->internal_elf_sym.st_other) == STV_HIDDEN) 8047 return 0; 8048 /* Fall through. */ 8049 case STT_FUNC: 8050 /* FIXME: Allow STT_GNU_IFUNC as well ? */ 8051 break; 8052 default: 8053 return 0; 8054 } 8055 8056 if ((sym->flags & BSF_LOCAL) 8057 && bfd_is_aarch64_special_symbol_name (sym->name, 8058 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)) 8059 return 0; 8060 8061 *code_off = sym->value; 8062 8063 /* Do not return 0 for the function's size. */ 8064 return size ? 
size : 1; 8065 } 8066 8067 static bool 8068 elfNN_aarch64_find_inliner_info (bfd *abfd, 8069 const char **filename_ptr, 8070 const char **functionname_ptr, 8071 unsigned int *line_ptr) 8072 { 8073 bool found; 8074 found = _bfd_dwarf2_find_inliner_info 8075 (abfd, filename_ptr, 8076 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info); 8077 return found; 8078 } 8079 8080 8081 static bool 8082 elfNN_aarch64_init_file_header (bfd *abfd, struct bfd_link_info *link_info) 8083 { 8084 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */ 8085 8086 if (!_bfd_elf_init_file_header (abfd, link_info)) 8087 return false; 8088 8089 i_ehdrp = elf_elfheader (abfd); 8090 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION; 8091 return true; 8092 } 8093 8094 static enum elf_reloc_type_class 8095 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED, 8096 const asection *rel_sec ATTRIBUTE_UNUSED, 8097 const Elf_Internal_Rela *rela) 8098 { 8099 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info); 8100 8101 if (htab->root.dynsym != NULL 8102 && htab->root.dynsym->contents != NULL) 8103 { 8104 /* Check relocation against STT_GNU_IFUNC symbol if there are 8105 dynamic symbols. */ 8106 bfd *abfd = info->output_bfd; 8107 const struct elf_backend_data *bed = get_elf_backend_data (abfd); 8108 unsigned long r_symndx = ELFNN_R_SYM (rela->r_info); 8109 if (r_symndx != STN_UNDEF) 8110 { 8111 Elf_Internal_Sym sym; 8112 if (!bed->s->swap_symbol_in (abfd, 8113 (htab->root.dynsym->contents 8114 + r_symndx * bed->s->sizeof_sym), 8115 0, &sym)) 8116 { 8117 /* xgettext:c-format */ 8118 _bfd_error_handler (_("%pB symbol number %lu references" 8119 " nonexistent SHT_SYMTAB_SHNDX section"), 8120 abfd, r_symndx); 8121 /* Ideally an error class should be returned here. */ 8122 } 8123 else if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC) 8124 return reloc_class_ifunc; 8125 } 8126 } 8127 8128 switch ((int) ELFNN_R_TYPE (rela->r_info)) 8129 { 8130 case AARCH64_R (IRELATIVE): 8131 return reloc_class_ifunc; 8132 case AARCH64_R (RELATIVE): 8133 return reloc_class_relative; 8134 case AARCH64_R (JUMP_SLOT): 8135 return reloc_class_plt; 8136 case AARCH64_R (COPY): 8137 return reloc_class_copy; 8138 default: 8139 return reloc_class_normal; 8140 } 8141 } 8142 8143 /* Handle an AArch64 specific section when reading an object file. This is 8144 called when bfd_section_from_shdr finds a section with an unknown 8145 type. */ 8146 8147 static bool 8148 elfNN_aarch64_section_from_shdr (bfd *abfd, 8149 Elf_Internal_Shdr *hdr, 8150 const char *name, int shindex) 8151 { 8152 /* There ought to be a place to keep ELF backend specific flags, but 8153 at the moment there isn't one. We just keep track of the 8154 sections by their name, instead. Fortunately, the ABI gives 8155 names for all the AArch64 specific sections, so we will probably get 8156 away with this. */ 8157 switch (hdr->sh_type) 8158 { 8159 case SHT_AARCH64_ATTRIBUTES: 8160 break; 8161 8162 default: 8163 return false; 8164 } 8165 8166 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex)) 8167 return false; 8168 8169 return true; 8170 } 8171 8172 /* A structure used to record a list of sections, independently 8173 of the next and prev fields in the asection structure. 
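   The list is doubly linked; it is grown by
   record_section_with_aarch64_elf_section_data () and shrunk by
   unrecord_section_with_aarch64_elf_section_data () below, with the
   prev pointers kept purely so that removal is cheap.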
*/ 8174 typedef struct section_list 8175 { 8176 asection *sec; 8177 struct section_list *next; 8178 struct section_list *prev; 8179 } 8180 section_list; 8181 8182 /* Unfortunately we need to keep a list of sections for which 8183 an _aarch64_elf_section_data structure has been allocated. This 8184 is because it is possible for functions like elfNN_aarch64_write_section 8185 to be called on a section which has had an elf_data_structure 8186 allocated for it (and so the used_by_bfd field is valid) but 8187 for which the AArch64 extended version of this structure - the 8188 _aarch64_elf_section_data structure - has not been allocated. */ 8189 static section_list *sections_with_aarch64_elf_section_data = NULL; 8190 8191 static void 8192 record_section_with_aarch64_elf_section_data (asection *sec) 8193 { 8194 struct section_list *entry; 8195 8196 entry = bfd_malloc (sizeof (*entry)); 8197 if (entry == NULL) 8198 return; 8199 entry->sec = sec; 8200 entry->next = sections_with_aarch64_elf_section_data; 8201 entry->prev = NULL; 8202 if (entry->next != NULL) 8203 entry->next->prev = entry; 8204 sections_with_aarch64_elf_section_data = entry; 8205 } 8206 8207 static struct section_list * 8208 find_aarch64_elf_section_entry (asection *sec) 8209 { 8210 struct section_list *entry; 8211 static struct section_list *last_entry = NULL; 8212 8213 /* This is a short cut for the typical case where the sections are added 8214 to the sections_with_aarch64_elf_section_data list in forward order and 8215 then looked up here in backwards order. This makes a real difference 8216 to the ld-srec/sec64k.exp linker test. */ 8217 entry = sections_with_aarch64_elf_section_data; 8218 if (last_entry != NULL) 8219 { 8220 if (last_entry->sec == sec) 8221 entry = last_entry; 8222 else if (last_entry->next != NULL && last_entry->next->sec == sec) 8223 entry = last_entry->next; 8224 } 8225 8226 for (; entry; entry = entry->next) 8227 if (entry->sec == sec) 8228 break; 8229 8230 if (entry) 8231 /* Record the entry prior to this one - it is the entry we are 8232 most likely to want to locate next time. Also this way if we 8233 have been called from 8234 unrecord_section_with_aarch64_elf_section_data () we will not 8235 be caching a pointer that is about to be freed. */ 8236 last_entry = entry->prev; 8237 8238 return entry; 8239 } 8240 8241 static void 8242 unrecord_section_with_aarch64_elf_section_data (asection *sec) 8243 { 8244 struct section_list *entry; 8245 8246 entry = find_aarch64_elf_section_entry (sec); 8247 8248 if (entry) 8249 { 8250 if (entry->prev != NULL) 8251 entry->prev->next = entry->next; 8252 if (entry->next != NULL) 8253 entry->next->prev = entry->prev; 8254 if (entry == sections_with_aarch64_elf_section_data) 8255 sections_with_aarch64_elf_section_data = entry->next; 8256 free (entry); 8257 } 8258 } 8259 8260 8261 typedef struct 8262 { 8263 void *finfo; 8264 struct bfd_link_info *info; 8265 asection *sec; 8266 int sec_shndx; 8267 int (*func) (void *, const char *, Elf_Internal_Sym *, 8268 asection *, struct elf_link_hash_entry *); 8269 } output_arch_syminfo; 8270 8271 enum map_symbol_type 8272 { 8273 AARCH64_MAP_INSN, 8274 AARCH64_MAP_DATA 8275 }; 8276 8277 8278 /* Output a single mapping symbol. 
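   Mapping symbols follow the usual AArch64 ELF convention: "$x" marks
   the start of a run of A64 instructions and "$d" the start of a run
   of data.  For example, a long branch stub is annotated below (in
   aarch64_map_one_stub) with "$x" at offset 0 for its code and "$d" at
   offset 16 for the literal holding the branch target.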
*/ 8279 8280 static bool 8281 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi, 8282 enum map_symbol_type type, bfd_vma offset) 8283 { 8284 static const char *names[2] = { "$x", "$d" }; 8285 Elf_Internal_Sym sym; 8286 8287 sym.st_value = (osi->sec->output_section->vma 8288 + osi->sec->output_offset + offset); 8289 sym.st_size = 0; 8290 sym.st_other = 0; 8291 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE); 8292 sym.st_shndx = osi->sec_shndx; 8293 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1; 8294 } 8295 8296 /* Output a single local symbol for a generated stub. */ 8297 8298 static bool 8299 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name, 8300 bfd_vma offset, bfd_vma size) 8301 { 8302 Elf_Internal_Sym sym; 8303 8304 sym.st_value = (osi->sec->output_section->vma 8305 + osi->sec->output_offset + offset); 8306 sym.st_size = size; 8307 sym.st_other = 0; 8308 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC); 8309 sym.st_shndx = osi->sec_shndx; 8310 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1; 8311 } 8312 8313 static bool 8314 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg) 8315 { 8316 struct elf_aarch64_stub_hash_entry *stub_entry; 8317 asection *stub_sec; 8318 bfd_vma addr; 8319 char *stub_name; 8320 output_arch_syminfo *osi; 8321 8322 /* Massage our args to the form they really have. */ 8323 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry; 8324 osi = (output_arch_syminfo *) in_arg; 8325 8326 stub_sec = stub_entry->stub_sec; 8327 8328 /* Ensure this stub is attached to the current section being 8329 processed. */ 8330 if (stub_sec != osi->sec) 8331 return true; 8332 8333 addr = (bfd_vma) stub_entry->stub_offset; 8334 8335 stub_name = stub_entry->output_name; 8336 8337 switch (stub_entry->stub_type) 8338 { 8339 case aarch64_stub_adrp_branch: 8340 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr, 8341 sizeof (aarch64_adrp_branch_stub))) 8342 return false; 8343 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr)) 8344 return false; 8345 break; 8346 case aarch64_stub_long_branch: 8347 if (!elfNN_aarch64_output_stub_sym 8348 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub))) 8349 return false; 8350 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr)) 8351 return false; 8352 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16)) 8353 return false; 8354 break; 8355 case aarch64_stub_erratum_835769_veneer: 8356 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr, 8357 sizeof (aarch64_erratum_835769_stub))) 8358 return false; 8359 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr)) 8360 return false; 8361 break; 8362 case aarch64_stub_erratum_843419_veneer: 8363 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr, 8364 sizeof (aarch64_erratum_843419_stub))) 8365 return false; 8366 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr)) 8367 return false; 8368 break; 8369 case aarch64_stub_none: 8370 break; 8371 8372 default: 8373 abort (); 8374 } 8375 8376 return true; 8377 } 8378 8379 /* Output mapping symbols for linker generated sections. 
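   The sections handled here are the stub sections created in the stub
   BFD (recognised by STUB_SUFFIX in their names), each of whose stubs
   is visited through the stub hash table, and the PLT, which simply
   gets a "$x" at its start.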
*/ 8380 8381 static bool 8382 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd, 8383 struct bfd_link_info *info, 8384 void *finfo, 8385 int (*func) (void *, const char *, 8386 Elf_Internal_Sym *, 8387 asection *, 8388 struct elf_link_hash_entry 8389 *)) 8390 { 8391 output_arch_syminfo osi; 8392 struct elf_aarch64_link_hash_table *htab; 8393 8394 htab = elf_aarch64_hash_table (info); 8395 8396 osi.finfo = finfo; 8397 osi.info = info; 8398 osi.func = func; 8399 8400 /* Long calls stubs. */ 8401 if (htab->stub_bfd && htab->stub_bfd->sections) 8402 { 8403 asection *stub_sec; 8404 8405 for (stub_sec = htab->stub_bfd->sections; 8406 stub_sec != NULL; stub_sec = stub_sec->next) 8407 { 8408 /* Ignore non-stub sections. */ 8409 if (!strstr (stub_sec->name, STUB_SUFFIX)) 8410 continue; 8411 8412 osi.sec = stub_sec; 8413 8414 osi.sec_shndx = _bfd_elf_section_from_bfd_section 8415 (output_bfd, osi.sec->output_section); 8416 8417 /* The first instruction in a stub is always a branch. */ 8418 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0)) 8419 return false; 8420 8421 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub, 8422 &osi); 8423 } 8424 } 8425 8426 /* Finally, output mapping symbols for the PLT. */ 8427 if (!htab->root.splt || htab->root.splt->size == 0) 8428 return true; 8429 8430 osi.sec_shndx = _bfd_elf_section_from_bfd_section 8431 (output_bfd, htab->root.splt->output_section); 8432 osi.sec = htab->root.splt; 8433 8434 elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0); 8435 8436 return true; 8437 8438 } 8439 8440 /* Allocate target specific section data. */ 8441 8442 static bool 8443 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec) 8444 { 8445 if (!sec->used_by_bfd) 8446 { 8447 _aarch64_elf_section_data *sdata; 8448 size_t amt = sizeof (*sdata); 8449 8450 sdata = bfd_zalloc (abfd, amt); 8451 if (sdata == NULL) 8452 return false; 8453 sec->used_by_bfd = sdata; 8454 } 8455 8456 record_section_with_aarch64_elf_section_data (sec); 8457 8458 return _bfd_elf_new_section_hook (abfd, sec); 8459 } 8460 8461 8462 static void 8463 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED, 8464 asection *sec, 8465 void *ignore ATTRIBUTE_UNUSED) 8466 { 8467 unrecord_section_with_aarch64_elf_section_data (sec); 8468 } 8469 8470 static bool 8471 elfNN_aarch64_close_and_cleanup (bfd *abfd) 8472 { 8473 if (abfd->sections) 8474 bfd_map_over_sections (abfd, 8475 unrecord_section_via_map_over_sections, NULL); 8476 8477 return _bfd_elf_close_and_cleanup (abfd); 8478 } 8479 8480 static bool 8481 elfNN_aarch64_bfd_free_cached_info (bfd *abfd) 8482 { 8483 if (abfd->sections) 8484 bfd_map_over_sections (abfd, 8485 unrecord_section_via_map_over_sections, NULL); 8486 8487 return _bfd_free_cached_info (abfd); 8488 } 8489 8490 /* Create dynamic sections. This is different from the ARM backend in that 8491 the got, plt, gotplt and their relocation sections are all created in the 8492 standard part of the bfd elf backend. */ 8493 8494 static bool 8495 elfNN_aarch64_create_dynamic_sections (bfd *dynobj, 8496 struct bfd_link_info *info) 8497 { 8498 /* We need to create .got section. */ 8499 if (!aarch64_elf_create_got_section (dynobj, info)) 8500 return false; 8501 8502 return _bfd_elf_create_dynamic_sections (dynobj, info); 8503 } 8504 8505 8506 /* Allocate space in .plt, .got and associated reloc sections for 8507 dynamic relocs. 
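   As a rough worked example of the accounting done below: a symbol
   with a positive plt.refcount that gets a PLT entry costs
   plt_entry_size bytes in .plt, GOT_ENTRY_SIZE bytes in .got.plt and
   RELOC_SIZE bytes in .rela.plt; a GOT_NORMAL access costs one
   GOT_ENTRY_SIZE slot in .got plus, when a dynamic relocation is
   required, RELOC_SIZE bytes in .rela.got; the TLS GD and TLSDESC
   forms each reserve two GOT slots (GOT_ENTRY_SIZE * 2).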
*/ 8508
8509 static bool
8510 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
8511 {
8512 struct bfd_link_info *info;
8513 struct elf_aarch64_link_hash_table *htab;
8514 struct elf_aarch64_link_hash_entry *eh;
8515 struct elf_dyn_relocs *p;
8516
8517 /* An example of a bfd_link_hash_indirect symbol is a versioned
8518 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
8519 -> __gxx_personality_v0(bfd_link_hash_defined)
8520
8521 There is no need to process bfd_link_hash_indirect symbols here
8522 because we will also be presented with the concrete instance of
8523 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
8524 called to copy all relevant data from the generic to the concrete
8525 symbol instance. */
8526 if (h->root.type == bfd_link_hash_indirect)
8527 return true;
8528
8529 if (h->root.type == bfd_link_hash_warning)
8530 h = (struct elf_link_hash_entry *) h->root.u.i.link;
8531
8532 info = (struct bfd_link_info *) inf;
8533 htab = elf_aarch64_hash_table (info);
8534
8535 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
8536 here if it is defined and referenced in a non-shared object. */
8537 if (h->type == STT_GNU_IFUNC
8538 && h->def_regular)
8539 return true;
8540 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
8541 {
8542 /* Make sure this symbol is output as a dynamic symbol.
8543 Undefined weak syms won't yet be marked as dynamic. */
8544 if (h->dynindx == -1 && !h->forced_local
8545 && h->root.type == bfd_link_hash_undefweak)
8546 {
8547 if (!bfd_elf_link_record_dynamic_symbol (info, h))
8548 return false;
8549 }
8550
8551 if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
8552 {
8553 asection *s = htab->root.splt;
8554
8555 /* If this is the first .plt entry, make room for the special
8556 first entry. */
8557 if (s->size == 0)
8558 s->size += htab->plt_header_size;
8559
8560 h->plt.offset = s->size;
8561
8562 /* If this symbol is not defined in a regular file, and we are
8563 not generating a shared library, then set the symbol to this
8564 location in the .plt. This is required to make function
8565 pointers compare as equal between the normal executable and
8566 the shared library. */
8567 if (!bfd_link_pic (info) && !h->def_regular)
8568 {
8569 h->root.u.def.section = s;
8570 h->root.u.def.value = h->plt.offset;
8571 }
8572
8573 /* Make room for this entry. For now we only create the
8574 small model PLT entries. We later need to find a way
8575 of relaxing into these from the large model PLT entries. */
8576 s->size += htab->plt_entry_size;
8577
8578 /* We also need to make an entry in the .got.plt section, which
8579 will be placed in the .got section by the linker script. */
8580 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
8581
8582 /* We also need to make an entry in the .rela.plt section. */
8583 htab->root.srelplt->size += RELOC_SIZE (htab);
8584
8585 /* We need to ensure that all GOT entries that serve the PLT
8586 are consecutive with the special GOT slots [0] [1] and
8587 [2]. Any additional relocations, such as
8588 R_AARCH64_TLSDESC, must be placed after the PLT related
8589 entries. We abuse the reloc_count such that during
8590 sizing we adjust reloc_count to indicate the number of
8591 PLT related reserved entries. In subsequent phases when
8592 filling in the contents of the reloc entries, PLT related
8593 entries are placed by computing their PLT index (0
8594 .. reloc_count).
While other none PLT relocs are placed 8595 at the slot indicated by reloc_count and reloc_count is 8596 updated. */ 8597 8598 htab->root.srelplt->reloc_count++; 8599 8600 /* Mark the DSO in case R_<CLS>_JUMP_SLOT relocs against 8601 variant PCS symbols are present. */ 8602 if (h->other & STO_AARCH64_VARIANT_PCS) 8603 htab->variant_pcs = 1; 8604 8605 } 8606 else 8607 { 8608 h->plt.offset = (bfd_vma) - 1; 8609 h->needs_plt = 0; 8610 } 8611 } 8612 else 8613 { 8614 h->plt.offset = (bfd_vma) - 1; 8615 h->needs_plt = 0; 8616 } 8617 8618 eh = (struct elf_aarch64_link_hash_entry *) h; 8619 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1; 8620 8621 if (h->got.refcount > 0) 8622 { 8623 bool dyn; 8624 unsigned got_type = elf_aarch64_hash_entry (h)->got_type; 8625 8626 h->got.offset = (bfd_vma) - 1; 8627 8628 dyn = htab->root.dynamic_sections_created; 8629 8630 /* Make sure this symbol is output as a dynamic symbol. 8631 Undefined weak syms won't yet be marked as dynamic. */ 8632 if (dyn && h->dynindx == -1 && !h->forced_local 8633 && h->root.type == bfd_link_hash_undefweak) 8634 { 8635 if (!bfd_elf_link_record_dynamic_symbol (info, h)) 8636 return false; 8637 } 8638 8639 if (got_type == GOT_UNKNOWN) 8640 { 8641 } 8642 else if (got_type == GOT_NORMAL) 8643 { 8644 h->got.offset = htab->root.sgot->size; 8645 htab->root.sgot->size += GOT_ENTRY_SIZE; 8646 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 8647 || h->root.type != bfd_link_hash_undefweak) 8648 && (bfd_link_pic (info) 8649 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)) 8650 /* Undefined weak symbol in static PIE resolves to 0 without 8651 any dynamic relocations. */ 8652 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h)) 8653 { 8654 htab->root.srelgot->size += RELOC_SIZE (htab); 8655 } 8656 } 8657 else 8658 { 8659 int indx; 8660 if (got_type & GOT_TLSDESC_GD) 8661 { 8662 eh->tlsdesc_got_jump_table_offset = 8663 (htab->root.sgotplt->size 8664 - aarch64_compute_jump_table_size (htab)); 8665 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2; 8666 h->got.offset = (bfd_vma) - 2; 8667 } 8668 8669 if (got_type & GOT_TLS_GD) 8670 { 8671 h->got.offset = htab->root.sgot->size; 8672 htab->root.sgot->size += GOT_ENTRY_SIZE * 2; 8673 } 8674 8675 if (got_type & GOT_TLS_IE) 8676 { 8677 h->got.offset = htab->root.sgot->size; 8678 htab->root.sgot->size += GOT_ENTRY_SIZE; 8679 } 8680 8681 indx = h && h->dynindx != -1 ? h->dynindx : 0; 8682 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 8683 || h->root.type != bfd_link_hash_undefweak) 8684 && (!bfd_link_executable (info) 8685 || indx != 0 8686 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h))) 8687 { 8688 if (got_type & GOT_TLSDESC_GD) 8689 { 8690 htab->root.srelplt->size += RELOC_SIZE (htab); 8691 /* Note reloc_count not incremented here! We have 8692 already adjusted reloc_count for this relocation 8693 type. */ 8694 8695 /* TLSDESC PLT is now needed, but not yet determined. */ 8696 htab->root.tlsdesc_plt = (bfd_vma) - 1; 8697 } 8698 8699 if (got_type & GOT_TLS_GD) 8700 htab->root.srelgot->size += RELOC_SIZE (htab) * 2; 8701 8702 if (got_type & GOT_TLS_IE) 8703 htab->root.srelgot->size += RELOC_SIZE (htab); 8704 } 8705 } 8706 } 8707 else 8708 { 8709 h->got.offset = (bfd_vma) - 1; 8710 } 8711 8712 if (h->dyn_relocs == NULL) 8713 return true; 8714 8715 for (p = h->dyn_relocs; p != NULL; p = p->next) 8716 if (eh->def_protected) 8717 { 8718 /* Disallow copy relocations against protected symbol. 
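   A copy relocation would give the executable its own copy of the
   object, while references from within the defining shared library
   bind locally (the symbol is protected) and keep using the library's
   copy; the two copies could then diverge, so this case is rejected
   with a hard error.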
*/ 8719 asection *s = p->sec->output_section; 8720 if (s != NULL && (s->flags & SEC_READONLY) != 0) 8721 { 8722 info->callbacks->einfo 8723 /* xgettext:c-format */ 8724 (_ ("%F%P: %pB: copy relocation against non-copyable " 8725 "protected symbol `%s'\n"), 8726 p->sec->owner, h->root.root.string); 8727 return false; 8728 } 8729 } 8730 8731 /* In the shared -Bsymbolic case, discard space allocated for 8732 dynamic pc-relative relocs against symbols which turn out to be 8733 defined in regular objects. For the normal shared case, discard 8734 space for pc-relative relocs that have become local due to symbol 8735 visibility changes. */ 8736 8737 if (bfd_link_pic (info)) 8738 { 8739 /* Relocs that use pc_count are those that appear on a call 8740 insn, or certain REL relocs that can generated via assembly. 8741 We want calls to protected symbols to resolve directly to the 8742 function rather than going via the plt. If people want 8743 function pointer comparisons to work as expected then they 8744 should avoid writing weird assembly. */ 8745 if (SYMBOL_CALLS_LOCAL (info, h)) 8746 { 8747 struct elf_dyn_relocs **pp; 8748 8749 for (pp = &h->dyn_relocs; (p = *pp) != NULL;) 8750 { 8751 p->count -= p->pc_count; 8752 p->pc_count = 0; 8753 if (p->count == 0) 8754 *pp = p->next; 8755 else 8756 pp = &p->next; 8757 } 8758 } 8759 8760 /* Also discard relocs on undefined weak syms with non-default 8761 visibility. */ 8762 if (h->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak) 8763 { 8764 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT 8765 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h)) 8766 h->dyn_relocs = NULL; 8767 8768 /* Make sure undefined weak symbols are output as a dynamic 8769 symbol in PIEs. */ 8770 else if (h->dynindx == -1 8771 && !h->forced_local 8772 && h->root.type == bfd_link_hash_undefweak 8773 && !bfd_elf_link_record_dynamic_symbol (info, h)) 8774 return false; 8775 } 8776 8777 } 8778 else if (ELIMINATE_COPY_RELOCS) 8779 { 8780 /* For the non-shared case, discard space for relocs against 8781 symbols which turn out to need copy relocs or are not 8782 dynamic. */ 8783 8784 if (!h->non_got_ref 8785 && ((h->def_dynamic 8786 && !h->def_regular) 8787 || (htab->root.dynamic_sections_created 8788 && (h->root.type == bfd_link_hash_undefweak 8789 || h->root.type == bfd_link_hash_undefined)))) 8790 { 8791 /* Make sure this symbol is output as a dynamic symbol. 8792 Undefined weak syms won't yet be marked as dynamic. */ 8793 if (h->dynindx == -1 8794 && !h->forced_local 8795 && h->root.type == bfd_link_hash_undefweak 8796 && !bfd_elf_link_record_dynamic_symbol (info, h)) 8797 return false; 8798 8799 /* If that succeeded, we know we'll be keeping all the 8800 relocs. */ 8801 if (h->dynindx != -1) 8802 goto keep; 8803 } 8804 8805 h->dyn_relocs = NULL; 8806 8807 keep:; 8808 } 8809 8810 /* Finally, allocate space. */ 8811 for (p = h->dyn_relocs; p != NULL; p = p->next) 8812 { 8813 asection *sreloc; 8814 8815 sreloc = elf_section_data (p->sec)->sreloc; 8816 8817 BFD_ASSERT (sreloc != NULL); 8818 8819 sreloc->size += p->count * RELOC_SIZE (htab); 8820 } 8821 8822 return true; 8823 } 8824 8825 /* Allocate space in .plt, .got and associated reloc sections for 8826 ifunc dynamic relocs. */ 8827 8828 static bool 8829 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h, 8830 void *inf) 8831 { 8832 struct bfd_link_info *info; 8833 struct elf_aarch64_link_hash_table *htab; 8834 8835 /* An example of a bfd_link_hash_indirect symbol is versioned 8836 symbol. 
For example: __gxx_personality_v0(bfd_link_hash_indirect) 8837 -> __gxx_personality_v0(bfd_link_hash_defined) 8838 8839 There is no need to process bfd_link_hash_indirect symbols here 8840 because we will also be presented with the concrete instance of 8841 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been 8842 called to copy all relevant data from the generic to the concrete 8843 symbol instance. */ 8844 if (h->root.type == bfd_link_hash_indirect) 8845 return true; 8846 8847 if (h->root.type == bfd_link_hash_warning) 8848 h = (struct elf_link_hash_entry *) h->root.u.i.link; 8849 8850 info = (struct bfd_link_info *) inf; 8851 htab = elf_aarch64_hash_table (info); 8852 8853 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it 8854 here if it is defined and referenced in a non-shared object. */ 8855 if (h->type == STT_GNU_IFUNC 8856 && h->def_regular) 8857 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h, 8858 &h->dyn_relocs, 8859 htab->plt_entry_size, 8860 htab->plt_header_size, 8861 GOT_ENTRY_SIZE, 8862 false); 8863 return true; 8864 } 8865 8866 /* Allocate space in .plt, .got and associated reloc sections for 8867 local ifunc dynamic relocs. */ 8868 8869 static int 8870 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf) 8871 { 8872 struct elf_link_hash_entry *h 8873 = (struct elf_link_hash_entry *) *slot; 8874 8875 if (h->type != STT_GNU_IFUNC 8876 || !h->def_regular 8877 || !h->ref_regular 8878 || !h->forced_local 8879 || h->root.type != bfd_link_hash_defined) 8880 abort (); 8881 8882 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf); 8883 } 8884 8885 /* This is the most important function of all . Innocuosly named 8886 though ! */ 8887 8888 static bool 8889 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED, 8890 struct bfd_link_info *info) 8891 { 8892 struct elf_aarch64_link_hash_table *htab; 8893 bfd *dynobj; 8894 asection *s; 8895 bool relocs; 8896 bfd *ibfd; 8897 8898 htab = elf_aarch64_hash_table ((info)); 8899 dynobj = htab->root.dynobj; 8900 8901 BFD_ASSERT (dynobj != NULL); 8902 8903 if (htab->root.dynamic_sections_created) 8904 { 8905 if (bfd_link_executable (info) && !info->nointerp) 8906 { 8907 s = bfd_get_linker_section (dynobj, ".interp"); 8908 if (s == NULL) 8909 abort (); 8910 s->size = sizeof ELF_DYNAMIC_INTERPRETER; 8911 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER; 8912 } 8913 } 8914 8915 /* Set up .got offsets for local syms, and space for local dynamic 8916 relocs. */ 8917 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next) 8918 { 8919 struct elf_aarch64_local_symbol *locals = NULL; 8920 Elf_Internal_Shdr *symtab_hdr; 8921 asection *srel; 8922 unsigned int i; 8923 8924 if (!is_aarch64_elf (ibfd)) 8925 continue; 8926 8927 for (s = ibfd->sections; s != NULL; s = s->next) 8928 { 8929 struct elf_dyn_relocs *p; 8930 8931 for (p = (struct elf_dyn_relocs *) 8932 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next) 8933 { 8934 if (!bfd_is_abs_section (p->sec) 8935 && bfd_is_abs_section (p->sec->output_section)) 8936 { 8937 /* Input section has been discarded, either because 8938 it is a copy of a linkonce section or due to 8939 linker script /DISCARD/, so we'll be discarding 8940 the relocs too. 
*/ 8941 } 8942 else if (p->count != 0) 8943 { 8944 srel = elf_section_data (p->sec)->sreloc; 8945 srel->size += p->count * RELOC_SIZE (htab); 8946 if ((p->sec->output_section->flags & SEC_READONLY) != 0) 8947 info->flags |= DF_TEXTREL; 8948 } 8949 } 8950 } 8951 8952 locals = elf_aarch64_locals (ibfd); 8953 if (!locals) 8954 continue; 8955 8956 symtab_hdr = &elf_symtab_hdr (ibfd); 8957 srel = htab->root.srelgot; 8958 for (i = 0; i < symtab_hdr->sh_info; i++) 8959 { 8960 locals[i].got_offset = (bfd_vma) - 1; 8961 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1; 8962 if (locals[i].got_refcount > 0) 8963 { 8964 unsigned got_type = locals[i].got_type; 8965 if (got_type & GOT_TLSDESC_GD) 8966 { 8967 locals[i].tlsdesc_got_jump_table_offset = 8968 (htab->root.sgotplt->size 8969 - aarch64_compute_jump_table_size (htab)); 8970 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2; 8971 locals[i].got_offset = (bfd_vma) - 2; 8972 } 8973 8974 if (got_type & GOT_TLS_GD) 8975 { 8976 locals[i].got_offset = htab->root.sgot->size; 8977 htab->root.sgot->size += GOT_ENTRY_SIZE * 2; 8978 } 8979 8980 if (got_type & GOT_TLS_IE 8981 || got_type & GOT_NORMAL) 8982 { 8983 locals[i].got_offset = htab->root.sgot->size; 8984 htab->root.sgot->size += GOT_ENTRY_SIZE; 8985 } 8986 8987 if (got_type == GOT_UNKNOWN) 8988 { 8989 } 8990 8991 if (bfd_link_pic (info)) 8992 { 8993 if (got_type & GOT_TLSDESC_GD) 8994 { 8995 htab->root.srelplt->size += RELOC_SIZE (htab); 8996 /* Note RELOC_COUNT not incremented here! */ 8997 htab->root.tlsdesc_plt = (bfd_vma) - 1; 8998 } 8999 9000 if (got_type & GOT_TLS_GD) 9001 htab->root.srelgot->size += RELOC_SIZE (htab) * 2; 9002 9003 if (got_type & GOT_TLS_IE 9004 || got_type & GOT_NORMAL) 9005 htab->root.srelgot->size += RELOC_SIZE (htab); 9006 } 9007 } 9008 else 9009 { 9010 locals[i].got_refcount = (bfd_vma) - 1; 9011 } 9012 } 9013 } 9014 9015 9016 /* Allocate global sym .plt and .got entries, and space for global 9017 sym dynamic relocs. */ 9018 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs, 9019 info); 9020 9021 /* Allocate global ifunc sym .plt and .got entries, and space for global 9022 ifunc sym dynamic relocs. */ 9023 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs, 9024 info); 9025 9026 /* Allocate .plt and .got entries, and space for local ifunc symbols. */ 9027 htab_traverse (htab->loc_hash_table, 9028 elfNN_aarch64_allocate_local_ifunc_dynrelocs, 9029 info); 9030 9031 /* For every jump slot reserved in the sgotplt, reloc_count is 9032 incremented. However, when we reserve space for TLS descriptors, 9033 it's not incremented, so in order to compute the space reserved 9034 for them, it suffices to multiply the reloc count by the jump 9035 slot size. */ 9036 9037 if (htab->root.srelplt) 9038 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab); 9039 9040 if (htab->root.tlsdesc_plt) 9041 { 9042 if (htab->root.splt->size == 0) 9043 htab->root.splt->size += htab->plt_header_size; 9044 9045 /* If we're not using lazy TLS relocations, don't generate the 9046 GOT and PLT entry required. 
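Otherwise reserve one entry in .plt for the TLS descriptor resolver stub and one GOT slot for it to use.  Their locations are published to the dynamic linker through the DT_TLSDESC_PLT and DT_TLSDESC_GOT dynamic tags, and the stub itself is written out later by elfNN_aarch64_finish_dynamic_sections ().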
*/
9047 if ((info->flags & DF_BIND_NOW))
9048 htab->root.tlsdesc_plt = 0;
9049 else
9050 {
9051 htab->root.tlsdesc_plt = htab->root.splt->size;
9052 htab->root.splt->size += htab->tlsdesc_plt_entry_size;
9053
9054 htab->root.tlsdesc_got = htab->root.sgot->size;
9055 htab->root.sgot->size += GOT_ENTRY_SIZE;
9056 }
9057 }
9058
9059 /* Init mapping symbol information to use later to distinguish between
9060 code and data while scanning for errata. */
9061 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
9062 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
9063 {
9064 if (!is_aarch64_elf (ibfd))
9065 continue;
9066 bfd_elfNN_aarch64_init_maps (ibfd);
9067 }
9068
9069 /* We have now determined the sizes of the various dynamic sections.
9070 Allocate memory for them. */
9071 relocs = false;
9072 for (s = dynobj->sections; s != NULL; s = s->next)
9073 {
9074 if ((s->flags & SEC_LINKER_CREATED) == 0)
9075 continue;
9076
9077 if (s == htab->root.splt
9078 || s == htab->root.sgot
9079 || s == htab->root.sgotplt
9080 || s == htab->root.iplt
9081 || s == htab->root.igotplt
9082 || s == htab->root.sdynbss
9083 || s == htab->root.sdynrelro)
9084 {
9085 /* Strip this section if we don't need it; see the
9086 comment below. */
9087 }
9088 else if (startswith (bfd_section_name (s), ".rela"))
9089 {
9090 if (s->size != 0 && s != htab->root.srelplt)
9091 relocs = true;
9092
9093 /* We use the reloc_count field as a counter if we need
9094 to copy relocs into the output file. */
9095 if (s != htab->root.srelplt)
9096 s->reloc_count = 0;
9097 }
9098 else
9099 {
9100 /* It's not one of our sections, so don't allocate space. */
9101 continue;
9102 }
9103
9104 if (s->size == 0)
9105 {
9106 /* If we don't need this section, strip it from the
9107 output file. This is mostly to handle .rela.bss and
9108 .rela.plt. We must create both sections in
9109 create_dynamic_sections, because they must be created
9110 before the linker maps input sections to output
9111 sections. The linker does that before
9112 adjust_dynamic_symbol is called, and it is that
9113 function which decides whether anything needs to go
9114 into these sections. */
9115 s->flags |= SEC_EXCLUDE;
9116 continue;
9117 }
9118
9119 if ((s->flags & SEC_HAS_CONTENTS) == 0)
9120 continue;
9121
9122 /* Allocate memory for the section contents. We use bfd_zalloc
9123 here in case unused entries are not reclaimed before the
9124 section's contents are written out. This should not happen,
9125 but this way if it does, we get a R_AARCH64_NONE reloc instead
9126 of garbage. */
9127 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
9128 if (s->contents == NULL)
9129 return false;
9130 }
9131
9132 if (htab->root.dynamic_sections_created)
9133 {
9134 /* Add some entries to the .dynamic section. We fill in the
9135 values later, in elfNN_aarch64_finish_dynamic_sections, but we
9136 must add the entries now so that we get the correct size for
9137 the .dynamic section. The DT_DEBUG entry is filled in by the
9138 dynamic linker and used by the debugger.
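The generic tags are added via _bfd_elf_add_dynamic_tags (); on top of those, the AArch64-specific DT_AARCH64_VARIANT_PCS, DT_AARCH64_BTI_PLT and DT_AARCH64_PAC_PLT tags are added below as required.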
*/ 9139 #define add_dynamic_entry(TAG, VAL) \ 9140 _bfd_elf_add_dynamic_entry (info, TAG, VAL) 9141 9142 if (!_bfd_elf_add_dynamic_tags (output_bfd, info, relocs)) 9143 return false; 9144 9145 if (htab->root.splt->size != 0) 9146 { 9147 if (htab->variant_pcs 9148 && !add_dynamic_entry (DT_AARCH64_VARIANT_PCS, 0)) 9149 return false; 9150 9151 if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI_PAC) 9152 && (!add_dynamic_entry (DT_AARCH64_BTI_PLT, 0) 9153 || !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0))) 9154 return false; 9155 9156 else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI) 9157 && !add_dynamic_entry (DT_AARCH64_BTI_PLT, 0)) 9158 return false; 9159 9160 else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_PAC) 9161 && !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0)) 9162 return false; 9163 } 9164 } 9165 #undef add_dynamic_entry 9166 9167 return true; 9168 } 9169 9170 static inline void 9171 elf_aarch64_update_plt_entry (bfd *output_bfd, 9172 bfd_reloc_code_real_type r_type, 9173 bfd_byte *plt_entry, bfd_vma value) 9174 { 9175 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type); 9176 9177 /* FIXME: We should check the return value from this function call. */ 9178 (void) _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value); 9179 } 9180 9181 static void 9182 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h, 9183 struct elf_aarch64_link_hash_table 9184 *htab, bfd *output_bfd, 9185 struct bfd_link_info *info) 9186 { 9187 bfd_byte *plt_entry; 9188 bfd_vma plt_index; 9189 bfd_vma got_offset; 9190 bfd_vma gotplt_entry_address; 9191 bfd_vma plt_entry_address; 9192 Elf_Internal_Rela rela; 9193 bfd_byte *loc; 9194 asection *plt, *gotplt, *relplt; 9195 9196 /* When building a static executable, use .iplt, .igot.plt and 9197 .rela.iplt sections for STT_GNU_IFUNC symbols. */ 9198 if (htab->root.splt != NULL) 9199 { 9200 plt = htab->root.splt; 9201 gotplt = htab->root.sgotplt; 9202 relplt = htab->root.srelplt; 9203 } 9204 else 9205 { 9206 plt = htab->root.iplt; 9207 gotplt = htab->root.igotplt; 9208 relplt = htab->root.irelplt; 9209 } 9210 9211 /* Get the index in the procedure linkage table which 9212 corresponds to this symbol. This is the index of this symbol 9213 in all the symbols for which we are making plt entries. The 9214 first entry in the procedure linkage table is reserved. 9215 9216 Get the offset into the .got table of the entry that 9217 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE 9218 bytes. The first three are reserved for the dynamic linker. 9219 9220 For static executables, we don't reserve anything. */ 9221 9222 if (plt == htab->root.splt) 9223 { 9224 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size; 9225 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE; 9226 } 9227 else 9228 { 9229 plt_index = h->plt.offset / htab->plt_entry_size; 9230 got_offset = plt_index * GOT_ENTRY_SIZE; 9231 } 9232 9233 plt_entry = plt->contents + h->plt.offset; 9234 plt_entry_address = plt->output_section->vma 9235 + plt->output_offset + h->plt.offset; 9236 gotplt_entry_address = gotplt->output_section->vma + 9237 gotplt->output_offset + got_offset; 9238 9239 /* Copy in the boiler-plate for the PLTn entry. */ 9240 memcpy (plt_entry, htab->plt_entry, htab->plt_entry_size); 9241 9242 /* First instruction in BTI enabled PLT stub is a BTI 9243 instruction so skip it. 
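Advancing plt_entry by four bytes keeps the ADRP/LDR/ADD fixups below pointing at the correct instructions within the stub.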
*/
9244 if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI
9245 && elf_elfheader (output_bfd)->e_type == ET_EXEC)
9246 plt_entry = plt_entry + 4;
9247
9248 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
9249 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
9250 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9251 plt_entry,
9252 PG (gotplt_entry_address) -
9253 PG (plt_entry_address));
9254
9255 /* Fill in the lo12 bits for the load from the pltgot. */
9256 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
9257 plt_entry + 4,
9258 PG_OFFSET (gotplt_entry_address));
9259
9260 /* Fill in the lo12 bits for the add from the pltgot entry. */
9261 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
9262 plt_entry + 8,
9263 PG_OFFSET (gotplt_entry_address));
9264
9265 /* All the GOTPLT entries are essentially initialized to PLT0. */
9266 bfd_put_NN (output_bfd,
9267 plt->output_section->vma + plt->output_offset,
9268 gotplt->contents + got_offset);
9269
9270 rela.r_offset = gotplt_entry_address;
9271
9272 if (h->dynindx == -1
9273 || ((bfd_link_executable (info)
9274 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
9275 && h->def_regular
9276 && h->type == STT_GNU_IFUNC))
9277 {
9278 /* If an STT_GNU_IFUNC symbol is locally defined, generate
9279 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
9280 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
9281 rela.r_addend = (h->root.u.def.value
9282 + h->root.u.def.section->output_section->vma
9283 + h->root.u.def.section->output_offset);
9284 }
9285 else
9286 {
9287 /* Fill in the entry in the .rela.plt section. */
9288 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
9289 rela.r_addend = 0;
9290 }
9291
9292 /* Compute the relocation entry to use based on the PLT index and do
9293 not adjust reloc_count. The reloc_count has already been adjusted
9294 to account for this entry. */
9295 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
9296 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9297 }
9298
9299 /* Size sections even though they're not dynamic. We use this hook to
9300 set up _TLS_MODULE_BASE_, if needed. */
9301
9302 static bool
9303 elfNN_aarch64_always_size_sections (bfd *output_bfd,
9304 struct bfd_link_info *info)
9305 {
9306 asection *tls_sec;
9307
9308 if (bfd_link_relocatable (info))
9309 return true;
9310
9311 tls_sec = elf_hash_table (info)->tls_sec;
9312
9313 if (tls_sec)
9314 {
9315 struct elf_link_hash_entry *tlsbase;
9316
9317 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
9318 "_TLS_MODULE_BASE_", true, true, false);
9319
9320 if (tlsbase)
9321 {
9322 struct bfd_link_hash_entry *h = NULL;
9323 const struct elf_backend_data *bed =
9324 get_elf_backend_data (output_bfd);
9325
9326 if (!(_bfd_generic_link_add_one_symbol
9327 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
9328 tls_sec, 0, NULL, false, bed->collect, &h)))
9329 return false;
9330
9331 tlsbase->type = STT_TLS;
9332 tlsbase = (struct elf_link_hash_entry *) h;
9333 tlsbase->def_regular = 1;
9334 tlsbase->other = STV_HIDDEN;
9335 (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
9336 }
9337 }
9338
9339 return true;
9340 }
9341
9342 /* Finish up dynamic symbol handling. We set the contents of various
9343 dynamic sections here.
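For a symbol with a PLT entry this emits the PLTn stub together with its R_AARCH64_JUMP_SLOT (or R_AARCH64_IRELATIVE) reloc; for a GOT_NORMAL GOT entry it emits R_AARCH64_GLOB_DAT or R_AARCH64_RELATIVE as appropriate; and for a symbol that needs a copy reloc it emits R_AARCH64_COPY.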
*/ 9344 9345 static bool 9346 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd, 9347 struct bfd_link_info *info, 9348 struct elf_link_hash_entry *h, 9349 Elf_Internal_Sym *sym) 9350 { 9351 struct elf_aarch64_link_hash_table *htab; 9352 htab = elf_aarch64_hash_table (info); 9353 9354 if (h->plt.offset != (bfd_vma) - 1) 9355 { 9356 asection *plt, *gotplt, *relplt; 9357 9358 /* This symbol has an entry in the procedure linkage table. Set 9359 it up. */ 9360 9361 /* When building a static executable, use .iplt, .igot.plt and 9362 .rela.iplt sections for STT_GNU_IFUNC symbols. */ 9363 if (htab->root.splt != NULL) 9364 { 9365 plt = htab->root.splt; 9366 gotplt = htab->root.sgotplt; 9367 relplt = htab->root.srelplt; 9368 } 9369 else 9370 { 9371 plt = htab->root.iplt; 9372 gotplt = htab->root.igotplt; 9373 relplt = htab->root.irelplt; 9374 } 9375 9376 /* This symbol has an entry in the procedure linkage table. Set 9377 it up. */ 9378 if ((h->dynindx == -1 9379 && !((h->forced_local || bfd_link_executable (info)) 9380 && h->def_regular 9381 && h->type == STT_GNU_IFUNC)) 9382 || plt == NULL 9383 || gotplt == NULL 9384 || relplt == NULL) 9385 return false; 9386 9387 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info); 9388 if (!h->def_regular) 9389 { 9390 /* Mark the symbol as undefined, rather than as defined in 9391 the .plt section. */ 9392 sym->st_shndx = SHN_UNDEF; 9393 /* If the symbol is weak we need to clear the value. 9394 Otherwise, the PLT entry would provide a definition for 9395 the symbol even if the symbol wasn't defined anywhere, 9396 and so the symbol would never be NULL. Leave the value if 9397 there were any relocations where pointer equality matters 9398 (this is a clue for the dynamic linker, to make function 9399 pointer comparisons work between an application and shared 9400 library). */ 9401 if (!h->ref_regular_nonweak || !h->pointer_equality_needed) 9402 sym->st_value = 0; 9403 } 9404 } 9405 9406 if (h->got.offset != (bfd_vma) - 1 9407 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL 9408 /* Undefined weak symbol in static PIE resolves to 0 without 9409 any dynamic relocations. */ 9410 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h)) 9411 { 9412 Elf_Internal_Rela rela; 9413 bfd_byte *loc; 9414 9415 /* This symbol has an entry in the global offset table. Set it 9416 up. */ 9417 if (htab->root.sgot == NULL || htab->root.srelgot == NULL) 9418 abort (); 9419 9420 rela.r_offset = (htab->root.sgot->output_section->vma 9421 + htab->root.sgot->output_offset 9422 + (h->got.offset & ~(bfd_vma) 1)); 9423 9424 if (h->def_regular 9425 && h->type == STT_GNU_IFUNC) 9426 { 9427 if (bfd_link_pic (info)) 9428 { 9429 /* Generate R_AARCH64_GLOB_DAT. */ 9430 goto do_glob_dat; 9431 } 9432 else 9433 { 9434 asection *plt; 9435 9436 if (!h->pointer_equality_needed) 9437 abort (); 9438 9439 /* For non-shared object, we can't use .got.plt, which 9440 contains the real function address if we need pointer 9441 equality. We load the GOT entry with the PLT entry. */ 9442 plt = htab->root.splt ? 
htab->root.splt : htab->root.iplt; 9443 bfd_put_NN (output_bfd, (plt->output_section->vma 9444 + plt->output_offset 9445 + h->plt.offset), 9446 htab->root.sgot->contents 9447 + (h->got.offset & ~(bfd_vma) 1)); 9448 return true; 9449 } 9450 } 9451 else if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h)) 9452 { 9453 if (!(h->def_regular || ELF_COMMON_DEF_P (h))) 9454 return false; 9455 9456 BFD_ASSERT ((h->got.offset & 1) != 0); 9457 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE)); 9458 rela.r_addend = (h->root.u.def.value 9459 + h->root.u.def.section->output_section->vma 9460 + h->root.u.def.section->output_offset); 9461 } 9462 else 9463 { 9464 do_glob_dat: 9465 BFD_ASSERT ((h->got.offset & 1) == 0); 9466 bfd_put_NN (output_bfd, (bfd_vma) 0, 9467 htab->root.sgot->contents + h->got.offset); 9468 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT)); 9469 rela.r_addend = 0; 9470 } 9471 9472 loc = htab->root.srelgot->contents; 9473 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab); 9474 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc); 9475 } 9476 9477 if (h->needs_copy) 9478 { 9479 Elf_Internal_Rela rela; 9480 asection *s; 9481 bfd_byte *loc; 9482 9483 /* This symbol needs a copy reloc. Set it up. */ 9484 if (h->dynindx == -1 9485 || (h->root.type != bfd_link_hash_defined 9486 && h->root.type != bfd_link_hash_defweak) 9487 || htab->root.srelbss == NULL) 9488 abort (); 9489 9490 rela.r_offset = (h->root.u.def.value 9491 + h->root.u.def.section->output_section->vma 9492 + h->root.u.def.section->output_offset); 9493 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY)); 9494 rela.r_addend = 0; 9495 if (h->root.u.def.section == htab->root.sdynrelro) 9496 s = htab->root.sreldynrelro; 9497 else 9498 s = htab->root.srelbss; 9499 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab); 9500 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc); 9501 } 9502 9503 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may 9504 be NULL for local symbols. */ 9505 if (sym != NULL 9506 && (h == elf_hash_table (info)->hdynamic 9507 || h == elf_hash_table (info)->hgot)) 9508 sym->st_shndx = SHN_ABS; 9509 9510 return true; 9511 } 9512 9513 /* Finish up local dynamic symbol handling. We set the contents of 9514 various dynamic sections here. */ 9515 9516 static int 9517 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf) 9518 { 9519 struct elf_link_hash_entry *h 9520 = (struct elf_link_hash_entry *) *slot; 9521 struct bfd_link_info *info 9522 = (struct bfd_link_info *) inf; 9523 9524 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd, 9525 info, h, NULL); 9526 } 9527 9528 static void 9529 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED, 9530 struct elf_aarch64_link_hash_table 9531 *htab) 9532 { 9533 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between 9534 small and large plts and at the minute just generates 9535 the small PLT. */ 9536 9537 /* PLT0 of the small PLT looks like this in ELF64 - 9538 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack. 9539 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT 9540 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the 9541 // symbol resolver 9542 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the 9543 // GOTPLT entry for this. 9544 br x17 9545 PLT0 will be slightly different in ELF32 due to different got entry 9546 size. */ 9547 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. 
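This is the .got.plt slot from which PLT0 loads the address of the symbol resolver; it is zeroed in elfNN_aarch64_finish_dynamic_sections () and filled in by the dynamic linker at run time.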
*/ 9548 bfd_vma plt_base; 9549 9550 9551 memcpy (htab->root.splt->contents, htab->plt0_entry, 9552 htab->plt_header_size); 9553 9554 /* PR 26312: Explicitly set the sh_entsize to 0 so that 9555 consumers do not think that the section contains fixed 9556 sized objects. */ 9557 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize = 0; 9558 9559 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma 9560 + htab->root.sgotplt->output_offset 9561 + GOT_ENTRY_SIZE * 2); 9562 9563 plt_base = htab->root.splt->output_section->vma + 9564 htab->root.splt->output_offset; 9565 9566 /* First instruction in BTI enabled PLT stub is a BTI 9567 instruction so skip it. */ 9568 bfd_byte *plt0_entry = htab->root.splt->contents; 9569 if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI) 9570 plt0_entry = plt0_entry + 4; 9571 9572 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8. 9573 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */ 9574 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL, 9575 plt0_entry + 4, 9576 PG (plt_got_2nd_ent) - PG (plt_base + 4)); 9577 9578 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12, 9579 plt0_entry + 8, 9580 PG_OFFSET (plt_got_2nd_ent)); 9581 9582 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12, 9583 plt0_entry + 12, 9584 PG_OFFSET (plt_got_2nd_ent)); 9585 } 9586 9587 static bool 9588 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd, 9589 struct bfd_link_info *info) 9590 { 9591 struct elf_aarch64_link_hash_table *htab; 9592 bfd *dynobj; 9593 asection *sdyn; 9594 9595 htab = elf_aarch64_hash_table (info); 9596 dynobj = htab->root.dynobj; 9597 sdyn = bfd_get_linker_section (dynobj, ".dynamic"); 9598 9599 if (htab->root.dynamic_sections_created) 9600 { 9601 ElfNN_External_Dyn *dyncon, *dynconend; 9602 9603 if (sdyn == NULL || htab->root.sgot == NULL) 9604 abort (); 9605 9606 dyncon = (ElfNN_External_Dyn *) sdyn->contents; 9607 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size); 9608 for (; dyncon < dynconend; dyncon++) 9609 { 9610 Elf_Internal_Dyn dyn; 9611 asection *s; 9612 9613 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn); 9614 9615 switch (dyn.d_tag) 9616 { 9617 default: 9618 continue; 9619 9620 case DT_PLTGOT: 9621 s = htab->root.sgotplt; 9622 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset; 9623 break; 9624 9625 case DT_JMPREL: 9626 s = htab->root.srelplt; 9627 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset; 9628 break; 9629 9630 case DT_PLTRELSZ: 9631 s = htab->root.srelplt; 9632 dyn.d_un.d_val = s->size; 9633 break; 9634 9635 case DT_TLSDESC_PLT: 9636 s = htab->root.splt; 9637 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset 9638 + htab->root.tlsdesc_plt; 9639 break; 9640 9641 case DT_TLSDESC_GOT: 9642 s = htab->root.sgot; 9643 BFD_ASSERT (htab->root.tlsdesc_got != (bfd_vma)-1); 9644 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset 9645 + htab->root.tlsdesc_got; 9646 break; 9647 } 9648 9649 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon); 9650 } 9651 9652 } 9653 9654 /* Fill in the special first entry in the procedure linkage table. 
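When a lazy TLSDESC PLT has been reserved, the TLS descriptor resolver stub is written out here as well, with its ADRP/LDR/ADD instructions fixed up to reference the DT_TLSDESC_GOT slot and the start of .got.plt.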
*/ 9655 if (htab->root.splt && htab->root.splt->size > 0) 9656 { 9657 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab); 9658 9659 if (htab->root.tlsdesc_plt && !(info->flags & DF_BIND_NOW)) 9660 { 9661 BFD_ASSERT (htab->root.tlsdesc_got != (bfd_vma)-1); 9662 bfd_put_NN (output_bfd, (bfd_vma) 0, 9663 htab->root.sgot->contents + htab->root.tlsdesc_got); 9664 9665 const bfd_byte *entry = elfNN_aarch64_tlsdesc_small_plt_entry; 9666 htab->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE; 9667 9668 aarch64_plt_type type = elf_aarch64_tdata (output_bfd)->plt_type; 9669 if (type == PLT_BTI || type == PLT_BTI_PAC) 9670 { 9671 entry = elfNN_aarch64_tlsdesc_small_plt_bti_entry; 9672 } 9673 9674 memcpy (htab->root.splt->contents + htab->root.tlsdesc_plt, 9675 entry, htab->tlsdesc_plt_entry_size); 9676 9677 { 9678 bfd_vma adrp1_addr = 9679 htab->root.splt->output_section->vma 9680 + htab->root.splt->output_offset 9681 + htab->root.tlsdesc_plt + 4; 9682 9683 bfd_vma adrp2_addr = adrp1_addr + 4; 9684 9685 bfd_vma got_addr = 9686 htab->root.sgot->output_section->vma 9687 + htab->root.sgot->output_offset; 9688 9689 bfd_vma pltgot_addr = 9690 htab->root.sgotplt->output_section->vma 9691 + htab->root.sgotplt->output_offset; 9692 9693 bfd_vma dt_tlsdesc_got = got_addr + htab->root.tlsdesc_got; 9694 9695 bfd_byte *plt_entry = 9696 htab->root.splt->contents + htab->root.tlsdesc_plt; 9697 9698 /* First instruction in BTI enabled PLT stub is a BTI 9699 instruction so skip it. */ 9700 if (type & PLT_BTI) 9701 { 9702 plt_entry = plt_entry + 4; 9703 adrp1_addr = adrp1_addr + 4; 9704 adrp2_addr = adrp2_addr + 4; 9705 } 9706 9707 /* adrp x2, DT_TLSDESC_GOT */ 9708 elf_aarch64_update_plt_entry (output_bfd, 9709 BFD_RELOC_AARCH64_ADR_HI21_PCREL, 9710 plt_entry + 4, 9711 (PG (dt_tlsdesc_got) 9712 - PG (adrp1_addr))); 9713 9714 /* adrp x3, 0 */ 9715 elf_aarch64_update_plt_entry (output_bfd, 9716 BFD_RELOC_AARCH64_ADR_HI21_PCREL, 9717 plt_entry + 8, 9718 (PG (pltgot_addr) 9719 - PG (adrp2_addr))); 9720 9721 /* ldr x2, [x2, #0] */ 9722 elf_aarch64_update_plt_entry (output_bfd, 9723 BFD_RELOC_AARCH64_LDSTNN_LO12, 9724 plt_entry + 12, 9725 PG_OFFSET (dt_tlsdesc_got)); 9726 9727 /* add x3, x3, 0 */ 9728 elf_aarch64_update_plt_entry (output_bfd, 9729 BFD_RELOC_AARCH64_ADD_LO12, 9730 plt_entry + 16, 9731 PG_OFFSET (pltgot_addr)); 9732 } 9733 } 9734 } 9735 9736 if (htab->root.sgotplt) 9737 { 9738 if (bfd_is_abs_section (htab->root.sgotplt->output_section)) 9739 { 9740 _bfd_error_handler 9741 (_("discarded output section: `%pA'"), htab->root.sgotplt); 9742 return false; 9743 } 9744 9745 /* Fill in the first three entries in the global offset table. */ 9746 if (htab->root.sgotplt->size > 0) 9747 { 9748 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents); 9749 9750 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */ 9751 bfd_put_NN (output_bfd, 9752 (bfd_vma) 0, 9753 htab->root.sgotplt->contents + GOT_ENTRY_SIZE); 9754 bfd_put_NN (output_bfd, 9755 (bfd_vma) 0, 9756 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2); 9757 } 9758 9759 if (htab->root.sgot) 9760 { 9761 if (htab->root.sgot->size > 0) 9762 { 9763 bfd_vma addr = 9764 sdyn ? 
sdyn->output_section->vma + sdyn->output_offset : 0; 9765 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents); 9766 } 9767 } 9768 9769 elf_section_data (htab->root.sgotplt->output_section)-> 9770 this_hdr.sh_entsize = GOT_ENTRY_SIZE; 9771 } 9772 9773 if (htab->root.sgot && htab->root.sgot->size > 0) 9774 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize 9775 = GOT_ENTRY_SIZE; 9776 9777 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */ 9778 htab_traverse (htab->loc_hash_table, 9779 elfNN_aarch64_finish_local_dynamic_symbol, 9780 info); 9781 9782 return true; 9783 } 9784 9785 /* Check if BTI enabled PLTs are needed. Returns the type needed. */ 9786 static aarch64_plt_type 9787 get_plt_type (bfd *abfd) 9788 { 9789 aarch64_plt_type ret = PLT_NORMAL; 9790 bfd_byte *contents, *extdyn, *extdynend; 9791 asection *sec = bfd_get_section_by_name (abfd, ".dynamic"); 9792 if (!sec 9793 || sec->size < sizeof (ElfNN_External_Dyn) 9794 || !bfd_malloc_and_get_section (abfd, sec, &contents)) 9795 return ret; 9796 extdyn = contents; 9797 extdynend = contents + sec->size - sizeof (ElfNN_External_Dyn); 9798 for (; extdyn <= extdynend; extdyn += sizeof (ElfNN_External_Dyn)) 9799 { 9800 Elf_Internal_Dyn dyn; 9801 bfd_elfNN_swap_dyn_in (abfd, extdyn, &dyn); 9802 9803 /* Let's check the processor specific dynamic array tags. */ 9804 bfd_vma tag = dyn.d_tag; 9805 if (tag < DT_LOPROC || tag > DT_HIPROC) 9806 continue; 9807 9808 switch (tag) 9809 { 9810 case DT_AARCH64_BTI_PLT: 9811 ret |= PLT_BTI; 9812 break; 9813 9814 case DT_AARCH64_PAC_PLT: 9815 ret |= PLT_PAC; 9816 break; 9817 9818 default: break; 9819 } 9820 } 9821 free (contents); 9822 return ret; 9823 } 9824 9825 static long 9826 elfNN_aarch64_get_synthetic_symtab (bfd *abfd, 9827 long symcount, 9828 asymbol **syms, 9829 long dynsymcount, 9830 asymbol **dynsyms, 9831 asymbol **ret) 9832 { 9833 elf_aarch64_tdata (abfd)->plt_type = get_plt_type (abfd); 9834 return _bfd_elf_get_synthetic_symtab (abfd, symcount, syms, 9835 dynsymcount, dynsyms, ret); 9836 } 9837 9838 /* Return address for Ith PLT stub in section PLT, for relocation REL 9839 or (bfd_vma) -1 if it should not be included. */ 9840 9841 static bfd_vma 9842 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt, 9843 const arelent *rel ATTRIBUTE_UNUSED) 9844 { 9845 size_t plt0_size = PLT_ENTRY_SIZE; 9846 size_t pltn_size = PLT_SMALL_ENTRY_SIZE; 9847 9848 if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI_PAC) 9849 { 9850 if (elf_elfheader (plt->owner)->e_type == ET_EXEC) 9851 pltn_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE; 9852 else 9853 pltn_size = PLT_PAC_SMALL_ENTRY_SIZE; 9854 } 9855 else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI) 9856 { 9857 if (elf_elfheader (plt->owner)->e_type == ET_EXEC) 9858 pltn_size = PLT_BTI_SMALL_ENTRY_SIZE; 9859 } 9860 else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_PAC) 9861 { 9862 pltn_size = PLT_PAC_SMALL_ENTRY_SIZE; 9863 } 9864 9865 return plt->vma + plt0_size + i * pltn_size; 9866 } 9867 9868 /* Returns TRUE if NAME is an AArch64 mapping symbol. 9869 The ARM ELF standard defines $x (for A64 code) and $d (for data). 9870 It also allows a period initiated suffix to be added to the symbol, ie: 9871 "$[adtx]\.[:sym_char]+". */ 9872 9873 static bool 9874 is_aarch64_mapping_symbol (const char * name) 9875 { 9876 return name != NULL /* Paranoia. */ 9877 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then 9878 the mapping symbols could have acquired a prefix. 
We do not support this here, since such symbols no
9880 longer conform to the ARM ELF ABI. */
9881 && (name[1] == 'd' || name[1] == 'x')
9882 && (name[2] == 0 || name[2] == '.');
9883 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
9884 any characters that follow the period are legal characters for the body
9885 of a symbol's name. For now we just assume that this is the case. */
9886 }
9887
9888 /* Make sure that mapping symbols in object files are not removed via the
9889 "strip --strip-unneeded" tool. These symbols might be needed in order to
9890 correctly generate linked files. Once an object file has been linked,
9891 it should be safe to remove them. */
9892
9893 static void
9894 elfNN_aarch64_backend_symbol_processing (bfd *abfd, asymbol *sym)
9895 {
9896 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
9897 && sym->section != bfd_abs_section_ptr
9898 && is_aarch64_mapping_symbol (sym->name))
9899 sym->flags |= BSF_KEEP;
9900 }
9901
9902 /* Implement elf_backend_setup_gnu_properties for AArch64. It serves as a
9903 wrapper function for _bfd_aarch64_elf_link_setup_gnu_properties to account
9904 for the effect of GNU properties of the output_bfd. */
9905 static bfd *
9906 elfNN_aarch64_link_setup_gnu_properties (struct bfd_link_info *info)
9907 {
9908 uint32_t prop = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop;
9909 bfd *pbfd = _bfd_aarch64_elf_link_setup_gnu_properties (info, &prop);
9910 elf_aarch64_tdata (info->output_bfd)->gnu_and_prop = prop;
9911 elf_aarch64_tdata (info->output_bfd)->plt_type
9912 |= (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) ? PLT_BTI : 0;
9913 setup_plt_values (info, elf_aarch64_tdata (info->output_bfd)->plt_type);
9914 return pbfd;
9915 }
9916
9917 /* Implement elf_backend_merge_gnu_properties for AArch64. It serves as a
9918 wrapper function for _bfd_aarch64_elf_merge_gnu_properties to account
9919 for the effect of GNU properties of the output_bfd. */
9920 static bool
9921 elfNN_aarch64_merge_gnu_properties (struct bfd_link_info *info,
9922 bfd *abfd, bfd *bbfd,
9923 elf_property *aprop,
9924 elf_property *bprop)
9925 {
9926 uint32_t prop
9927 = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop;
9928
9929 /* If the output has been marked with BTI using a command line argument,
9930 give out a warning if necessary. */
9931 /* Properties are merged per type, hence only check for warnings when merging
9932 GNU_PROPERTY_AARCH64_FEATURE_1_AND. */
9933 if (((aprop && aprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND)
9934 || (bprop && bprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND))
9935 && (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
9936 && (!elf_aarch64_tdata (info->output_bfd)->no_bti_warn))
9937 {
9938 if ((aprop && !(aprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI))
9939 || !aprop)
9940 {
9941 _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when "
9942 "all inputs do not have BTI in NOTE section."),
9943 abfd);
9944 }
9945 if ((bprop && !(bprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI))
9946 || !bprop)
9947 {
9948 _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when "
9949 "all inputs do not have BTI in NOTE section."),
9950 bbfd);
9951 }
9952 }
9953
9954 return _bfd_aarch64_elf_merge_gnu_properties (info, abfd, aprop,
9955 bprop, prop);
9956 }
9957
9958 /* We use this so we can override certain functions
9959 (though currently we don't).
*/ 9960 9961 const struct elf_size_info elfNN_aarch64_size_info = 9962 { 9963 sizeof (ElfNN_External_Ehdr), 9964 sizeof (ElfNN_External_Phdr), 9965 sizeof (ElfNN_External_Shdr), 9966 sizeof (ElfNN_External_Rel), 9967 sizeof (ElfNN_External_Rela), 9968 sizeof (ElfNN_External_Sym), 9969 sizeof (ElfNN_External_Dyn), 9970 sizeof (Elf_External_Note), 9971 4, /* Hash table entry size. */ 9972 1, /* Internal relocs per external relocs. */ 9973 ARCH_SIZE, /* Arch size. */ 9974 LOG_FILE_ALIGN, /* Log_file_align. */ 9975 ELFCLASSNN, EV_CURRENT, 9976 bfd_elfNN_write_out_phdrs, 9977 bfd_elfNN_write_shdrs_and_ehdr, 9978 bfd_elfNN_checksum_contents, 9979 bfd_elfNN_write_relocs, 9980 bfd_elfNN_swap_symbol_in, 9981 bfd_elfNN_swap_symbol_out, 9982 bfd_elfNN_slurp_reloc_table, 9983 bfd_elfNN_slurp_symbol_table, 9984 bfd_elfNN_swap_dyn_in, 9985 bfd_elfNN_swap_dyn_out, 9986 bfd_elfNN_swap_reloc_in, 9987 bfd_elfNN_swap_reloc_out, 9988 bfd_elfNN_swap_reloca_in, 9989 bfd_elfNN_swap_reloca_out 9990 }; 9991 9992 #define ELF_ARCH bfd_arch_aarch64 9993 #define ELF_MACHINE_CODE EM_AARCH64 9994 #define ELF_MAXPAGESIZE 0x10000 9995 #define ELF_COMMONPAGESIZE 0x1000 9996 9997 #define bfd_elfNN_close_and_cleanup \ 9998 elfNN_aarch64_close_and_cleanup 9999 10000 #define bfd_elfNN_bfd_free_cached_info \ 10001 elfNN_aarch64_bfd_free_cached_info 10002 10003 #define bfd_elfNN_bfd_is_target_special_symbol \ 10004 elfNN_aarch64_is_target_special_symbol 10005 10006 #define bfd_elfNN_bfd_link_hash_table_create \ 10007 elfNN_aarch64_link_hash_table_create 10008 10009 #define bfd_elfNN_bfd_merge_private_bfd_data \ 10010 elfNN_aarch64_merge_private_bfd_data 10011 10012 #define bfd_elfNN_bfd_print_private_bfd_data \ 10013 elfNN_aarch64_print_private_bfd_data 10014 10015 #define bfd_elfNN_bfd_reloc_type_lookup \ 10016 elfNN_aarch64_reloc_type_lookup 10017 10018 #define bfd_elfNN_bfd_reloc_name_lookup \ 10019 elfNN_aarch64_reloc_name_lookup 10020 10021 #define bfd_elfNN_bfd_set_private_flags \ 10022 elfNN_aarch64_set_private_flags 10023 10024 #define bfd_elfNN_find_inliner_info \ 10025 elfNN_aarch64_find_inliner_info 10026 10027 #define bfd_elfNN_get_synthetic_symtab \ 10028 elfNN_aarch64_get_synthetic_symtab 10029 10030 #define bfd_elfNN_mkobject \ 10031 elfNN_aarch64_mkobject 10032 10033 #define bfd_elfNN_new_section_hook \ 10034 elfNN_aarch64_new_section_hook 10035 10036 #define elf_backend_adjust_dynamic_symbol \ 10037 elfNN_aarch64_adjust_dynamic_symbol 10038 10039 #define elf_backend_always_size_sections \ 10040 elfNN_aarch64_always_size_sections 10041 10042 #define elf_backend_check_relocs \ 10043 elfNN_aarch64_check_relocs 10044 10045 #define elf_backend_copy_indirect_symbol \ 10046 elfNN_aarch64_copy_indirect_symbol 10047 10048 #define elf_backend_merge_symbol_attribute \ 10049 elfNN_aarch64_merge_symbol_attribute 10050 10051 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts 10052 to them in our hash. 
*/ 10053 #define elf_backend_create_dynamic_sections \ 10054 elfNN_aarch64_create_dynamic_sections 10055 10056 #define elf_backend_init_index_section \ 10057 _bfd_elf_init_2_index_sections 10058 10059 #define elf_backend_finish_dynamic_sections \ 10060 elfNN_aarch64_finish_dynamic_sections 10061 10062 #define elf_backend_finish_dynamic_symbol \ 10063 elfNN_aarch64_finish_dynamic_symbol 10064 10065 #define elf_backend_object_p \ 10066 elfNN_aarch64_object_p 10067 10068 #define elf_backend_output_arch_local_syms \ 10069 elfNN_aarch64_output_arch_local_syms 10070 10071 #define elf_backend_maybe_function_sym \ 10072 elfNN_aarch64_maybe_function_sym 10073 10074 #define elf_backend_plt_sym_val \ 10075 elfNN_aarch64_plt_sym_val 10076 10077 #define elf_backend_init_file_header \ 10078 elfNN_aarch64_init_file_header 10079 10080 #define elf_backend_relocate_section \ 10081 elfNN_aarch64_relocate_section 10082 10083 #define elf_backend_reloc_type_class \ 10084 elfNN_aarch64_reloc_type_class 10085 10086 #define elf_backend_section_from_shdr \ 10087 elfNN_aarch64_section_from_shdr 10088 10089 #define elf_backend_size_dynamic_sections \ 10090 elfNN_aarch64_size_dynamic_sections 10091 10092 #define elf_backend_size_info \ 10093 elfNN_aarch64_size_info 10094 10095 #define elf_backend_write_section \ 10096 elfNN_aarch64_write_section 10097 10098 #define elf_backend_symbol_processing \ 10099 elfNN_aarch64_backend_symbol_processing 10100 10101 #define elf_backend_setup_gnu_properties \ 10102 elfNN_aarch64_link_setup_gnu_properties 10103 10104 #define elf_backend_merge_gnu_properties \ 10105 elfNN_aarch64_merge_gnu_properties 10106 10107 #define elf_backend_can_refcount 1 10108 #define elf_backend_can_gc_sections 1 10109 #define elf_backend_plt_readonly 1 10110 #define elf_backend_want_got_plt 1 10111 #define elf_backend_want_plt_sym 0 10112 #define elf_backend_want_dynrelro 1 10113 #define elf_backend_may_use_rel_p 0 10114 #define elf_backend_may_use_rela_p 1 10115 #define elf_backend_default_use_rela_p 1 10116 #define elf_backend_rela_normal 1 10117 #define elf_backend_dtrel_excludes_plt 1 10118 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3) 10119 #define elf_backend_default_execstack 0 10120 #define elf_backend_extern_protected_data 0 10121 #define elf_backend_hash_symbol elf_aarch64_hash_symbol 10122 10123 #undef elf_backend_obj_attrs_section 10124 #define elf_backend_obj_attrs_section ".ARM.attributes" 10125 10126 #include "elfNN-target.h" 10127 10128 /* CloudABI support. */ 10129 10130 #undef TARGET_LITTLE_SYM 10131 #define TARGET_LITTLE_SYM aarch64_elfNN_le_cloudabi_vec 10132 #undef TARGET_LITTLE_NAME 10133 #define TARGET_LITTLE_NAME "elfNN-littleaarch64-cloudabi" 10134 #undef TARGET_BIG_SYM 10135 #define TARGET_BIG_SYM aarch64_elfNN_be_cloudabi_vec 10136 #undef TARGET_BIG_NAME 10137 #define TARGET_BIG_NAME "elfNN-bigaarch64-cloudabi" 10138 10139 #undef ELF_OSABI 10140 #define ELF_OSABI ELFOSABI_CLOUDABI 10141 10142 #undef elfNN_bed 10143 #define elfNN_bed elfNN_aarch64_cloudabi_bed 10144 10145 #include "elfNN-target.h" 10146