/* AArch64-specific support for NN-bit ELF.
   Copyright (C) 2009-2024 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */

/* Notes on implementation:

  Thread Local Storage (TLS)

  Overview:

  The implementation currently supports both traditional TLS and TLS
  descriptors, but only general dynamic (GD).

  For traditional TLS the assembler will present us with code
  fragments of the form:

  adrp x0, :tlsgd:foo
			   R_AARCH64_TLSGD_ADR_PAGE21(foo)
  add  x0, :tlsgd_lo12:foo
			   R_AARCH64_TLSGD_ADD_LO12_NC(foo)
  bl   __tls_get_addr
  nop

  For TLS descriptors the assembler will present us with code
  fragments of the form:

  adrp	x0, :tlsdesc:foo		R_AARCH64_TLSDESC_ADR_PAGE21(foo)
  ldr	x1, [x0, #:tlsdesc_lo12:foo]	R_AARCH64_TLSDESC_LD64_LO12(foo)
  add	x0, x0, #:tlsdesc_lo12:foo	R_AARCH64_TLSDESC_ADD_LO12(foo)
  .tlsdesccall foo
  blr	x1				R_AARCH64_TLSDESC_CALL(foo)

  The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
  indicate that foo is thread local and should be accessed via the
  traditional TLS mechanisms.

  The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
  against foo indicate that 'foo' is thread local and should be accessed
  via a TLS descriptor mechanism.

  The precise instruction sequence is only relevant from the
  perspective of linker relaxation which is currently not implemented.

  The static linker must detect that 'foo' is a TLS object and
  allocate a double GOT entry.  The GOT entry must be created for both
  global and local TLS symbols.  Note that this is different from
  non-TLS local objects, which do not need a GOT entry.

  In the traditional TLS mechanism, the double GOT entry is used to
  provide the tls_index structure, containing module and offset
  entries.  The static linker places the relocation R_AARCH64_TLS_DTPMOD
  on the module entry.  The loader will subsequently fix up this
  relocation with the module identity.

  For global traditional TLS symbols the static linker places an
  R_AARCH64_TLS_DTPREL relocation on the offset entry.  The loader
  will subsequently fix up the offset.  For local TLS symbols the
  static linker fixes up the offset itself.

  In the TLS descriptor mechanism the double GOT entry is used to
  provide the descriptor.  The static linker places the relocation
  R_AARCH64_TLSDESC on the first GOT slot.  The loader will
  subsequently fix this up.

  Implementation:

  The handling of TLS symbols is implemented across a number of
  different backend functions.  The following is a top level view of
  what processing is performed where.

  The TLS implementation maintains state information for each TLS
  symbol.  The state information for local and global symbols is kept
  in different places.  Global symbols use generic BFD structures while
  local symbols use backend specific structures that are allocated and
  maintained entirely by the backend.

  The flow:

  elfNN_aarch64_check_relocs()

  This function is invoked for each relocation.

  The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
  R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
  spotted.  The local symbol data structures are created once, when
  the first local symbol is seen.

  The reference count for a symbol is incremented.  The GOT type for
  each symbol is marked as general dynamic.

  elfNN_aarch64_allocate_dynrelocs ()

  For each global with positive reference count we allocate a double
  GOT slot.  For a traditional TLS symbol we allocate space for two
  relocation entries on the GOT, for a TLS descriptor symbol we
  allocate space for one relocation on the slot.  Record the GOT offset
  for this symbol.

  elfNN_aarch64_late_size_sections ()

  Iterate all input BFDS, look in the local symbol data structure
  constructed earlier for local TLS symbols and allocate them double
  GOT slots along with space for a single GOT relocation.  Update the
  local symbol structure to record the GOT offset allocated.

  elfNN_aarch64_relocate_section ()

  Calls elfNN_aarch64_final_link_relocate ()

  Emit the relevant TLS relocations against the GOT for each TLS
  symbol.  For local TLS symbols emit the GOT offset directly.  The GOT
  relocations are emitted once, the first time a TLS symbol is
  encountered.  The implementation uses the LSB of the GOT offset to
  flag that the relevant GOT relocations for a symbol have been
  emitted.  All of the TLS code that uses the GOT offset needs to take
  care to mask out this flag bit before using the offset.

  elfNN_aarch64_final_link_relocate ()

  Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations.
*/ 137 138 #include "sysdep.h" 139 #include "bfd.h" 140 #include "libiberty.h" 141 #include "libbfd.h" 142 #include "elf-bfd.h" 143 #include "bfdlink.h" 144 #include "objalloc.h" 145 #include "elf/aarch64.h" 146 #include "elfxx-aarch64.h" 147 #include "cpu-aarch64.h" 148 149 #define ARCH_SIZE NN 150 151 #if ARCH_SIZE == 64 152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME 153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME 154 #define HOWTO64(...) HOWTO (__VA_ARGS__) 155 #define HOWTO32(...) EMPTY_HOWTO (0) 156 #define LOG_FILE_ALIGN 3 157 #define BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC BFD_RELOC_AARCH64_TLSDESC_LD64_LO12 158 #endif 159 160 #if ARCH_SIZE == 32 161 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME 162 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME 163 #define HOWTO64(...) EMPTY_HOWTO (0) 164 #define HOWTO32(...) HOWTO (__VA_ARGS__) 165 #define LOG_FILE_ALIGN 2 166 #define BFD_RELOC_AARCH64_TLSDESC_LD32_LO12 BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC 167 #define R_AARCH64_P32_TLSDESC_ADD_LO12 R_AARCH64_P32_TLSDESC_ADD_LO12_NC 168 #endif 169 170 #define IS_AARCH64_TLS_RELOC(R_TYPE) \ 171 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \ 172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \ 173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \ 174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC \ 175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1 \ 176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \ 177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \ 178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \ 179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \ 180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \ 181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \ 182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12 \ 183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12 \ 184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC \ 185 || (R_TYPE) == 
BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \ 186 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \ 187 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 \ 188 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12 \ 189 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC \ 190 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12 \ 191 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC \ 192 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12 \ 193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC \ 194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12 \ 195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC \ 196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0 \ 197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC \ 198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1 \ 199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC \ 200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2 \ 201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \ 202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \ 203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \ 204 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12 \ 205 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC \ 206 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12 \ 207 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC \ 208 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12 \ 209 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC \ 210 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12 \ 211 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC \ 212 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \ 213 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \ 214 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \ 215 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \ 216 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \ 217 || (R_TYPE) == 
BFD_RELOC_AARCH64_TLS_DTPMOD \ 218 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \ 219 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \ 220 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE))) 221 222 #define IS_AARCH64_TLS_RELAX_RELOC(R_TYPE) \ 223 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \ 224 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12 \ 225 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \ 226 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \ 227 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \ 228 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \ 229 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC \ 230 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \ 231 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \ 232 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \ 233 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \ 234 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \ 235 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \ 236 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \ 237 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC \ 238 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1 \ 239 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \ 240 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \ 241 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC \ 242 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \ 243 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \ 244 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21) 245 246 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \ 247 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC \ 248 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \ 249 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12 \ 250 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \ 251 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \ 252 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \ 253 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \ 254 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12 \ 255 || (R_TYPE) == 
BFD_RELOC_AARCH64_TLSDESC_LDR \ 256 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \ 257 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \ 258 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1) 259 260 #define ELIMINATE_COPY_RELOCS 1 261 262 /* Return size of a relocation entry. HTAB is the bfd's 263 elf_aarch64_link_hash_entry. */ 264 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela)) 265 266 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */ 267 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8) 268 #define PLT_ENTRY_SIZE (32) 269 #define PLT_SMALL_ENTRY_SIZE (16) 270 #define PLT_TLSDESC_ENTRY_SIZE (32) 271 /* PLT sizes with BTI insn. */ 272 #define PLT_BTI_SMALL_ENTRY_SIZE (24) 273 /* PLT sizes with PAC insn. */ 274 #define PLT_PAC_SMALL_ENTRY_SIZE (24) 275 /* PLT sizes with BTI and PAC insn. */ 276 #define PLT_BTI_PAC_SMALL_ENTRY_SIZE (24) 277 278 /* Encoding of the nop instruction. */ 279 #define INSN_NOP 0xd503201f 280 281 #define aarch64_compute_jump_table_size(htab) \ 282 (((htab)->root.srelplt == NULL) ? 0 \ 283 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE) 284 285 /* The first entry in a procedure linkage table looks like this 286 if the distance between the PLTGOT and the PLT is < 4GB use 287 these PLT entries. Note that the dynamic linker gets &PLTGOT[2] 288 in x16 and needs to work out PLTGOT[1] by using an address of 289 [x16,#-GOT_ENTRY_SIZE]. */ 290 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] = 291 { 292 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! 
*/ 293 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */ 294 #if ARCH_SIZE == 64 295 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */ 296 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */ 297 #else 298 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */ 299 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */ 300 #endif 301 0x20, 0x02, 0x1f, 0xd6, /* br x17 */ 302 0x1f, 0x20, 0x03, 0xd5, /* nop */ 303 0x1f, 0x20, 0x03, 0xd5, /* nop */ 304 0x1f, 0x20, 0x03, 0xd5, /* nop */ 305 }; 306 307 static const bfd_byte elfNN_aarch64_small_plt0_bti_entry[PLT_ENTRY_SIZE] = 308 { 309 0x5f, 0x24, 0x03, 0xd5, /* bti c. */ 310 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */ 311 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */ 312 #if ARCH_SIZE == 64 313 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */ 314 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */ 315 #else 316 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */ 317 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */ 318 #endif 319 0x20, 0x02, 0x1f, 0xd6, /* br x17 */ 320 0x1f, 0x20, 0x03, 0xd5, /* nop */ 321 0x1f, 0x20, 0x03, 0xd5, /* nop */ 322 }; 323 324 /* Per function entry in a procedure linkage table looks like this 325 if the distance between the PLTGOT and the PLT is < 4GB use 326 these PLT entries. Use BTI versions of the PLTs when enabled. */ 327 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] = 328 { 329 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */ 330 #if ARCH_SIZE == 64 331 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */ 332 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */ 333 #else 334 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */ 335 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */ 336 #endif 337 0x20, 0x02, 0x1f, 0xd6, /* br x17. 
*/ 338 }; 339 340 static const bfd_byte 341 elfNN_aarch64_small_plt_bti_entry[PLT_BTI_SMALL_ENTRY_SIZE] = 342 { 343 0x5f, 0x24, 0x03, 0xd5, /* bti c. */ 344 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */ 345 #if ARCH_SIZE == 64 346 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */ 347 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */ 348 #else 349 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */ 350 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */ 351 #endif 352 0x20, 0x02, 0x1f, 0xd6, /* br x17. */ 353 0x1f, 0x20, 0x03, 0xd5, /* nop */ 354 }; 355 356 static const bfd_byte 357 elfNN_aarch64_small_plt_pac_entry[PLT_PAC_SMALL_ENTRY_SIZE] = 358 { 359 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */ 360 #if ARCH_SIZE == 64 361 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */ 362 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */ 363 #else 364 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */ 365 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */ 366 #endif 367 0x9f, 0x21, 0x03, 0xd5, /* autia1716 */ 368 0x20, 0x02, 0x1f, 0xd6, /* br x17. */ 369 0x1f, 0x20, 0x03, 0xd5, /* nop */ 370 }; 371 372 static const bfd_byte 373 elfNN_aarch64_small_plt_bti_pac_entry[PLT_BTI_PAC_SMALL_ENTRY_SIZE] = 374 { 375 0x5f, 0x24, 0x03, 0xd5, /* bti c. */ 376 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */ 377 #if ARCH_SIZE == 64 378 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */ 379 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */ 380 #else 381 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */ 382 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */ 383 #endif 384 0x9f, 0x21, 0x03, 0xd5, /* autia1716 */ 385 0x20, 0x02, 0x1f, 0xd6, /* br x17. */ 386 }; 387 388 static const bfd_byte 389 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] = 390 { 391 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! 
*/ 392 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */ 393 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */ 394 #if ARCH_SIZE == 64 395 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */ 396 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */ 397 #else 398 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */ 399 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */ 400 #endif 401 0x40, 0x00, 0x1f, 0xd6, /* br x2 */ 402 0x1f, 0x20, 0x03, 0xd5, /* nop */ 403 0x1f, 0x20, 0x03, 0xd5, /* nop */ 404 }; 405 406 static const bfd_byte 407 elfNN_aarch64_tlsdesc_small_plt_bti_entry[PLT_TLSDESC_ENTRY_SIZE] = 408 { 409 0x5f, 0x24, 0x03, 0xd5, /* bti c. */ 410 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */ 411 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */ 412 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */ 413 #if ARCH_SIZE == 64 414 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */ 415 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */ 416 #else 417 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */ 418 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */ 419 #endif 420 0x40, 0x00, 0x1f, 0xd6, /* br x2 */ 421 0x1f, 0x20, 0x03, 0xd5, /* nop */ 422 }; 423 424 #define elf_info_to_howto elfNN_aarch64_info_to_howto 425 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto 426 427 #define AARCH64_ELF_ABI_VERSION 0 428 429 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */ 430 #define ALL_ONES (~ (bfd_vma) 0) 431 432 /* Indexed by the bfd interal reloc enumerators. 433 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_* 434 in reloc.c. */ 435 436 static reloc_howto_type elfNN_aarch64_howto_table[] = 437 { 438 EMPTY_HOWTO (0), 439 440 /* Basic data relocations. */ 441 442 /* Deprecated, but retained for backwards compatibility. 
*/ 443 HOWTO64 (R_AARCH64_NULL, /* type */ 444 0, /* rightshift */ 445 0, /* size */ 446 0, /* bitsize */ 447 false, /* pc_relative */ 448 0, /* bitpos */ 449 complain_overflow_dont, /* complain_on_overflow */ 450 bfd_elf_generic_reloc, /* special_function */ 451 "R_AARCH64_NULL", /* name */ 452 false, /* partial_inplace */ 453 0, /* src_mask */ 454 0, /* dst_mask */ 455 false), /* pcrel_offset */ 456 HOWTO (R_AARCH64_NONE, /* type */ 457 0, /* rightshift */ 458 0, /* size */ 459 0, /* bitsize */ 460 false, /* pc_relative */ 461 0, /* bitpos */ 462 complain_overflow_dont, /* complain_on_overflow */ 463 bfd_elf_generic_reloc, /* special_function */ 464 "R_AARCH64_NONE", /* name */ 465 false, /* partial_inplace */ 466 0, /* src_mask */ 467 0, /* dst_mask */ 468 false), /* pcrel_offset */ 469 470 /* .xword: (S+A) */ 471 HOWTO64 (AARCH64_R (ABS64), /* type */ 472 0, /* rightshift */ 473 8, /* size */ 474 64, /* bitsize */ 475 false, /* pc_relative */ 476 0, /* bitpos */ 477 complain_overflow_unsigned, /* complain_on_overflow */ 478 bfd_elf_generic_reloc, /* special_function */ 479 AARCH64_R_STR (ABS64), /* name */ 480 false, /* partial_inplace */ 481 0, /* src_mask */ 482 ALL_ONES, /* dst_mask */ 483 false), /* pcrel_offset */ 484 485 /* .word: (S+A) */ 486 HOWTO (AARCH64_R (ABS32), /* type */ 487 0, /* rightshift */ 488 4, /* size */ 489 32, /* bitsize */ 490 false, /* pc_relative */ 491 0, /* bitpos */ 492 complain_overflow_unsigned, /* complain_on_overflow */ 493 bfd_elf_generic_reloc, /* special_function */ 494 AARCH64_R_STR (ABS32), /* name */ 495 false, /* partial_inplace */ 496 0, /* src_mask */ 497 0xffffffff, /* dst_mask */ 498 false), /* pcrel_offset */ 499 500 /* .half: (S+A) */ 501 HOWTO (AARCH64_R (ABS16), /* type */ 502 0, /* rightshift */ 503 2, /* size */ 504 16, /* bitsize */ 505 false, /* pc_relative */ 506 0, /* bitpos */ 507 complain_overflow_unsigned, /* complain_on_overflow */ 508 bfd_elf_generic_reloc, /* special_function */ 509 AARCH64_R_STR 
(ABS16), /* name */ 510 false, /* partial_inplace */ 511 0, /* src_mask */ 512 0xffff, /* dst_mask */ 513 false), /* pcrel_offset */ 514 515 /* .xword: (S+A-P) */ 516 HOWTO64 (AARCH64_R (PREL64), /* type */ 517 0, /* rightshift */ 518 8, /* size */ 519 64, /* bitsize */ 520 true, /* pc_relative */ 521 0, /* bitpos */ 522 complain_overflow_signed, /* complain_on_overflow */ 523 bfd_elf_generic_reloc, /* special_function */ 524 AARCH64_R_STR (PREL64), /* name */ 525 false, /* partial_inplace */ 526 0, /* src_mask */ 527 ALL_ONES, /* dst_mask */ 528 true), /* pcrel_offset */ 529 530 /* .word: (S+A-P) */ 531 HOWTO (AARCH64_R (PREL32), /* type */ 532 0, /* rightshift */ 533 4, /* size */ 534 32, /* bitsize */ 535 true, /* pc_relative */ 536 0, /* bitpos */ 537 complain_overflow_signed, /* complain_on_overflow */ 538 bfd_elf_generic_reloc, /* special_function */ 539 AARCH64_R_STR (PREL32), /* name */ 540 false, /* partial_inplace */ 541 0, /* src_mask */ 542 0xffffffff, /* dst_mask */ 543 true), /* pcrel_offset */ 544 545 /* .half: (S+A-P) */ 546 HOWTO (AARCH64_R (PREL16), /* type */ 547 0, /* rightshift */ 548 2, /* size */ 549 16, /* bitsize */ 550 true, /* pc_relative */ 551 0, /* bitpos */ 552 complain_overflow_signed, /* complain_on_overflow */ 553 bfd_elf_generic_reloc, /* special_function */ 554 AARCH64_R_STR (PREL16), /* name */ 555 false, /* partial_inplace */ 556 0, /* src_mask */ 557 0xffff, /* dst_mask */ 558 true), /* pcrel_offset */ 559 560 /* Group relocations to create a 16, 32, 48 or 64 bit 561 unsigned data or abs address inline. 
*/ 562 563 /* MOVZ: ((S+A) >> 0) & 0xffff */ 564 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */ 565 0, /* rightshift */ 566 4, /* size */ 567 16, /* bitsize */ 568 false, /* pc_relative */ 569 0, /* bitpos */ 570 complain_overflow_unsigned, /* complain_on_overflow */ 571 bfd_elf_generic_reloc, /* special_function */ 572 AARCH64_R_STR (MOVW_UABS_G0), /* name */ 573 false, /* partial_inplace */ 574 0, /* src_mask */ 575 0xffff, /* dst_mask */ 576 false), /* pcrel_offset */ 577 578 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */ 579 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */ 580 0, /* rightshift */ 581 4, /* size */ 582 16, /* bitsize */ 583 false, /* pc_relative */ 584 0, /* bitpos */ 585 complain_overflow_dont, /* complain_on_overflow */ 586 bfd_elf_generic_reloc, /* special_function */ 587 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */ 588 false, /* partial_inplace */ 589 0, /* src_mask */ 590 0xffff, /* dst_mask */ 591 false), /* pcrel_offset */ 592 593 /* MOVZ: ((S+A) >> 16) & 0xffff */ 594 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */ 595 16, /* rightshift */ 596 4, /* size */ 597 16, /* bitsize */ 598 false, /* pc_relative */ 599 0, /* bitpos */ 600 complain_overflow_unsigned, /* complain_on_overflow */ 601 bfd_elf_generic_reloc, /* special_function */ 602 AARCH64_R_STR (MOVW_UABS_G1), /* name */ 603 false, /* partial_inplace */ 604 0, /* src_mask */ 605 0xffff, /* dst_mask */ 606 false), /* pcrel_offset */ 607 608 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */ 609 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */ 610 16, /* rightshift */ 611 4, /* size */ 612 16, /* bitsize */ 613 false, /* pc_relative */ 614 0, /* bitpos */ 615 complain_overflow_dont, /* complain_on_overflow */ 616 bfd_elf_generic_reloc, /* special_function */ 617 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */ 618 false, /* partial_inplace */ 619 0, /* src_mask */ 620 0xffff, /* dst_mask */ 621 false), /* pcrel_offset */ 622 623 /* MOVZ: ((S+A) >> 32) & 0xffff */ 624 HOWTO64 (AARCH64_R 
(MOVW_UABS_G2), /* type */ 625 32, /* rightshift */ 626 4, /* size */ 627 16, /* bitsize */ 628 false, /* pc_relative */ 629 0, /* bitpos */ 630 complain_overflow_unsigned, /* complain_on_overflow */ 631 bfd_elf_generic_reloc, /* special_function */ 632 AARCH64_R_STR (MOVW_UABS_G2), /* name */ 633 false, /* partial_inplace */ 634 0, /* src_mask */ 635 0xffff, /* dst_mask */ 636 false), /* pcrel_offset */ 637 638 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */ 639 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */ 640 32, /* rightshift */ 641 4, /* size */ 642 16, /* bitsize */ 643 false, /* pc_relative */ 644 0, /* bitpos */ 645 complain_overflow_dont, /* complain_on_overflow */ 646 bfd_elf_generic_reloc, /* special_function */ 647 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */ 648 false, /* partial_inplace */ 649 0, /* src_mask */ 650 0xffff, /* dst_mask */ 651 false), /* pcrel_offset */ 652 653 /* MOVZ: ((S+A) >> 48) & 0xffff */ 654 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */ 655 48, /* rightshift */ 656 4, /* size */ 657 16, /* bitsize */ 658 false, /* pc_relative */ 659 0, /* bitpos */ 660 complain_overflow_unsigned, /* complain_on_overflow */ 661 bfd_elf_generic_reloc, /* special_function */ 662 AARCH64_R_STR (MOVW_UABS_G3), /* name */ 663 false, /* partial_inplace */ 664 0, /* src_mask */ 665 0xffff, /* dst_mask */ 666 false), /* pcrel_offset */ 667 668 /* Group relocations to create high part of a 16, 32, 48 or 64 bit 669 signed data or abs address inline. Will change instruction 670 to MOVN or MOVZ depending on sign of calculated value. 
*/ 671 672 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */ 673 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */ 674 0, /* rightshift */ 675 4, /* size */ 676 17, /* bitsize */ 677 false, /* pc_relative */ 678 0, /* bitpos */ 679 complain_overflow_signed, /* complain_on_overflow */ 680 bfd_elf_generic_reloc, /* special_function */ 681 AARCH64_R_STR (MOVW_SABS_G0), /* name */ 682 false, /* partial_inplace */ 683 0, /* src_mask */ 684 0xffff, /* dst_mask */ 685 false), /* pcrel_offset */ 686 687 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */ 688 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */ 689 16, /* rightshift */ 690 4, /* size */ 691 17, /* bitsize */ 692 false, /* pc_relative */ 693 0, /* bitpos */ 694 complain_overflow_signed, /* complain_on_overflow */ 695 bfd_elf_generic_reloc, /* special_function */ 696 AARCH64_R_STR (MOVW_SABS_G1), /* name */ 697 false, /* partial_inplace */ 698 0, /* src_mask */ 699 0xffff, /* dst_mask */ 700 false), /* pcrel_offset */ 701 702 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */ 703 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */ 704 32, /* rightshift */ 705 4, /* size */ 706 17, /* bitsize */ 707 false, /* pc_relative */ 708 0, /* bitpos */ 709 complain_overflow_signed, /* complain_on_overflow */ 710 bfd_elf_generic_reloc, /* special_function */ 711 AARCH64_R_STR (MOVW_SABS_G2), /* name */ 712 false, /* partial_inplace */ 713 0, /* src_mask */ 714 0xffff, /* dst_mask */ 715 false), /* pcrel_offset */ 716 717 /* Group relocations to create a 16, 32, 48 or 64 bit 718 PC relative address inline. 
*/ 719 720 /* MOV[NZ]: ((S+A-P) >> 0) & 0xffff */ 721 HOWTO (AARCH64_R (MOVW_PREL_G0), /* type */ 722 0, /* rightshift */ 723 4, /* size */ 724 17, /* bitsize */ 725 true, /* pc_relative */ 726 0, /* bitpos */ 727 complain_overflow_signed, /* complain_on_overflow */ 728 bfd_elf_generic_reloc, /* special_function */ 729 AARCH64_R_STR (MOVW_PREL_G0), /* name */ 730 false, /* partial_inplace */ 731 0, /* src_mask */ 732 0xffff, /* dst_mask */ 733 true), /* pcrel_offset */ 734 735 /* MOVK: ((S+A-P) >> 0) & 0xffff [no overflow check] */ 736 HOWTO (AARCH64_R (MOVW_PREL_G0_NC), /* type */ 737 0, /* rightshift */ 738 4, /* size */ 739 16, /* bitsize */ 740 true, /* pc_relative */ 741 0, /* bitpos */ 742 complain_overflow_dont, /* complain_on_overflow */ 743 bfd_elf_generic_reloc, /* special_function */ 744 AARCH64_R_STR (MOVW_PREL_G0_NC), /* name */ 745 false, /* partial_inplace */ 746 0, /* src_mask */ 747 0xffff, /* dst_mask */ 748 true), /* pcrel_offset */ 749 750 /* MOV[NZ]: ((S+A-P) >> 16) & 0xffff */ 751 HOWTO (AARCH64_R (MOVW_PREL_G1), /* type */ 752 16, /* rightshift */ 753 4, /* size */ 754 17, /* bitsize */ 755 true, /* pc_relative */ 756 0, /* bitpos */ 757 complain_overflow_signed, /* complain_on_overflow */ 758 bfd_elf_generic_reloc, /* special_function */ 759 AARCH64_R_STR (MOVW_PREL_G1), /* name */ 760 false, /* partial_inplace */ 761 0, /* src_mask */ 762 0xffff, /* dst_mask */ 763 true), /* pcrel_offset */ 764 765 /* MOVK: ((S+A-P) >> 16) & 0xffff [no overflow check] */ 766 HOWTO64 (AARCH64_R (MOVW_PREL_G1_NC), /* type */ 767 16, /* rightshift */ 768 4, /* size */ 769 16, /* bitsize */ 770 true, /* pc_relative */ 771 0, /* bitpos */ 772 complain_overflow_dont, /* complain_on_overflow */ 773 bfd_elf_generic_reloc, /* special_function */ 774 AARCH64_R_STR (MOVW_PREL_G1_NC), /* name */ 775 false, /* partial_inplace */ 776 0, /* src_mask */ 777 0xffff, /* dst_mask */ 778 true), /* pcrel_offset */ 779 780 /* MOV[NZ]: ((S+A-P) >> 32) & 0xffff */ 781 HOWTO64 
(AARCH64_R (MOVW_PREL_G2), /* type */ 782 32, /* rightshift */ 783 4, /* size */ 784 17, /* bitsize */ 785 true, /* pc_relative */ 786 0, /* bitpos */ 787 complain_overflow_signed, /* complain_on_overflow */ 788 bfd_elf_generic_reloc, /* special_function */ 789 AARCH64_R_STR (MOVW_PREL_G2), /* name */ 790 false, /* partial_inplace */ 791 0, /* src_mask */ 792 0xffff, /* dst_mask */ 793 true), /* pcrel_offset */ 794 795 /* MOVK: ((S+A-P) >> 32) & 0xffff [no overflow check] */ 796 HOWTO64 (AARCH64_R (MOVW_PREL_G2_NC), /* type */ 797 32, /* rightshift */ 798 4, /* size */ 799 16, /* bitsize */ 800 true, /* pc_relative */ 801 0, /* bitpos */ 802 complain_overflow_dont, /* complain_on_overflow */ 803 bfd_elf_generic_reloc, /* special_function */ 804 AARCH64_R_STR (MOVW_PREL_G2_NC), /* name */ 805 false, /* partial_inplace */ 806 0, /* src_mask */ 807 0xffff, /* dst_mask */ 808 true), /* pcrel_offset */ 809 810 /* MOV[NZ]: ((S+A-P) >> 48) & 0xffff */ 811 HOWTO64 (AARCH64_R (MOVW_PREL_G3), /* type */ 812 48, /* rightshift */ 813 4, /* size */ 814 16, /* bitsize */ 815 true, /* pc_relative */ 816 0, /* bitpos */ 817 complain_overflow_dont, /* complain_on_overflow */ 818 bfd_elf_generic_reloc, /* special_function */ 819 AARCH64_R_STR (MOVW_PREL_G3), /* name */ 820 false, /* partial_inplace */ 821 0, /* src_mask */ 822 0xffff, /* dst_mask */ 823 true), /* pcrel_offset */ 824 825 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store 826 addresses: PG(x) is (x & ~0xfff). 
*/ 827 828 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */ 829 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */ 830 2, /* rightshift */ 831 4, /* size */ 832 19, /* bitsize */ 833 true, /* pc_relative */ 834 0, /* bitpos */ 835 complain_overflow_signed, /* complain_on_overflow */ 836 bfd_elf_generic_reloc, /* special_function */ 837 AARCH64_R_STR (LD_PREL_LO19), /* name */ 838 false, /* partial_inplace */ 839 0, /* src_mask */ 840 0x7ffff, /* dst_mask */ 841 true), /* pcrel_offset */ 842 843 /* ADR: (S+A-P) & 0x1fffff */ 844 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */ 845 0, /* rightshift */ 846 4, /* size */ 847 21, /* bitsize */ 848 true, /* pc_relative */ 849 0, /* bitpos */ 850 complain_overflow_signed, /* complain_on_overflow */ 851 bfd_elf_generic_reloc, /* special_function */ 852 AARCH64_R_STR (ADR_PREL_LO21), /* name */ 853 false, /* partial_inplace */ 854 0, /* src_mask */ 855 0x1fffff, /* dst_mask */ 856 true), /* pcrel_offset */ 857 858 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */ 859 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */ 860 12, /* rightshift */ 861 4, /* size */ 862 21, /* bitsize */ 863 true, /* pc_relative */ 864 0, /* bitpos */ 865 complain_overflow_signed, /* complain_on_overflow */ 866 bfd_elf_generic_reloc, /* special_function */ 867 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */ 868 false, /* partial_inplace */ 869 0, /* src_mask */ 870 0x1fffff, /* dst_mask */ 871 true), /* pcrel_offset */ 872 873 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */ 874 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */ 875 12, /* rightshift */ 876 4, /* size */ 877 21, /* bitsize */ 878 true, /* pc_relative */ 879 0, /* bitpos */ 880 complain_overflow_dont, /* complain_on_overflow */ 881 bfd_elf_generic_reloc, /* special_function */ 882 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */ 883 false, /* partial_inplace */ 884 0, /* src_mask */ 885 0x1fffff, /* dst_mask */ 886 true), /* pcrel_offset */ 887 888 /* ADD: (S+A) & 0xfff [no overflow check] */ 
889 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */ 890 0, /* rightshift */ 891 4, /* size */ 892 12, /* bitsize */ 893 false, /* pc_relative */ 894 10, /* bitpos */ 895 complain_overflow_dont, /* complain_on_overflow */ 896 bfd_elf_generic_reloc, /* special_function */ 897 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */ 898 false, /* partial_inplace */ 899 0, /* src_mask */ 900 0x3ffc00, /* dst_mask */ 901 false), /* pcrel_offset */ 902 903 /* LD/ST8: (S+A) & 0xfff */ 904 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */ 905 0, /* rightshift */ 906 4, /* size */ 907 12, /* bitsize */ 908 false, /* pc_relative */ 909 0, /* bitpos */ 910 complain_overflow_dont, /* complain_on_overflow */ 911 bfd_elf_generic_reloc, /* special_function */ 912 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */ 913 false, /* partial_inplace */ 914 0, /* src_mask */ 915 0xfff, /* dst_mask */ 916 false), /* pcrel_offset */ 917 918 /* Relocations for control-flow instructions. */ 919 920 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */ 921 HOWTO (AARCH64_R (TSTBR14), /* type */ 922 2, /* rightshift */ 923 4, /* size */ 924 14, /* bitsize */ 925 true, /* pc_relative */ 926 0, /* bitpos */ 927 complain_overflow_signed, /* complain_on_overflow */ 928 bfd_elf_generic_reloc, /* special_function */ 929 AARCH64_R_STR (TSTBR14), /* name */ 930 false, /* partial_inplace */ 931 0, /* src_mask */ 932 0x3fff, /* dst_mask */ 933 true), /* pcrel_offset */ 934 935 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */ 936 HOWTO (AARCH64_R (CONDBR19), /* type */ 937 2, /* rightshift */ 938 4, /* size */ 939 19, /* bitsize */ 940 true, /* pc_relative */ 941 0, /* bitpos */ 942 complain_overflow_signed, /* complain_on_overflow */ 943 bfd_elf_generic_reloc, /* special_function */ 944 AARCH64_R_STR (CONDBR19), /* name */ 945 false, /* partial_inplace */ 946 0, /* src_mask */ 947 0x7ffff, /* dst_mask */ 948 true), /* pcrel_offset */ 949 950 /* B: ((S+A-P) >> 2) & 0x3ffffff */ 951 HOWTO (AARCH64_R (JUMP26), /* type */ 952 2, /* rightshift */ 953 4, 
/* size */ 954 26, /* bitsize */ 955 true, /* pc_relative */ 956 0, /* bitpos */ 957 complain_overflow_signed, /* complain_on_overflow */ 958 bfd_elf_generic_reloc, /* special_function */ 959 AARCH64_R_STR (JUMP26), /* name */ 960 false, /* partial_inplace */ 961 0, /* src_mask */ 962 0x3ffffff, /* dst_mask */ 963 true), /* pcrel_offset */ 964 965 /* BL: ((S+A-P) >> 2) & 0x3ffffff */ 966 HOWTO (AARCH64_R (CALL26), /* type */ 967 2, /* rightshift */ 968 4, /* size */ 969 26, /* bitsize */ 970 true, /* pc_relative */ 971 0, /* bitpos */ 972 complain_overflow_signed, /* complain_on_overflow */ 973 bfd_elf_generic_reloc, /* special_function */ 974 AARCH64_R_STR (CALL26), /* name */ 975 false, /* partial_inplace */ 976 0, /* src_mask */ 977 0x3ffffff, /* dst_mask */ 978 true), /* pcrel_offset */ 979 980 /* LD/ST16: (S+A) & 0xffe */ 981 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */ 982 1, /* rightshift */ 983 4, /* size */ 984 12, /* bitsize */ 985 false, /* pc_relative */ 986 0, /* bitpos */ 987 complain_overflow_dont, /* complain_on_overflow */ 988 bfd_elf_generic_reloc, /* special_function */ 989 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */ 990 false, /* partial_inplace */ 991 0, /* src_mask */ 992 0xffe, /* dst_mask */ 993 false), /* pcrel_offset */ 994 995 /* LD/ST32: (S+A) & 0xffc */ 996 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */ 997 2, /* rightshift */ 998 4, /* size */ 999 12, /* bitsize */ 1000 false, /* pc_relative */ 1001 0, /* bitpos */ 1002 complain_overflow_dont, /* complain_on_overflow */ 1003 bfd_elf_generic_reloc, /* special_function */ 1004 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */ 1005 false, /* partial_inplace */ 1006 0, /* src_mask */ 1007 0xffc, /* dst_mask */ 1008 false), /* pcrel_offset */ 1009 1010 /* LD/ST64: (S+A) & 0xff8 */ 1011 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */ 1012 3, /* rightshift */ 1013 4, /* size */ 1014 12, /* bitsize */ 1015 false, /* pc_relative */ 1016 0, /* bitpos */ 1017 complain_overflow_dont, /* 
complain_on_overflow */ 1018 bfd_elf_generic_reloc, /* special_function */ 1019 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */ 1020 false, /* partial_inplace */ 1021 0, /* src_mask */ 1022 0xff8, /* dst_mask */ 1023 false), /* pcrel_offset */ 1024 1025 /* LD/ST128: (S+A) & 0xff0 */ 1026 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */ 1027 4, /* rightshift */ 1028 4, /* size */ 1029 12, /* bitsize */ 1030 false, /* pc_relative */ 1031 0, /* bitpos */ 1032 complain_overflow_dont, /* complain_on_overflow */ 1033 bfd_elf_generic_reloc, /* special_function */ 1034 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */ 1035 false, /* partial_inplace */ 1036 0, /* src_mask */ 1037 0xff0, /* dst_mask */ 1038 false), /* pcrel_offset */ 1039 1040 /* Set a load-literal immediate field to bits 1041 0x1FFFFC of G(S)-P */ 1042 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */ 1043 2, /* rightshift */ 1044 4, /* size */ 1045 19, /* bitsize */ 1046 true, /* pc_relative */ 1047 0, /* bitpos */ 1048 complain_overflow_signed, /* complain_on_overflow */ 1049 bfd_elf_generic_reloc, /* special_function */ 1050 AARCH64_R_STR (GOT_LD_PREL19), /* name */ 1051 false, /* partial_inplace */ 1052 0, /* src_mask */ 1053 0xffffe0, /* dst_mask */ 1054 true), /* pcrel_offset */ 1055 1056 /* Get to the page for the GOT entry for the symbol 1057 (G(S) - P) using an ADRP instruction. 
*/ 1058 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */ 1059 12, /* rightshift */ 1060 4, /* size */ 1061 21, /* bitsize */ 1062 true, /* pc_relative */ 1063 0, /* bitpos */ 1064 complain_overflow_dont, /* complain_on_overflow */ 1065 bfd_elf_generic_reloc, /* special_function */ 1066 AARCH64_R_STR (ADR_GOT_PAGE), /* name */ 1067 false, /* partial_inplace */ 1068 0, /* src_mask */ 1069 0x1fffff, /* dst_mask */ 1070 true), /* pcrel_offset */ 1071 1072 /* LD64: GOT offset G(S) & 0xff8 */ 1073 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */ 1074 3, /* rightshift */ 1075 4, /* size */ 1076 12, /* bitsize */ 1077 false, /* pc_relative */ 1078 0, /* bitpos */ 1079 complain_overflow_dont, /* complain_on_overflow */ 1080 bfd_elf_generic_reloc, /* special_function */ 1081 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */ 1082 false, /* partial_inplace */ 1083 0, /* src_mask */ 1084 0xff8, /* dst_mask */ 1085 false), /* pcrel_offset */ 1086 1087 /* LD32: GOT offset G(S) & 0xffc */ 1088 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */ 1089 2, /* rightshift */ 1090 4, /* size */ 1091 12, /* bitsize */ 1092 false, /* pc_relative */ 1093 0, /* bitpos */ 1094 complain_overflow_dont, /* complain_on_overflow */ 1095 bfd_elf_generic_reloc, /* special_function */ 1096 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */ 1097 false, /* partial_inplace */ 1098 0, /* src_mask */ 1099 0xffc, /* dst_mask */ 1100 false), /* pcrel_offset */ 1101 1102 /* Lower 16 bits of GOT offset for the symbol. */ 1103 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G0_NC), /* type */ 1104 0, /* rightshift */ 1105 4, /* size */ 1106 16, /* bitsize */ 1107 false, /* pc_relative */ 1108 0, /* bitpos */ 1109 complain_overflow_dont, /* complain_on_overflow */ 1110 bfd_elf_generic_reloc, /* special_function */ 1111 AARCH64_R_STR (MOVW_GOTOFF_G0_NC), /* name */ 1112 false, /* partial_inplace */ 1113 0, /* src_mask */ 1114 0xffff, /* dst_mask */ 1115 false), /* pcrel_offset */ 1116 1117 /* Higher 16 bits of GOT offset for the symbol. 
*/ 1118 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G1), /* type */ 1119 16, /* rightshift */ 1120 4, /* size */ 1121 16, /* bitsize */ 1122 false, /* pc_relative */ 1123 0, /* bitpos */ 1124 complain_overflow_unsigned, /* complain_on_overflow */ 1125 bfd_elf_generic_reloc, /* special_function */ 1126 AARCH64_R_STR (MOVW_GOTOFF_G1), /* name */ 1127 false, /* partial_inplace */ 1128 0, /* src_mask */ 1129 0xffff, /* dst_mask */ 1130 false), /* pcrel_offset */ 1131 1132 /* LD64: GOT offset for the symbol. */ 1133 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */ 1134 3, /* rightshift */ 1135 4, /* size */ 1136 12, /* bitsize */ 1137 false, /* pc_relative */ 1138 0, /* bitpos */ 1139 complain_overflow_unsigned, /* complain_on_overflow */ 1140 bfd_elf_generic_reloc, /* special_function */ 1141 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */ 1142 false, /* partial_inplace */ 1143 0, /* src_mask */ 1144 0x7ff8, /* dst_mask */ 1145 false), /* pcrel_offset */ 1146 1147 /* LD32: GOT offset to the page address of GOT table. 1148 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */ 1149 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */ 1150 2, /* rightshift */ 1151 4, /* size */ 1152 12, /* bitsize */ 1153 false, /* pc_relative */ 1154 0, /* bitpos */ 1155 complain_overflow_unsigned, /* complain_on_overflow */ 1156 bfd_elf_generic_reloc, /* special_function */ 1157 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */ 1158 false, /* partial_inplace */ 1159 0, /* src_mask */ 1160 0x5ffc, /* dst_mask */ 1161 false), /* pcrel_offset */ 1162 1163 /* LD64: GOT offset to the page address of GOT table. 1164 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. 
*/ 1165 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */ 1166 3, /* rightshift */ 1167 4, /* size */ 1168 12, /* bitsize */ 1169 false, /* pc_relative */ 1170 0, /* bitpos */ 1171 complain_overflow_unsigned, /* complain_on_overflow */ 1172 bfd_elf_generic_reloc, /* special_function */ 1173 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */ 1174 false, /* partial_inplace */ 1175 0, /* src_mask */ 1176 0x7ff8, /* dst_mask */ 1177 false), /* pcrel_offset */ 1178 1179 /* Get to the page for the GOT entry for the symbol 1180 (G(S) - P) using an ADRP instruction. */ 1181 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */ 1182 12, /* rightshift */ 1183 4, /* size */ 1184 21, /* bitsize */ 1185 true, /* pc_relative */ 1186 0, /* bitpos */ 1187 complain_overflow_dont, /* complain_on_overflow */ 1188 bfd_elf_generic_reloc, /* special_function */ 1189 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */ 1190 false, /* partial_inplace */ 1191 0, /* src_mask */ 1192 0x1fffff, /* dst_mask */ 1193 true), /* pcrel_offset */ 1194 1195 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */ 1196 0, /* rightshift */ 1197 4, /* size */ 1198 21, /* bitsize */ 1199 true, /* pc_relative */ 1200 0, /* bitpos */ 1201 complain_overflow_dont, /* complain_on_overflow */ 1202 bfd_elf_generic_reloc, /* special_function */ 1203 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */ 1204 false, /* partial_inplace */ 1205 0, /* src_mask */ 1206 0x1fffff, /* dst_mask */ 1207 true), /* pcrel_offset */ 1208 1209 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */ 1210 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */ 1211 0, /* rightshift */ 1212 4, /* size */ 1213 12, /* bitsize */ 1214 false, /* pc_relative */ 1215 0, /* bitpos */ 1216 complain_overflow_dont, /* complain_on_overflow */ 1217 bfd_elf_generic_reloc, /* special_function */ 1218 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */ 1219 false, /* partial_inplace */ 1220 0, /* src_mask */ 1221 0xfff, /* dst_mask */ 1222 false), /* pcrel_offset */ 1223 1224 /* Lower 16 bits 
of GOT offset to tls_index. */ 1225 HOWTO64 (AARCH64_R (TLSGD_MOVW_G0_NC), /* type */ 1226 0, /* rightshift */ 1227 4, /* size */ 1228 16, /* bitsize */ 1229 false, /* pc_relative */ 1230 0, /* bitpos */ 1231 complain_overflow_dont, /* complain_on_overflow */ 1232 bfd_elf_generic_reloc, /* special_function */ 1233 AARCH64_R_STR (TLSGD_MOVW_G0_NC), /* name */ 1234 false, /* partial_inplace */ 1235 0, /* src_mask */ 1236 0xffff, /* dst_mask */ 1237 false), /* pcrel_offset */ 1238 1239 /* Higher 16 bits of GOT offset to tls_index. */ 1240 HOWTO64 (AARCH64_R (TLSGD_MOVW_G1), /* type */ 1241 16, /* rightshift */ 1242 4, /* size */ 1243 16, /* bitsize */ 1244 false, /* pc_relative */ 1245 0, /* bitpos */ 1246 complain_overflow_unsigned, /* complain_on_overflow */ 1247 bfd_elf_generic_reloc, /* special_function */ 1248 AARCH64_R_STR (TLSGD_MOVW_G1), /* name */ 1249 false, /* partial_inplace */ 1250 0, /* src_mask */ 1251 0xffff, /* dst_mask */ 1252 false), /* pcrel_offset */ 1253 1254 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */ 1255 12, /* rightshift */ 1256 4, /* size */ 1257 21, /* bitsize */ 1258 false, /* pc_relative */ 1259 0, /* bitpos */ 1260 complain_overflow_dont, /* complain_on_overflow */ 1261 bfd_elf_generic_reloc, /* special_function */ 1262 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */ 1263 false, /* partial_inplace */ 1264 0, /* src_mask */ 1265 0x1fffff, /* dst_mask */ 1266 false), /* pcrel_offset */ 1267 1268 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */ 1269 3, /* rightshift */ 1270 4, /* size */ 1271 12, /* bitsize */ 1272 false, /* pc_relative */ 1273 0, /* bitpos */ 1274 complain_overflow_dont, /* complain_on_overflow */ 1275 bfd_elf_generic_reloc, /* special_function */ 1276 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */ 1277 false, /* partial_inplace */ 1278 0, /* src_mask */ 1279 0xff8, /* dst_mask */ 1280 false), /* pcrel_offset */ 1281 1282 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */ 
1283 2, /* rightshift */ 1284 4, /* size */ 1285 12, /* bitsize */ 1286 false, /* pc_relative */ 1287 0, /* bitpos */ 1288 complain_overflow_dont, /* complain_on_overflow */ 1289 bfd_elf_generic_reloc, /* special_function */ 1290 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */ 1291 false, /* partial_inplace */ 1292 0, /* src_mask */ 1293 0xffc, /* dst_mask */ 1294 false), /* pcrel_offset */ 1295 1296 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */ 1297 2, /* rightshift */ 1298 4, /* size */ 1299 19, /* bitsize */ 1300 false, /* pc_relative */ 1301 0, /* bitpos */ 1302 complain_overflow_dont, /* complain_on_overflow */ 1303 bfd_elf_generic_reloc, /* special_function */ 1304 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */ 1305 false, /* partial_inplace */ 1306 0, /* src_mask */ 1307 0x1ffffc, /* dst_mask */ 1308 false), /* pcrel_offset */ 1309 1310 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */ 1311 0, /* rightshift */ 1312 4, /* size */ 1313 16, /* bitsize */ 1314 false, /* pc_relative */ 1315 0, /* bitpos */ 1316 complain_overflow_dont, /* complain_on_overflow */ 1317 bfd_elf_generic_reloc, /* special_function */ 1318 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */ 1319 false, /* partial_inplace */ 1320 0, /* src_mask */ 1321 0xffff, /* dst_mask */ 1322 false), /* pcrel_offset */ 1323 1324 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */ 1325 16, /* rightshift */ 1326 4, /* size */ 1327 16, /* bitsize */ 1328 false, /* pc_relative */ 1329 0, /* bitpos */ 1330 complain_overflow_unsigned, /* complain_on_overflow */ 1331 bfd_elf_generic_reloc, /* special_function */ 1332 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */ 1333 false, /* partial_inplace */ 1334 0, /* src_mask */ 1335 0xffff, /* dst_mask */ 1336 false), /* pcrel_offset */ 1337 1338 /* ADD: bit[23:12] of byte offset to module TLS base address. 
*/ 1339 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_HI12), /* type */ 1340 12, /* rightshift */ 1341 4, /* size */ 1342 12, /* bitsize */ 1343 false, /* pc_relative */ 1344 0, /* bitpos */ 1345 complain_overflow_unsigned, /* complain_on_overflow */ 1346 bfd_elf_generic_reloc, /* special_function */ 1347 AARCH64_R_STR (TLSLD_ADD_DTPREL_HI12), /* name */ 1348 false, /* partial_inplace */ 1349 0, /* src_mask */ 1350 0xfff, /* dst_mask */ 1351 false), /* pcrel_offset */ 1352 1353 /* Unsigned 12 bit byte offset to module TLS base address. */ 1354 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12), /* type */ 1355 0, /* rightshift */ 1356 4, /* size */ 1357 12, /* bitsize */ 1358 false, /* pc_relative */ 1359 0, /* bitpos */ 1360 complain_overflow_unsigned, /* complain_on_overflow */ 1361 bfd_elf_generic_reloc, /* special_function */ 1362 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12), /* name */ 1363 false, /* partial_inplace */ 1364 0, /* src_mask */ 1365 0xfff, /* dst_mask */ 1366 false), /* pcrel_offset */ 1367 1368 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12. 
*/ 1369 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12_NC), /* type */ 1370 0, /* rightshift */ 1371 4, /* size */ 1372 12, /* bitsize */ 1373 false, /* pc_relative */ 1374 0, /* bitpos */ 1375 complain_overflow_dont, /* complain_on_overflow */ 1376 bfd_elf_generic_reloc, /* special_function */ 1377 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12_NC), /* name */ 1378 false, /* partial_inplace */ 1379 0, /* src_mask */ 1380 0xfff, /* dst_mask */ 1381 false), /* pcrel_offset */ 1382 1383 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */ 1384 HOWTO (AARCH64_R (TLSLD_ADD_LO12_NC), /* type */ 1385 0, /* rightshift */ 1386 4, /* size */ 1387 12, /* bitsize */ 1388 false, /* pc_relative */ 1389 0, /* bitpos */ 1390 complain_overflow_dont, /* complain_on_overflow */ 1391 bfd_elf_generic_reloc, /* special_function */ 1392 AARCH64_R_STR (TLSLD_ADD_LO12_NC), /* name */ 1393 false, /* partial_inplace */ 1394 0, /* src_mask */ 1395 0xfff, /* dst_mask */ 1396 false), /* pcrel_offset */ 1397 1398 /* Get to the page for the GOT entry for the symbol 1399 (G(S) - P) using an ADRP instruction. 
*/ 1400 HOWTO (AARCH64_R (TLSLD_ADR_PAGE21), /* type */ 1401 12, /* rightshift */ 1402 4, /* size */ 1403 21, /* bitsize */ 1404 true, /* pc_relative */ 1405 0, /* bitpos */ 1406 complain_overflow_signed, /* complain_on_overflow */ 1407 bfd_elf_generic_reloc, /* special_function */ 1408 AARCH64_R_STR (TLSLD_ADR_PAGE21), /* name */ 1409 false, /* partial_inplace */ 1410 0, /* src_mask */ 1411 0x1fffff, /* dst_mask */ 1412 true), /* pcrel_offset */ 1413 1414 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */ 1415 0, /* rightshift */ 1416 4, /* size */ 1417 21, /* bitsize */ 1418 true, /* pc_relative */ 1419 0, /* bitpos */ 1420 complain_overflow_signed, /* complain_on_overflow */ 1421 bfd_elf_generic_reloc, /* special_function */ 1422 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */ 1423 false, /* partial_inplace */ 1424 0, /* src_mask */ 1425 0x1fffff, /* dst_mask */ 1426 true), /* pcrel_offset */ 1427 1428 /* LD/ST16: bit[11:1] of byte offset to module TLS base address. */ 1429 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12), /* type */ 1430 1, /* rightshift */ 1431 4, /* size */ 1432 11, /* bitsize */ 1433 false, /* pc_relative */ 1434 10, /* bitpos */ 1435 complain_overflow_unsigned, /* complain_on_overflow */ 1436 bfd_elf_generic_reloc, /* special_function */ 1437 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12), /* name */ 1438 false, /* partial_inplace */ 1439 0, /* src_mask */ 1440 0x1ffc00, /* dst_mask */ 1441 false), /* pcrel_offset */ 1442 1443 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12, but no overflow check. 
*/ 1444 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12_NC), /* type */ 1445 1, /* rightshift */ 1446 4, /* size */ 1447 11, /* bitsize */ 1448 false, /* pc_relative */ 1449 10, /* bitpos */ 1450 complain_overflow_dont, /* complain_on_overflow */ 1451 bfd_elf_generic_reloc, /* special_function */ 1452 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12_NC), /* name */ 1453 false, /* partial_inplace */ 1454 0, /* src_mask */ 1455 0x1ffc00, /* dst_mask */ 1456 false), /* pcrel_offset */ 1457 1458 /* LD/ST32: bit[11:2] of byte offset to module TLS base address. */ 1459 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12), /* type */ 1460 2, /* rightshift */ 1461 4, /* size */ 1462 10, /* bitsize */ 1463 false, /* pc_relative */ 1464 10, /* bitpos */ 1465 complain_overflow_unsigned, /* complain_on_overflow */ 1466 bfd_elf_generic_reloc, /* special_function */ 1467 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12), /* name */ 1468 false, /* partial_inplace */ 1469 0, /* src_mask */ 1470 0x3ffc00, /* dst_mask */ 1471 false), /* pcrel_offset */ 1472 1473 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12, but no overflow check. */ 1474 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12_NC), /* type */ 1475 2, /* rightshift */ 1476 4, /* size */ 1477 10, /* bitsize */ 1478 false, /* pc_relative */ 1479 10, /* bitpos */ 1480 complain_overflow_dont, /* complain_on_overflow */ 1481 bfd_elf_generic_reloc, /* special_function */ 1482 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12_NC), /* name */ 1483 false, /* partial_inplace */ 1484 0, /* src_mask */ 1485 0xffc00, /* dst_mask */ 1486 false), /* pcrel_offset */ 1487 1488 /* LD/ST64: bit[11:3] of byte offset to module TLS base address. 
*/ 1489 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12), /* type */ 1490 3, /* rightshift */ 1491 4, /* size */ 1492 9, /* bitsize */ 1493 false, /* pc_relative */ 1494 10, /* bitpos */ 1495 complain_overflow_unsigned, /* complain_on_overflow */ 1496 bfd_elf_generic_reloc, /* special_function */ 1497 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12), /* name */ 1498 false, /* partial_inplace */ 1499 0, /* src_mask */ 1500 0x3ffc00, /* dst_mask */ 1501 false), /* pcrel_offset */ 1502 1503 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12, but no overflow check. */ 1504 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12_NC), /* type */ 1505 3, /* rightshift */ 1506 4, /* size */ 1507 9, /* bitsize */ 1508 false, /* pc_relative */ 1509 10, /* bitpos */ 1510 complain_overflow_dont, /* complain_on_overflow */ 1511 bfd_elf_generic_reloc, /* special_function */ 1512 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12_NC), /* name */ 1513 false, /* partial_inplace */ 1514 0, /* src_mask */ 1515 0x7fc00, /* dst_mask */ 1516 false), /* pcrel_offset */ 1517 1518 /* LD/ST8: bit[11:0] of byte offset to module TLS base address. */ 1519 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12), /* type */ 1520 0, /* rightshift */ 1521 4, /* size */ 1522 12, /* bitsize */ 1523 false, /* pc_relative */ 1524 10, /* bitpos */ 1525 complain_overflow_unsigned, /* complain_on_overflow */ 1526 bfd_elf_generic_reloc, /* special_function */ 1527 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12), /* name */ 1528 false, /* partial_inplace */ 1529 0, /* src_mask */ 1530 0x3ffc00, /* dst_mask */ 1531 false), /* pcrel_offset */ 1532 1533 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12, but no overflow check. 
*/ 1534 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12_NC), /* type */ 1535 0, /* rightshift */ 1536 4, /* size */ 1537 12, /* bitsize */ 1538 false, /* pc_relative */ 1539 10, /* bitpos */ 1540 complain_overflow_dont, /* complain_on_overflow */ 1541 bfd_elf_generic_reloc, /* special_function */ 1542 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12_NC), /* name */ 1543 false, /* partial_inplace */ 1544 0, /* src_mask */ 1545 0x3ffc00, /* dst_mask */ 1546 false), /* pcrel_offset */ 1547 1548 /* MOVZ: bit[15:0] of byte offset to module TLS base address. */ 1549 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0), /* type */ 1550 0, /* rightshift */ 1551 4, /* size */ 1552 16, /* bitsize */ 1553 false, /* pc_relative */ 1554 0, /* bitpos */ 1555 complain_overflow_unsigned, /* complain_on_overflow */ 1556 bfd_elf_generic_reloc, /* special_function */ 1557 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0), /* name */ 1558 false, /* partial_inplace */ 1559 0, /* src_mask */ 1560 0xffff, /* dst_mask */ 1561 false), /* pcrel_offset */ 1562 1563 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */ 1564 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0_NC), /* type */ 1565 0, /* rightshift */ 1566 4, /* size */ 1567 16, /* bitsize */ 1568 false, /* pc_relative */ 1569 0, /* bitpos */ 1570 complain_overflow_dont, /* complain_on_overflow */ 1571 bfd_elf_generic_reloc, /* special_function */ 1572 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0_NC), /* name */ 1573 false, /* partial_inplace */ 1574 0, /* src_mask */ 1575 0xffff, /* dst_mask */ 1576 false), /* pcrel_offset */ 1577 1578 /* MOVZ: bit[31:16] of byte offset to module TLS base address. 
*/ 1579 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G1), /* type */ 1580 16, /* rightshift */ 1581 4, /* size */ 1582 16, /* bitsize */ 1583 false, /* pc_relative */ 1584 0, /* bitpos */ 1585 complain_overflow_unsigned, /* complain_on_overflow */ 1586 bfd_elf_generic_reloc, /* special_function */ 1587 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1), /* name */ 1588 false, /* partial_inplace */ 1589 0, /* src_mask */ 1590 0xffff, /* dst_mask */ 1591 false), /* pcrel_offset */ 1592 1593 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */ 1594 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G1_NC), /* type */ 1595 16, /* rightshift */ 1596 4, /* size */ 1597 16, /* bitsize */ 1598 false, /* pc_relative */ 1599 0, /* bitpos */ 1600 complain_overflow_dont, /* complain_on_overflow */ 1601 bfd_elf_generic_reloc, /* special_function */ 1602 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1_NC), /* name */ 1603 false, /* partial_inplace */ 1604 0, /* src_mask */ 1605 0xffff, /* dst_mask */ 1606 false), /* pcrel_offset */ 1607 1608 /* MOVZ: bit[47:32] of byte offset to module TLS base address. 
*/ 1609 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G2), /* type */ 1610 32, /* rightshift */ 1611 4, /* size */ 1612 16, /* bitsize */ 1613 false, /* pc_relative */ 1614 0, /* bitpos */ 1615 complain_overflow_unsigned, /* complain_on_overflow */ 1616 bfd_elf_generic_reloc, /* special_function */ 1617 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G2), /* name */ 1618 false, /* partial_inplace */ 1619 0, /* src_mask */ 1620 0xffff, /* dst_mask */ 1621 false), /* pcrel_offset */ 1622 1623 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */ 1624 32, /* rightshift */ 1625 4, /* size */ 1626 16, /* bitsize */ 1627 false, /* pc_relative */ 1628 0, /* bitpos */ 1629 complain_overflow_unsigned, /* complain_on_overflow */ 1630 bfd_elf_generic_reloc, /* special_function */ 1631 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */ 1632 false, /* partial_inplace */ 1633 0, /* src_mask */ 1634 0xffff, /* dst_mask */ 1635 false), /* pcrel_offset */ 1636 1637 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */ 1638 16, /* rightshift */ 1639 4, /* size */ 1640 16, /* bitsize */ 1641 false, /* pc_relative */ 1642 0, /* bitpos */ 1643 complain_overflow_dont, /* complain_on_overflow */ 1644 bfd_elf_generic_reloc, /* special_function */ 1645 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */ 1646 false, /* partial_inplace */ 1647 0, /* src_mask */ 1648 0xffff, /* dst_mask */ 1649 false), /* pcrel_offset */ 1650 1651 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */ 1652 16, /* rightshift */ 1653 4, /* size */ 1654 16, /* bitsize */ 1655 false, /* pc_relative */ 1656 0, /* bitpos */ 1657 complain_overflow_dont, /* complain_on_overflow */ 1658 bfd_elf_generic_reloc, /* special_function */ 1659 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */ 1660 false, /* partial_inplace */ 1661 0, /* src_mask */ 1662 0xffff, /* dst_mask */ 1663 false), /* pcrel_offset */ 1664 1665 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */ 1666 0, /* rightshift */ 1667 4, /* size */ 1668 16, /* bitsize */ 1669 false, /* 
pc_relative */ 1670 0, /* bitpos */ 1671 complain_overflow_dont, /* complain_on_overflow */ 1672 bfd_elf_generic_reloc, /* special_function */ 1673 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */ 1674 false, /* partial_inplace */ 1675 0, /* src_mask */ 1676 0xffff, /* dst_mask */ 1677 false), /* pcrel_offset */ 1678 1679 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */ 1680 0, /* rightshift */ 1681 4, /* size */ 1682 16, /* bitsize */ 1683 false, /* pc_relative */ 1684 0, /* bitpos */ 1685 complain_overflow_dont, /* complain_on_overflow */ 1686 bfd_elf_generic_reloc, /* special_function */ 1687 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */ 1688 false, /* partial_inplace */ 1689 0, /* src_mask */ 1690 0xffff, /* dst_mask */ 1691 false), /* pcrel_offset */ 1692 1693 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */ 1694 12, /* rightshift */ 1695 4, /* size */ 1696 12, /* bitsize */ 1697 false, /* pc_relative */ 1698 0, /* bitpos */ 1699 complain_overflow_unsigned, /* complain_on_overflow */ 1700 bfd_elf_generic_reloc, /* special_function */ 1701 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */ 1702 false, /* partial_inplace */ 1703 0, /* src_mask */ 1704 0xfff, /* dst_mask */ 1705 false), /* pcrel_offset */ 1706 1707 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */ 1708 0, /* rightshift */ 1709 4, /* size */ 1710 12, /* bitsize */ 1711 false, /* pc_relative */ 1712 0, /* bitpos */ 1713 complain_overflow_unsigned, /* complain_on_overflow */ 1714 bfd_elf_generic_reloc, /* special_function */ 1715 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */ 1716 false, /* partial_inplace */ 1717 0, /* src_mask */ 1718 0xfff, /* dst_mask */ 1719 false), /* pcrel_offset */ 1720 1721 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */ 1722 0, /* rightshift */ 1723 4, /* size */ 1724 12, /* bitsize */ 1725 false, /* pc_relative */ 1726 0, /* bitpos */ 1727 complain_overflow_dont, /* complain_on_overflow */ 1728 bfd_elf_generic_reloc, /* special_function */ 1729 
AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */ 1730 false, /* partial_inplace */ 1731 0, /* src_mask */ 1732 0xfff, /* dst_mask */ 1733 false), /* pcrel_offset */ 1734 1735 /* LD/ST16: bit[11:1] of byte offset to module TLS base address. */ 1736 HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12), /* type */ 1737 1, /* rightshift */ 1738 4, /* size */ 1739 11, /* bitsize */ 1740 false, /* pc_relative */ 1741 10, /* bitpos */ 1742 complain_overflow_unsigned, /* complain_on_overflow */ 1743 bfd_elf_generic_reloc, /* special_function */ 1744 AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12), /* name */ 1745 false, /* partial_inplace */ 1746 0, /* src_mask */ 1747 0x1ffc00, /* dst_mask */ 1748 false), /* pcrel_offset */ 1749 1750 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12, but no overflow check. */ 1751 HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12_NC), /* type */ 1752 1, /* rightshift */ 1753 4, /* size */ 1754 11, /* bitsize */ 1755 false, /* pc_relative */ 1756 10, /* bitpos */ 1757 complain_overflow_dont, /* complain_on_overflow */ 1758 bfd_elf_generic_reloc, /* special_function */ 1759 AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12_NC), /* name */ 1760 false, /* partial_inplace */ 1761 0, /* src_mask */ 1762 0x1ffc00, /* dst_mask */ 1763 false), /* pcrel_offset */ 1764 1765 /* LD/ST32: bit[11:2] of byte offset to module TLS base address. */ 1766 HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12), /* type */ 1767 2, /* rightshift */ 1768 4, /* size */ 1769 10, /* bitsize */ 1770 false, /* pc_relative */ 1771 10, /* bitpos */ 1772 complain_overflow_unsigned, /* complain_on_overflow */ 1773 bfd_elf_generic_reloc, /* special_function */ 1774 AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12), /* name */ 1775 false, /* partial_inplace */ 1776 0, /* src_mask */ 1777 0xffc00, /* dst_mask */ 1778 false), /* pcrel_offset */ 1779 1780 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12, but no overflow check. 
*/ 1781 HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12_NC), /* type */ 1782 2, /* rightshift */ 1783 4, /* size */ 1784 10, /* bitsize */ 1785 false, /* pc_relative */ 1786 10, /* bitpos */ 1787 complain_overflow_dont, /* complain_on_overflow */ 1788 bfd_elf_generic_reloc, /* special_function */ 1789 AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12_NC), /* name */ 1790 false, /* partial_inplace */ 1791 0, /* src_mask */ 1792 0xffc00, /* dst_mask */ 1793 false), /* pcrel_offset */ 1794 1795 /* LD/ST64: bit[11:3] of byte offset to module TLS base address. */ 1796 HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12), /* type */ 1797 3, /* rightshift */ 1798 4, /* size */ 1799 9, /* bitsize */ 1800 false, /* pc_relative */ 1801 10, /* bitpos */ 1802 complain_overflow_unsigned, /* complain_on_overflow */ 1803 bfd_elf_generic_reloc, /* special_function */ 1804 AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12), /* name */ 1805 false, /* partial_inplace */ 1806 0, /* src_mask */ 1807 0x7fc00, /* dst_mask */ 1808 false), /* pcrel_offset */ 1809 1810 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12, but no overflow check. */ 1811 HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12_NC), /* type */ 1812 3, /* rightshift */ 1813 4, /* size */ 1814 9, /* bitsize */ 1815 false, /* pc_relative */ 1816 10, /* bitpos */ 1817 complain_overflow_dont, /* complain_on_overflow */ 1818 bfd_elf_generic_reloc, /* special_function */ 1819 AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12_NC), /* name */ 1820 false, /* partial_inplace */ 1821 0, /* src_mask */ 1822 0x7fc00, /* dst_mask */ 1823 false), /* pcrel_offset */ 1824 1825 /* LD/ST8: bit[11:0] of byte offset to module TLS base address. 
*/ 1826 HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12), /* type */ 1827 0, /* rightshift */ 1828 4, /* size */ 1829 12, /* bitsize */ 1830 false, /* pc_relative */ 1831 10, /* bitpos */ 1832 complain_overflow_unsigned, /* complain_on_overflow */ 1833 bfd_elf_generic_reloc, /* special_function */ 1834 AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12), /* name */ 1835 false, /* partial_inplace */ 1836 0, /* src_mask */ 1837 0x3ffc00, /* dst_mask */ 1838 false), /* pcrel_offset */ 1839 1840 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12, but no overflow check. */ 1841 HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12_NC), /* type */ 1842 0, /* rightshift */ 1843 4, /* size */ 1844 12, /* bitsize */ 1845 false, /* pc_relative */ 1846 10, /* bitpos */ 1847 complain_overflow_dont, /* complain_on_overflow */ 1848 bfd_elf_generic_reloc, /* special_function */ 1849 AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12_NC), /* name */ 1850 false, /* partial_inplace */ 1851 0, /* src_mask */ 1852 0x3ffc00, /* dst_mask */ 1853 false), /* pcrel_offset */ 1854 1855 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */ 1856 2, /* rightshift */ 1857 4, /* size */ 1858 19, /* bitsize */ 1859 true, /* pc_relative */ 1860 0, /* bitpos */ 1861 complain_overflow_dont, /* complain_on_overflow */ 1862 bfd_elf_generic_reloc, /* special_function */ 1863 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */ 1864 false, /* partial_inplace */ 1865 0, /* src_mask */ 1866 0x0ffffe0, /* dst_mask */ 1867 true), /* pcrel_offset */ 1868 1869 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */ 1870 0, /* rightshift */ 1871 4, /* size */ 1872 21, /* bitsize */ 1873 true, /* pc_relative */ 1874 0, /* bitpos */ 1875 complain_overflow_dont, /* complain_on_overflow */ 1876 bfd_elf_generic_reloc, /* special_function */ 1877 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */ 1878 false, /* partial_inplace */ 1879 0, /* src_mask */ 1880 0x1fffff, /* dst_mask */ 1881 true), /* pcrel_offset */ 1882 1883 /* Get to the page for the GOT entry for the symbol 1884 
(G(S) - P) using an ADRP instruction. */ 1885 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */ 1886 12, /* rightshift */ 1887 4, /* size */ 1888 21, /* bitsize */ 1889 true, /* pc_relative */ 1890 0, /* bitpos */ 1891 complain_overflow_dont, /* complain_on_overflow */ 1892 bfd_elf_generic_reloc, /* special_function */ 1893 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */ 1894 false, /* partial_inplace */ 1895 0, /* src_mask */ 1896 0x1fffff, /* dst_mask */ 1897 true), /* pcrel_offset */ 1898 1899 /* LD64: GOT offset G(S) & 0xff8. */ 1900 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12), /* type */ 1901 3, /* rightshift */ 1902 4, /* size */ 1903 12, /* bitsize */ 1904 false, /* pc_relative */ 1905 0, /* bitpos */ 1906 complain_overflow_dont, /* complain_on_overflow */ 1907 bfd_elf_generic_reloc, /* special_function */ 1908 AARCH64_R_STR (TLSDESC_LD64_LO12), /* name */ 1909 false, /* partial_inplace */ 1910 0, /* src_mask */ 1911 0xff8, /* dst_mask */ 1912 false), /* pcrel_offset */ 1913 1914 /* LD32: GOT offset G(S) & 0xffc. */ 1915 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */ 1916 2, /* rightshift */ 1917 4, /* size */ 1918 12, /* bitsize */ 1919 false, /* pc_relative */ 1920 0, /* bitpos */ 1921 complain_overflow_dont, /* complain_on_overflow */ 1922 bfd_elf_generic_reloc, /* special_function */ 1923 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */ 1924 false, /* partial_inplace */ 1925 0, /* src_mask */ 1926 0xffc, /* dst_mask */ 1927 false), /* pcrel_offset */ 1928 1929 /* ADD: GOT offset G(S) & 0xfff. 
*/ 1930 HOWTO (AARCH64_R (TLSDESC_ADD_LO12), /* type */ 1931 0, /* rightshift */ 1932 4, /* size */ 1933 12, /* bitsize */ 1934 false, /* pc_relative */ 1935 0, /* bitpos */ 1936 complain_overflow_dont,/* complain_on_overflow */ 1937 bfd_elf_generic_reloc, /* special_function */ 1938 AARCH64_R_STR (TLSDESC_ADD_LO12), /* name */ 1939 false, /* partial_inplace */ 1940 0, /* src_mask */ 1941 0xfff, /* dst_mask */ 1942 false), /* pcrel_offset */ 1943 1944 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */ 1945 16, /* rightshift */ 1946 4, /* size */ 1947 12, /* bitsize */ 1948 false, /* pc_relative */ 1949 0, /* bitpos */ 1950 complain_overflow_unsigned, /* complain_on_overflow */ 1951 bfd_elf_generic_reloc, /* special_function */ 1952 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */ 1953 false, /* partial_inplace */ 1954 0, /* src_mask */ 1955 0xffff, /* dst_mask */ 1956 false), /* pcrel_offset */ 1957 1958 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */ 1959 0, /* rightshift */ 1960 4, /* size */ 1961 12, /* bitsize */ 1962 false, /* pc_relative */ 1963 0, /* bitpos */ 1964 complain_overflow_dont, /* complain_on_overflow */ 1965 bfd_elf_generic_reloc, /* special_function */ 1966 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */ 1967 false, /* partial_inplace */ 1968 0, /* src_mask */ 1969 0xffff, /* dst_mask */ 1970 false), /* pcrel_offset */ 1971 1972 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */ 1973 0, /* rightshift */ 1974 4, /* size */ 1975 12, /* bitsize */ 1976 false, /* pc_relative */ 1977 0, /* bitpos */ 1978 complain_overflow_dont, /* complain_on_overflow */ 1979 bfd_elf_generic_reloc, /* special_function */ 1980 AARCH64_R_STR (TLSDESC_LDR), /* name */ 1981 false, /* partial_inplace */ 1982 0x0, /* src_mask */ 1983 0x0, /* dst_mask */ 1984 false), /* pcrel_offset */ 1985 1986 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */ 1987 0, /* rightshift */ 1988 4, /* size */ 1989 12, /* bitsize */ 1990 false, /* pc_relative */ 1991 0, /* bitpos */ 1992 complain_overflow_dont, /* 
complain_on_overflow */ 1993 bfd_elf_generic_reloc, /* special_function */ 1994 AARCH64_R_STR (TLSDESC_ADD), /* name */ 1995 false, /* partial_inplace */ 1996 0x0, /* src_mask */ 1997 0x0, /* dst_mask */ 1998 false), /* pcrel_offset */ 1999 2000 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */ 2001 0, /* rightshift */ 2002 4, /* size */ 2003 0, /* bitsize */ 2004 false, /* pc_relative */ 2005 0, /* bitpos */ 2006 complain_overflow_dont, /* complain_on_overflow */ 2007 bfd_elf_generic_reloc, /* special_function */ 2008 AARCH64_R_STR (TLSDESC_CALL), /* name */ 2009 false, /* partial_inplace */ 2010 0x0, /* src_mask */ 2011 0x0, /* dst_mask */ 2012 false), /* pcrel_offset */ 2013 2014 HOWTO (AARCH64_R (COPY), /* type */ 2015 0, /* rightshift */ 2016 4, /* size */ 2017 64, /* bitsize */ 2018 false, /* pc_relative */ 2019 0, /* bitpos */ 2020 complain_overflow_bitfield, /* complain_on_overflow */ 2021 bfd_elf_generic_reloc, /* special_function */ 2022 AARCH64_R_STR (COPY), /* name */ 2023 true, /* partial_inplace */ 2024 0, /* src_mask */ 2025 0xffffffff, /* dst_mask */ 2026 false), /* pcrel_offset */ 2027 2028 HOWTO (AARCH64_R (GLOB_DAT), /* type */ 2029 0, /* rightshift */ 2030 4, /* size */ 2031 64, /* bitsize */ 2032 false, /* pc_relative */ 2033 0, /* bitpos */ 2034 complain_overflow_bitfield, /* complain_on_overflow */ 2035 bfd_elf_generic_reloc, /* special_function */ 2036 AARCH64_R_STR (GLOB_DAT), /* name */ 2037 true, /* partial_inplace */ 2038 0, /* src_mask */ 2039 0xffffffff, /* dst_mask */ 2040 false), /* pcrel_offset */ 2041 2042 HOWTO (AARCH64_R (JUMP_SLOT), /* type */ 2043 0, /* rightshift */ 2044 4, /* size */ 2045 64, /* bitsize */ 2046 false, /* pc_relative */ 2047 0, /* bitpos */ 2048 complain_overflow_bitfield, /* complain_on_overflow */ 2049 bfd_elf_generic_reloc, /* special_function */ 2050 AARCH64_R_STR (JUMP_SLOT), /* name */ 2051 true, /* partial_inplace */ 2052 0, /* src_mask */ 2053 0xffffffff, /* dst_mask */ 2054 false), /* pcrel_offset */ 2055 
2056 HOWTO (AARCH64_R (RELATIVE), /* type */ 2057 0, /* rightshift */ 2058 4, /* size */ 2059 64, /* bitsize */ 2060 false, /* pc_relative */ 2061 0, /* bitpos */ 2062 complain_overflow_bitfield, /* complain_on_overflow */ 2063 bfd_elf_generic_reloc, /* special_function */ 2064 AARCH64_R_STR (RELATIVE), /* name */ 2065 true, /* partial_inplace */ 2066 0, /* src_mask */ 2067 ALL_ONES, /* dst_mask */ 2068 false), /* pcrel_offset */ 2069 2070 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */ 2071 0, /* rightshift */ 2072 4, /* size */ 2073 64, /* bitsize */ 2074 false, /* pc_relative */ 2075 0, /* bitpos */ 2076 complain_overflow_dont, /* complain_on_overflow */ 2077 bfd_elf_generic_reloc, /* special_function */ 2078 #if ARCH_SIZE == 64 2079 AARCH64_R_STR (TLS_DTPMOD64), /* name */ 2080 #else 2081 AARCH64_R_STR (TLS_DTPMOD), /* name */ 2082 #endif 2083 false, /* partial_inplace */ 2084 0, /* src_mask */ 2085 ALL_ONES, /* dst_mask */ 2086 false), /* pc_reloffset */ 2087 2088 HOWTO (AARCH64_R (TLS_DTPREL), /* type */ 2089 0, /* rightshift */ 2090 4, /* size */ 2091 64, /* bitsize */ 2092 false, /* pc_relative */ 2093 0, /* bitpos */ 2094 complain_overflow_dont, /* complain_on_overflow */ 2095 bfd_elf_generic_reloc, /* special_function */ 2096 #if ARCH_SIZE == 64 2097 AARCH64_R_STR (TLS_DTPREL64), /* name */ 2098 #else 2099 AARCH64_R_STR (TLS_DTPREL), /* name */ 2100 #endif 2101 false, /* partial_inplace */ 2102 0, /* src_mask */ 2103 ALL_ONES, /* dst_mask */ 2104 false), /* pcrel_offset */ 2105 2106 HOWTO (AARCH64_R (TLS_TPREL), /* type */ 2107 0, /* rightshift */ 2108 4, /* size */ 2109 64, /* bitsize */ 2110 false, /* pc_relative */ 2111 0, /* bitpos */ 2112 complain_overflow_dont, /* complain_on_overflow */ 2113 bfd_elf_generic_reloc, /* special_function */ 2114 #if ARCH_SIZE == 64 2115 AARCH64_R_STR (TLS_TPREL64), /* name */ 2116 #else 2117 AARCH64_R_STR (TLS_TPREL), /* name */ 2118 #endif 2119 false, /* partial_inplace */ 2120 0, /* src_mask */ 2121 ALL_ONES, /* dst_mask 
 */
	 false),		/* pcrel_offset */

  /* Placed on the first of the pair of GOT slots that hold a TLS
     descriptor; the loader fixes the descriptor up at run time.  */
  HOWTO (AARCH64_R (TLSDESC),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 64,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (TLSDESC),	/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 ALL_ONES,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* Dynamic relocation resolved by the loader to the value returned by
     an STT_GNU_IFUNC resolver function.  */
  HOWTO (AARCH64_R (IRELATIVE),	/* type */
	 0,			/* rightshift */
	 4,			/* size */
	 64,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 AARCH64_R_STR (IRELATIVE),	/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 ALL_ONES,		/* dst_mask */
	 false),		/* pcrel_offset */

  EMPTY_HOWTO (0),
};

/* Fallback howto for R_AARCH64_NONE, which deliberately lives outside
   the table above.  */
static reloc_howto_type elfNN_aarch64_howto_none =
  HOWTO (R_AARCH64_NONE,	/* type */
	 0,			/* rightshift */
	 0,			/* size */
	 0,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_NONE",	/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 false);		/* pcrel_offset */

/* Given HOWTO, return the bfd internal relocation enumerator.
 */

static bfd_reloc_code_real_type
elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
{
  const int size
    = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
  const ptrdiff_t offset
    = howto - elfNN_aarch64_howto_table;

  /* Index 0 and the trailing EMPTY_HOWTO are not valid relocations,
     hence the open interval.  */
  if (offset > 0 && offset < size - 1)
    return BFD_RELOC_AARCH64_RELOC_START + offset;

  if (howto == &elfNN_aarch64_howto_none)
    return BFD_RELOC_AARCH64_NONE;

  /* Not found; BFD_RELOC_AARCH64_RELOC_START doubles as the "invalid"
     sentinel.  */
  return BFD_RELOC_AARCH64_RELOC_START;
}

/* Given R_TYPE, return the bfd internal relocation enumerator.  */

static bfd_reloc_code_real_type
elfNN_aarch64_bfd_reloc_from_type (bfd *abfd, unsigned int r_type)
{
  /* The reverse map is built lazily on first use and cached for the
     lifetime of the process.  */
  static bool initialized_p = false;
  /* Indexed by R_TYPE, values are offsets in the howto_table.  */
  static unsigned int offsets[R_AARCH64_end];

  if (!initialized_p)
    {
      unsigned int i;

      /* Skip index 0 and the trailing EMPTY_HOWTO.  */
      for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
	if (elfNN_aarch64_howto_table[i].type != 0)
	  offsets[elfNN_aarch64_howto_table[i].type] = i;

      initialized_p = true;
    }

  if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
    return BFD_RELOC_AARCH64_NONE;

  /* PR 17512: file: b371e70a.  Reject out-of-range relocation numbers
     from corrupt input files before indexing the offsets table.  */
  if (r_type >= R_AARCH64_end)
    {
      _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
			  abfd, r_type);
      bfd_set_error (bfd_error_bad_value);
      return BFD_RELOC_AARCH64_NONE;
    }

  return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
}

/* One entry of the generic-BFD-reloc to AArch64-specific-reloc map.  */
struct elf_aarch64_reloc_map
{
  bfd_reloc_code_real_type from;
  bfd_reloc_code_real_type to;
};

/* Map bfd generic reloc to AArch64-specific reloc.  */
static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
{
  {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},

  /* Basic data relocations.
 */
  {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
  {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
  {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
  {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
  {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
  {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
  {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
};

/* Given the bfd internal relocation enumerator in CODE, return the
   corresponding howto entry, or NULL if CODE is unsupported.  */

static reloc_howto_type *
elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
{
  unsigned int i;

  /* Convert bfd generic reloc to AArch64-specific reloc.  */
  if (code < BFD_RELOC_AARCH64_RELOC_START
      || code > BFD_RELOC_AARCH64_RELOC_END)
    for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
      if (elf_aarch64_reloc_map[i].from == code)
	{
	  code = elf_aarch64_reloc_map[i].to;
	  break;
	}

  if (code > BFD_RELOC_AARCH64_RELOC_START
      && code < BFD_RELOC_AARCH64_RELOC_END)
    if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
      return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];

  if (code == BFD_RELOC_AARCH64_NONE)
    return &elfNN_aarch64_howto_none;

  return NULL;
}

/* Given the ELF relocation number R_TYPE from ABFD, return the
   corresponding howto entry, or NULL (with bfd_error set) if R_TYPE is
   unsupported.  */

static reloc_howto_type *
elfNN_aarch64_howto_from_type (bfd *abfd, unsigned int r_type)
{
  bfd_reloc_code_real_type val;
  reloc_howto_type *howto;

#if ARCH_SIZE == 32
  /* ILP32 only uses relocation numbers below 256.  */
  if (r_type > 256)
    {
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }
#endif

  if (r_type == R_AARCH64_NONE)
    return &elfNN_aarch64_howto_none;

  val = elfNN_aarch64_bfd_reloc_from_type (abfd, r_type);
  howto = elfNN_aarch64_howto_from_bfd_reloc (val);

  if (howto != NULL)
    return howto;

  bfd_set_error (bfd_error_bad_value);
  return NULL;
}

/* Fill in the howto pointer of BFD_RELOC from the r_type of ELF_RELOC.
   Returns false (after reporting) for an unsupported relocation type.  */

static bool
elfNN_aarch64_info_to_howto (bfd *abfd, arelent *bfd_reloc,
			     Elf_Internal_Rela *elf_reloc)
{
  unsigned int r_type;

  r_type = ELFNN_R_TYPE (elf_reloc->r_info);
  bfd_reloc->howto = elfNN_aarch64_howto_from_type (abfd, r_type);

  if (bfd_reloc->howto == NULL)
    {
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB: unsupported relocation type %#x"), abfd, r_type);
      return false;
    }
  return true;
}

/* bfd_reloc_type_lookup backend hook: map CODE to a howto entry.  */

static reloc_howto_type *
elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
				 bfd_reloc_code_real_type code)
{
  reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);

  if (howto != NULL)
    return howto;

  bfd_set_error (bfd_error_bad_value);
  return NULL;
}

/* bfd_reloc_name_lookup backend hook: case-insensitive search of the
   howto table by relocation name.  */

static reloc_howto_type *
elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
				 const char *r_name)
{
  unsigned int i;

  /* Skip index 0 and the trailing EMPTY_HOWTO.  */
  for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
    if (elfNN_aarch64_howto_table[i].name != NULL
	&& strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
      return &elfNN_aarch64_howto_table[i];

  return NULL;
}

#define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
#define TARGET_LITTLE_NAME "elfNN-littleaarch64"
#define TARGET_BIG_SYM aarch64_elfNN_be_vec
#define TARGET_BIG_NAME "elfNN-bigaarch64"

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found.  The
   name can be changed.  The only requirement is the %s be present.  */
#define STUB_ENTRY_NAME "__%s_veneer"

/* Stub name for a BTI landing stub.  */
#define BTI_STUB_ENTRY_NAME "__%s_bti_veneer"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.
 */
#define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"

/* Reach of a 26-bit relative branch (B/BL): the immediate covers
   +/- 2^25 instruction words, i.e. roughly +/- 128MiB in bytes.  */
#define AARCH64_MAX_FWD_BRANCH_OFFSET \
  (((1 << 25) - 1) << 2)
#define AARCH64_MAX_BWD_BRANCH_OFFSET \
  (-((1 << 25) << 2))

/* Bounds of the signed 21-bit page immediate of an ADRP instruction.  */
#define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
#define AARCH64_MIN_ADRP_IMM (-(1 << 20))

/* Return non-zero if an ADRP at PLACE can address the page containing
   VALUE.  */

static int
aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
{
  bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
  return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
}

/* Return non-zero if a direct branch at PLACE can reach VALUE.  */

static int
aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
{
  bfd_signed_vma offset = (bfd_signed_vma) (value - place);
  return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
	  && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
}

/* Veneer template that reaches its target with ADRP+ADD, usable when
   the target is within ADRP range of the stub itself.  */
static const uint32_t aarch64_adrp_branch_stub [] =
{
  0x90000010,			/*	adrp	ip0, X
				   R_AARCH64_ADR_HI21_PCREL(X)
				 */
  0x91000210,			/*	add	ip0, ip0, :lo12:X
				   R_AARCH64_ADD_ABS_LO12_NC(X)
				 */
  0xd61f0200,			/*	br	ip0 */
};

/* Position-independent long-range veneer template: loads the offset
   stored after the code and adds the stub's own address.  */
static const uint32_t aarch64_long_branch_stub[] =
{
#if ARCH_SIZE == 64
  0x58000090,			/*	ldr   ip0, 1f */
#else
  0x18000090,			/*	ldr   wip0, 1f */
#endif
  0x10000011,			/*	adr   ip1, #0 */
  0x8b110210,			/*	add   ip0, ip0, ip1 */
  0xd61f0200,			/*	br	ip0 */
  0x00000000,			/* 1:	.xword or .word
				   R_AARCH64_PRELNN(X) + 12
				 */
  0x00000000,
};

/* Veneer that provides a BTI landing pad before a direct branch.  */
static const uint32_t aarch64_bti_direct_branch_stub[] =
{
  0xd503245f,			/*	bti   c */
  0x14000000,			/*	b	<label> */
};

/* Erratum 835769 workaround veneer: the offending multiply-accumulate
   is copied into the placeholder slot, followed by a branch back.  */
static const uint32_t aarch64_erratum_835769_stub[] =
{
  0x00000000,    /* Placeholder for multiply accumulate.  */
  0x14000000,    /* b <label> */
};

/* Erratum 843419 workaround veneer, same shape as above.  */
static const uint32_t aarch64_erratum_843419_stub[] =
{
  0x00000000,    /* Placeholder for LDR instruction.
 */
  0x14000000,    /* b <label> */
};

/* Section name for stubs is the associated section name plus this
   string.  */
#define STUB_SUFFIX ".stub"

/* The kinds of stub (veneer) this linker backend can emit.  */
enum elf_aarch64_stub_type
{
  aarch64_stub_none,
  aarch64_stub_adrp_branch,
  aarch64_stub_long_branch,
  aarch64_stub_bti_direct_branch,
  aarch64_stub_erratum_835769_veneer,
  aarch64_stub_erratum_843419_veneer,
};

/* One entry in the stub hash table: everything needed to size, place
   and emit a single stub.  */
struct elf_aarch64_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  enum elf_aarch64_stub_type stub_type;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf_aarch64_link_hash_entry *h;

  /* Destination symbol type.  */
  unsigned char st_type;

  /* The target is also a stub.  */
  bool double_stub;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;

  /* The instruction which caused this stub to be generated (only valid for
     erratum 835769 workaround stubs at present).  */
  uint32_t veneered_insn;

  /* In an erratum 843419 workaround stub, the ADRP instruction offset.  */
  bfd_vma adrp_offset;
};

/* Used to build a map of a section.  This is required for mixed-endian
   code/data.
*/ 2490 2491 typedef struct elf_elf_section_map 2492 { 2493 bfd_vma vma; 2494 char type; 2495 } 2496 elf_aarch64_section_map; 2497 2498 2499 typedef struct _aarch64_elf_section_data 2500 { 2501 struct bfd_elf_section_data elf; 2502 unsigned int mapcount; 2503 unsigned int mapsize; 2504 elf_aarch64_section_map *map; 2505 } 2506 _aarch64_elf_section_data; 2507 2508 #define elf_aarch64_section_data(sec) \ 2509 ((_aarch64_elf_section_data *) elf_section_data (sec)) 2510 2511 /* The size of the thread control block which is defined to be two pointers. */ 2512 #define TCB_SIZE (ARCH_SIZE/8)*2 2513 2514 struct elf_aarch64_local_symbol 2515 { 2516 unsigned int got_type; 2517 bfd_signed_vma got_refcount; 2518 bfd_vma got_offset; 2519 2520 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The 2521 offset is from the end of the jump table and reserved entries 2522 within the PLTGOT. 2523 2524 The magic value (bfd_vma) -1 indicates that an offset has not be 2525 allocated. */ 2526 bfd_vma tlsdesc_got_jump_table_offset; 2527 }; 2528 2529 struct elf_aarch64_obj_tdata 2530 { 2531 struct elf_obj_tdata root; 2532 2533 /* local symbol descriptors */ 2534 struct elf_aarch64_local_symbol *locals; 2535 2536 /* Zero to warn when linking objects with incompatible enum sizes. */ 2537 int no_enum_size_warning; 2538 2539 /* Zero to warn when linking objects with incompatible wchar_t sizes. */ 2540 int no_wchar_size_warning; 2541 2542 /* All GNU_PROPERTY_AARCH64_FEATURE_1_AND properties. */ 2543 uint32_t gnu_and_prop; 2544 2545 /* Zero to warn when linking objects with incompatible 2546 GNU_PROPERTY_AARCH64_FEATURE_1_BTI. */ 2547 int no_bti_warn; 2548 2549 /* PLT type based on security. 
 */
  aarch64_plt_type plt_type;
};

#define elf_aarch64_tdata(bfd)				\
  ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)

#define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)

/* True iff BFD is an ELF object owned by this AArch64 backend.  */
#define is_aarch64_elf(bfd)				\
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour	\
   && elf_tdata (bfd) != NULL				\
   && elf_object_id (bfd) == AARCH64_ELF_DATA)

/* Allocate the backend-specific object tdata for ABFD.  */

static bool
elfNN_aarch64_mkobject (bfd *abfd)
{
  return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
				  AARCH64_ELF_DATA);
}

#define elf_aarch64_hash_entry(ent) \
  ((struct elf_aarch64_link_hash_entry *)(ent))

/* Bit flags recording which kinds of GOT entry a symbol needs.  */
#define GOT_UNKNOWN 0
#define GOT_NORMAL 1
#define GOT_TLS_GD 2
#define GOT_TLS_IE 4
#define GOT_TLSDESC_GD 8

#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))

/* AArch64 ELF linker hash entry.  */
struct elf_aarch64_link_hash_entry
{
  struct elf_link_hash_entry root;

  /* Since PLT entries have variable size, we need to record the
     index into .got.plt instead of recomputing it from the PLT
     offset.  */
  bfd_signed_vma plt_got_offset;

  /* Bit mask representing the type of GOT entry(s) if any required by
     this symbol.  */
  unsigned int got_type;

  /* TRUE if symbol is defined as a protected symbol.  */
  unsigned int def_protected : 1;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf_aarch64_stub_hash_entry *stub_cache;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor.  The offset
     is from the end of the jump table and reserved entries within the PLTGOT.

     The magic value (bfd_vma) -1 indicates that an offset has not
     be allocated.
 */
  bfd_vma tlsdesc_got_jump_table_offset;
};

/* Return the GOT entry type recorded for a symbol: from the global
   hash entry H when non-NULL, otherwise from ABFD's local symbol
   descriptor indexed by R_SYMNDX.  */

static unsigned int
elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
			       bfd *abfd,
			       unsigned long r_symndx)
{
  if (h)
    return elf_aarch64_hash_entry (h)->got_type;

  if (! elf_aarch64_locals (abfd))
    return GOT_UNKNOWN;

  return elf_aarch64_locals (abfd)[r_symndx].got_type;
}

/* Get the AArch64 elf linker hash table from a link_info structure.  */
#define elf_aarch64_hash_table(info)					\
  ((struct elf_aarch64_link_hash_table *) ((info)->hash))

#define aarch64_stub_hash_lookup(table, string, create, copy)		\
  ((struct elf_aarch64_stub_hash_entry *)				\
   bfd_hash_lookup ((table), (string), (create), (copy)))

/* AArch64 ELF linker hash table.  */
struct elf_aarch64_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* Fix erratum 835769.  */
  int fix_erratum_835769;

  /* Fix erratum 843419.  */
  erratum_84319_opts fix_erratum_843419;

  /* Don't apply link-time values for dynamic relocations.  */
  int no_apply_dynamic_relocs;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The bytes of the initial PLT entry.  */
  const bfd_byte *plt0_entry;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* The bytes of the subsequent PLT entry.  */
  const bfd_byte *plt_entry;

  /* For convenience in allocate_dynrelocs.  */
  bfd *obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.
 */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection *(*add_stub_section) (const char *, asection *);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub
  {
    /* This is the section to which stubs in the group will be
       attached.  */
    asection *link_sec;
    /* The stub section.  */
    asection *stub_sec;
  } *stub_group;

  /* Assorted information used by elfNN_aarch64_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;

  /* True when two stubs are added where one targets the other, happens
     when BTI stubs are inserted and then the stub layout must not change
     during elfNN_aarch64_build_stubs.  */
  bool has_double_stub;

  /* JUMP_SLOT relocs for variant PCS symbols may be present.  */
  int variant_pcs;

  /* The number of bytes in the PLT entry for the TLS descriptor.  */
  bfd_size_type tlsdesc_plt_entry_size;

  /* Used by local STT_GNU_IFUNC symbols.  */
  htab_t loc_hash_table;
  void * loc_hash_memory;
};

/* Create an entry in an AArch64 ELF linker hash table.  */

static struct bfd_hash_entry *
elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
				 struct bfd_hash_table *table,
				 const char *string)
{
  struct elf_aarch64_link_hash_entry *ret =
    (struct elf_aarch64_link_hash_entry *) entry;

  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (ret == NULL)
    ret = bfd_hash_allocate (table,
			     sizeof (struct elf_aarch64_link_hash_entry));
  if (ret == NULL)
    return (struct bfd_hash_entry *) ret;

  /* Call the allocation method of the superclass.
 */
  ret = ((struct elf_aarch64_link_hash_entry *)
	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
				     table, string));
  if (ret != NULL)
    {
      /* Initialise the backend-specific fields; (bfd_vma) -1 marks
	 "offset not yet allocated".  */
      ret->got_type = GOT_UNKNOWN;
      ret->def_protected = 0;
      ret->plt_got_offset = (bfd_vma) - 1;
      ret->stub_cache = NULL;
      ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
    }

  return (struct bfd_hash_entry *) ret;
}

/* Initialize an entry in the stub hash table.  */

static struct bfd_hash_entry *
stub_hash_newfunc (struct bfd_hash_entry *entry,
		   struct bfd_hash_table *table, const char *string)
{
  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (entry == NULL)
    {
      entry = bfd_hash_allocate (table,
				 sizeof (struct
					 elf_aarch64_stub_hash_entry));
      if (entry == NULL)
	return entry;
    }

  /* Call the allocation method of the superclass.  */
  entry = bfd_hash_newfunc (entry, table, string);
  if (entry != NULL)
    {
      struct elf_aarch64_stub_hash_entry *eh;

      /* Initialize the local fields: zero everything from stub_sec to
	 the end of the structure in one go.  */
      eh = (struct elf_aarch64_stub_hash_entry *) entry;
      memset (&eh->stub_sec, 0,
	      (sizeof (struct elf_aarch64_stub_hash_entry)
	       - offsetof (struct elf_aarch64_stub_hash_entry, stub_sec)));
    }

  return entry;
}

/* Compute a hash of a local hash entry.  We use elf_link_hash_entry
   for local symbol so that we can handle local STT_GNU_IFUNC symbols
   as global symbol.  We reuse indx and dynstr_index for local symbol
   hash since they aren't used by global symbols in this backend.  */

static hashval_t
elfNN_aarch64_local_htab_hash (const void *ptr)
{
  struct elf_link_hash_entry *h
    = (struct elf_link_hash_entry *) ptr;
  return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
}

/* Compare local hash entries.
*/ 2792 2793 static int 2794 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2) 2795 { 2796 struct elf_link_hash_entry *h1 2797 = (struct elf_link_hash_entry *) ptr1; 2798 struct elf_link_hash_entry *h2 2799 = (struct elf_link_hash_entry *) ptr2; 2800 2801 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index; 2802 } 2803 2804 /* Find and/or create a hash entry for local symbol. */ 2805 2806 static struct elf_link_hash_entry * 2807 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab, 2808 bfd *abfd, const Elf_Internal_Rela *rel, 2809 bool create) 2810 { 2811 struct elf_aarch64_link_hash_entry e, *ret; 2812 asection *sec = abfd->sections; 2813 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id, 2814 ELFNN_R_SYM (rel->r_info)); 2815 void **slot; 2816 2817 e.root.indx = sec->id; 2818 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info); 2819 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h, 2820 create ? INSERT : NO_INSERT); 2821 2822 if (!slot) 2823 return NULL; 2824 2825 if (*slot) 2826 { 2827 ret = (struct elf_aarch64_link_hash_entry *) *slot; 2828 return &ret->root; 2829 } 2830 2831 ret = (struct elf_aarch64_link_hash_entry *) 2832 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory, 2833 sizeof (struct elf_aarch64_link_hash_entry)); 2834 if (ret) 2835 { 2836 memset (ret, 0, sizeof (*ret)); 2837 ret->root.indx = sec->id; 2838 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info); 2839 ret->root.dynindx = -1; 2840 *slot = ret; 2841 } 2842 return &ret->root; 2843 } 2844 2845 /* Copy the extra info we tack onto an elf_link_hash_entry. 
*/ 2846 2847 static void 2848 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info, 2849 struct elf_link_hash_entry *dir, 2850 struct elf_link_hash_entry *ind) 2851 { 2852 struct elf_aarch64_link_hash_entry *edir, *eind; 2853 2854 edir = (struct elf_aarch64_link_hash_entry *) dir; 2855 eind = (struct elf_aarch64_link_hash_entry *) ind; 2856 2857 if (ind->root.type == bfd_link_hash_indirect) 2858 { 2859 /* Copy over PLT info. */ 2860 if (dir->got.refcount <= 0) 2861 { 2862 edir->got_type = eind->got_type; 2863 eind->got_type = GOT_UNKNOWN; 2864 } 2865 } 2866 2867 _bfd_elf_link_hash_copy_indirect (info, dir, ind); 2868 } 2869 2870 /* Merge non-visibility st_other attributes. */ 2871 2872 static void 2873 elfNN_aarch64_merge_symbol_attribute (struct elf_link_hash_entry *h, 2874 unsigned int st_other, 2875 bool definition, 2876 bool dynamic ATTRIBUTE_UNUSED) 2877 { 2878 if (definition) 2879 { 2880 struct elf_aarch64_link_hash_entry *eh 2881 = (struct elf_aarch64_link_hash_entry *)h; 2882 eh->def_protected = ELF_ST_VISIBILITY (st_other) == STV_PROTECTED; 2883 } 2884 2885 unsigned int isym_sto = st_other & ~ELF_ST_VISIBILITY (-1); 2886 unsigned int h_sto = h->other & ~ELF_ST_VISIBILITY (-1); 2887 2888 if (isym_sto == h_sto) 2889 return; 2890 2891 if (isym_sto & ~STO_AARCH64_VARIANT_PCS) 2892 /* Not fatal, this callback cannot fail. */ 2893 _bfd_error_handler (_("unknown attribute for symbol `%s': 0x%02x"), 2894 h->root.root.string, isym_sto); 2895 2896 /* Note: Ideally we would warn about any attribute mismatch, but 2897 this api does not allow that without substantial changes. */ 2898 if (isym_sto & STO_AARCH64_VARIANT_PCS) 2899 h->other |= STO_AARCH64_VARIANT_PCS; 2900 } 2901 2902 /* Destroy an AArch64 elf linker hash table. 
*/

static void
elfNN_aarch64_link_hash_table_free (bfd *obfd)
{
  struct elf_aarch64_link_hash_table *ret
    = (struct elf_aarch64_link_hash_table *) obfd->link.hash;

  /* loc_hash_table / loc_hash_memory may be NULL if table creation
     failed part-way; free only what exists.  */
  if (ret->loc_hash_table)
    htab_delete (ret->loc_hash_table);
  if (ret->loc_hash_memory)
    objalloc_free ((struct objalloc *) ret->loc_hash_memory);

  bfd_hash_table_free (&ret->stub_hash_table);
  _bfd_elf_link_hash_table_free (obfd);
}

/* Create an AArch64 elf linker hash table.  */

static struct bfd_link_hash_table *
elfNN_aarch64_link_hash_table_create (bfd *abfd)
{
  struct elf_aarch64_link_hash_table *ret;
  size_t amt = sizeof (struct elf_aarch64_link_hash_table);

  ret = bfd_zmalloc (amt);
  if (ret == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init
      (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
       sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
    {
      free (ret);
      return NULL;
    }

  /* Default to the small-model PLT templates; relocate_section may
     switch these for BTI/PAC variants elsewhere.  */
  ret->plt_header_size = PLT_ENTRY_SIZE;
  ret->plt0_entry = elfNN_aarch64_small_plt0_entry;
  ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
  ret->plt_entry = elfNN_aarch64_small_plt_entry;
  ret->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE;
  ret->obfd = abfd;
  /* -1 marks "no TLSDESC GOT slot assigned yet".  */
  ret->root.tlsdesc_got = (bfd_vma) - 1;

  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct elf_aarch64_stub_hash_entry)))
    {
      _bfd_elf_link_hash_table_free (abfd);
      return NULL;
    }

  ret->loc_hash_table = htab_try_create (1024,
					 elfNN_aarch64_local_htab_hash,
					 elfNN_aarch64_local_htab_eq,
					 NULL);
  ret->loc_hash_memory = objalloc_create ();
  if (!ret->loc_hash_table || !ret->loc_hash_memory)
    {
      /* The custom free routine handles partially-constructed state.  */
      elfNN_aarch64_link_hash_table_free (abfd);
      return NULL;
    }
  ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;

  return &ret->root.root;
}

/* Perform relocation R_TYPE.  Returns TRUE upon success, FALSE otherwise.  */

static bool
aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
		  bfd_vma offset, bfd_vma value)
{
  reloc_howto_type *howto;
  bfd_vma place;

  howto = elfNN_aarch64_howto_from_type (input_bfd, r_type);
  /* PLACE is the address being relocated, needed for PC-relative
     resolution.  */
  place = (input_section->output_section->vma + input_section->output_offset
	   + offset);

  /* Translate the ELF relocation number to a BFD reloc code before
     resolving and patching the addend into the instruction.  */
  r_type = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
  value = _bfd_aarch64_elf_resolve_relocation (input_bfd, r_type, place,
					       value, 0, false);
  return _bfd_aarch64_elf_put_addend (input_bfd,
				      input_section->contents + offset, r_type,
				      howto, value) == bfd_reloc_ok;
}

/* Determine the type of stub needed, if any, for a call.  */

static enum elf_aarch64_stub_type
aarch64_type_of_stub (asection *input_sec,
		      const Elf_Internal_Rela *rel,
		      asection *sym_sec,
		      unsigned char st_type,
		      bfd_vma destination)
{
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  enum elf_aarch64_stub_type stub_type = aarch64_stub_none;

  /* A non-function target within the same section never needs a stub.  */
  if (st_type != STT_FUNC
      && (sym_sec == input_sec))
    return stub_type;

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma + rel->r_offset);

  branch_offset = (bfd_signed_vma) (destination - location);

  r_type = ELFNN_R_TYPE (rel->r_info);

  /* We don't want to redirect any old unconditional jump in this way,
     only one which is being used for a sibcall, where it is
     acceptable for the IP0 and IP1 registers to be clobbered.  */
  if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
      && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
	  || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
    {
      stub_type = aarch64_stub_long_branch;
    }

  return stub_type;
}

/* Build a name for an entry in the stub hash table.  */

static char *
elfNN_aarch64_stub_name (const asection *input_section,
			 const asection *sym_sec,
			 const struct elf_aarch64_link_hash_entry *hash,
			 const Elf_Internal_Rela *rel)
{
  char *stub_name;
  bfd_size_type len;

  /* Global symbols are named "<sec_id>_<symname>+<addend>", locals
     "<sec_id>_<sym_sec_id>:<sym_index>+<addend>".  Caller frees.  */
  if (hash)
    {
      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
      stub_name = bfd_malloc (len);
      if (stub_name != NULL)
	snprintf (stub_name, len, "%08x_%s+%" PRIx64,
		  (unsigned int) input_section->id,
		  hash->root.root.root.string,
		  (uint64_t) rel->r_addend);
    }
  else
    {
      len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
      stub_name = bfd_malloc (len);
      if (stub_name != NULL)
	snprintf (stub_name, len, "%08x_%x:%x+%" PRIx64,
		  (unsigned int) input_section->id,
		  (unsigned int) sym_sec->id,
		  (unsigned int) ELFNN_R_SYM (rel->r_info),
		  (uint64_t) rel->r_addend);
    }

  return stub_name;
}

/* Return TRUE if symbol H should be hashed in the `.gnu.hash' section.  For
   executable PLT slots where the executable never takes the address of those
   functions, the function symbols are not added to the hash table.  */

static bool
elf_aarch64_hash_symbol (struct elf_link_hash_entry *h)
{
  if (h->plt.offset != (bfd_vma) -1
      && !h->def_regular
      && !h->pointer_equality_needed)
    return false;

  return _bfd_elf_hash_symbol (h);
}


/* Look up an entry in the stub hash.  Stub entries are cached because
   creating the stub name takes a bit of time.
*/

static struct elf_aarch64_stub_hash_entry *
elfNN_aarch64_get_stub_entry (const asection *input_section,
			      const asection *sym_sec,
			      struct elf_link_hash_entry *hash,
			      const Elf_Internal_Rela *rel,
			      struct elf_aarch64_link_hash_table *htab)
{
  struct elf_aarch64_stub_hash_entry *stub_entry;
  struct elf_aarch64_link_hash_entry *h =
    (struct elf_aarch64_link_hash_entry *) hash;
  const asection *id_sec;

  /* Only code sections can contain the branches that need stubs.  */
  if ((input_section->flags & SEC_CODE) == 0)
    return NULL;

  /* If this input section is part of a group of sections sharing one
     stub section, then use the id of the first section in the group.
     Stub names need to include a section id, as there may well be
     more than one stub used to reach say, printf, and we need to
     distinguish between them.  */
  id_sec = htab->stub_group[input_section->id].link_sec;

  /* Fast path: reuse the per-symbol cache when it matches this group.  */
  if (h != NULL && h->stub_cache != NULL
      && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
    {
      stub_entry = h->stub_cache;
    }
  else
    {
      char *stub_name;

      stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
      if (stub_name == NULL)
	return NULL;

      /* Lookup only — never creates; NULL means no stub exists.  */
      stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
					     stub_name, false, false);
      if (h != NULL)
	h->stub_cache = stub_entry;

      free (stub_name);
    }

  return stub_entry;
}


/* Create a stub section.
*/

static asection *
_bfd_aarch64_create_stub_section (asection *section,
				  struct elf_aarch64_link_hash_table *htab)
{
  size_t namelen;
  bfd_size_type len;
  char *s_name;

  /* Stub section is named "<section name><STUB_SUFFIX>".  */
  namelen = strlen (section->name);
  len = namelen + sizeof (STUB_SUFFIX);
  s_name = bfd_alloc (htab->stub_bfd, len);
  if (s_name == NULL)
    return NULL;

  memcpy (s_name, section->name, namelen);
  memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
  return (*htab->add_stub_section) (s_name, section);
}


/* Find or create a stub section for a link section.

   Fix or create the stub section used to collect stubs attached to
   the specified link section.  */

static asection *
_bfd_aarch64_get_stub_for_link_section (asection *link_section,
					struct elf_aarch64_link_hash_table *htab)
{
  /* Lazily create one stub section per stub group.  */
  if (htab->stub_group[link_section->id].stub_sec == NULL)
    htab->stub_group[link_section->id].stub_sec
      = _bfd_aarch64_create_stub_section (link_section, htab);
  return htab->stub_group[link_section->id].stub_sec;
}


/* Find or create a stub section in the stub group for an input
   section.  */

static asection *
_bfd_aarch64_create_or_find_stub_sec (asection *section,
				      struct elf_aarch64_link_hash_table *htab)
{
  asection *link_sec = htab->stub_group[section->id].link_sec;
  return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
}


/* Add a new stub entry in the stub group associated with an input
   section to the stub hash.  Not all fields of the new stub entry are
   initialised.  */

static struct elf_aarch64_stub_hash_entry *
_bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
				      asection *section,
				      struct elf_aarch64_link_hash_table *htab)
{
  asection *link_sec;
  asection *stub_sec;
  struct elf_aarch64_stub_hash_entry *stub_entry;

  link_sec = htab->stub_group[section->id].link_sec;
  stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);

  /* Enter this entry into the linker stub hash table.  */
  stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
					 true, false);
  if (stub_entry == NULL)
    {
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
			  section->owner, stub_name);
      return NULL;
    }

  stub_entry->stub_sec = stub_sec;
  stub_entry->stub_offset = 0;
  stub_entry->id_sec = link_sec;

  return stub_entry;
}

/* Add a new stub entry in the final stub section to the stub hash.
   Not all fields of the new stub entry are initialised.  */

static struct elf_aarch64_stub_hash_entry *
_bfd_aarch64_add_stub_entry_after (const char *stub_name,
				   asection *link_section,
				   struct elf_aarch64_link_hash_table *htab)
{
  asection *stub_sec;
  struct elf_aarch64_stub_hash_entry *stub_entry;

  stub_sec = NULL;
  /* Only create the actual stub if we will end up needing it.  */
  if (htab->fix_erratum_843419 & ERRAT_ADRP)
    stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
  stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
					 true, false);
  if (stub_entry == NULL)
    {
      _bfd_error_handler (_("cannot create stub entry %s"), stub_name);
      return NULL;
    }

  stub_entry->stub_sec = stub_sec;
  stub_entry->stub_offset = 0;
  stub_entry->id_sec = link_section;

  return stub_entry;
}


/* Build one stub: emit the template instructions at the current end of
   the stub section and apply the stub's internal relocations.  Called
   via bfd_hash_traverse; IN_ARG is the bfd_link_info.  */

static bool
aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf_aarch64_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_byte *loc;
  bfd_vma sym_value;
  bfd_vma veneered_insn_loc;
  bfd_vma veneer_entry_loc;
  bfd_signed_vma branch_offset = 0;
  unsigned int template_size;
  unsigned int pad_size = 0;
  const uint32_t *template;
  unsigned int i;
  struct bfd_link_info *info;
  struct elf_aarch64_link_hash_table *htab;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;

  info = (struct bfd_link_info *) in_arg;
  htab = elf_aarch64_hash_table (info);

  /* Fail if the target section could not be assigned to an output
     section.  The user should fix his linker script.  */
  if (stub_entry->target_section->output_section == NULL
      && info->non_contiguous_regions)
    info->callbacks->einfo (_("%F%P: Could not assign `%pA' to an output section. "
			      "Retry without "
			      "--enable-non-contiguous-regions.\n"),
			    stub_entry->target_section);

  stub_sec = stub_entry->stub_sec;

  /* The layout must not change when a stub may be the target of another.  */
  if (htab->has_double_stub)
    BFD_ASSERT (stub_entry->stub_offset == stub_sec->size);

  /* Make a note of the offset within the stubs for this entry.  */
  stub_entry->stub_offset = stub_sec->size;
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
	       + stub_entry->target_section->output_offset
	       + stub_entry->target_section->output_section->vma);

  if (stub_entry->stub_type == aarch64_stub_long_branch)
    {
      bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
		       + stub_sec->output_offset);

      /* See if we can relax the stub.  */
      if (aarch64_valid_for_adrp_p (sym_value, place))
	{
	  stub_entry->stub_type = aarch64_stub_adrp_branch;

	  /* Avoid the relaxation changing the layout.  */
	  if (htab->has_double_stub)
	    pad_size = sizeof (aarch64_long_branch_stub)
	      - sizeof (aarch64_adrp_branch_stub);
	}
    }

  switch (stub_entry->stub_type)
    {
    case aarch64_stub_adrp_branch:
      template = aarch64_adrp_branch_stub;
      template_size = sizeof (aarch64_adrp_branch_stub);
      break;
    case aarch64_stub_long_branch:
      template = aarch64_long_branch_stub;
      template_size = sizeof (aarch64_long_branch_stub);
      break;
    case aarch64_stub_bti_direct_branch:
      template = aarch64_bti_direct_branch_stub;
      template_size = sizeof (aarch64_bti_direct_branch_stub);
      break;
    case aarch64_stub_erratum_835769_veneer:
      template = aarch64_erratum_835769_stub;
      template_size = sizeof (aarch64_erratum_835769_stub);
      break;
    case aarch64_stub_erratum_843419_veneer:
      template = aarch64_erratum_843419_stub;
      template_size = sizeof (aarch64_erratum_843419_stub);
      break;
    default:
      abort ();
    }

  /* Copy the template instructions (little-endian words).  */
  for (i = 0; i < (template_size / sizeof template[0]); i++)
    {
      bfd_putl32 (template[i], loc);
      loc += 4;
    }

  /* Account for relaxation padding and keep the section 8-byte
     aligned.  */
  template_size += pad_size;
  template_size = (template_size + 7) & ~7;
  stub_sec->size += template_size;

  switch (stub_entry->stub_type)
    {
    case aarch64_stub_adrp_branch:
      if (!aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
			     stub_entry->stub_offset, sym_value))
	/* The stub would not have been relaxed if the offset was out
	   of range.  */
	BFD_FAIL ();

      if (!aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
			     stub_entry->stub_offset + 4, sym_value))
	BFD_FAIL ();
      break;

    case aarch64_stub_long_branch:
      /* We want the value relative to the address 12 bytes back from the
	 value itself.  */
      if (!aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
			     stub_entry->stub_offset + 16, sym_value + 12))
	BFD_FAIL ();
      break;

    case aarch64_stub_bti_direct_branch:
      if (!aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
			     stub_entry->stub_offset + 4, sym_value))
	BFD_FAIL ();
      break;

    case aarch64_stub_erratum_835769_veneer:
      /* Patch in the saved instruction, then branch back to the
	 instruction after the one that was moved into the veneer.  */
      veneered_insn_loc = stub_entry->target_section->output_section->vma
	+ stub_entry->target_section->output_offset
	+ stub_entry->target_value;
      veneer_entry_loc = stub_entry->stub_sec->output_section->vma
	+ stub_entry->stub_sec->output_offset
	+ stub_entry->stub_offset;
      branch_offset = veneered_insn_loc - veneer_entry_loc;
      branch_offset >>= 2;
      branch_offset &= 0x3ffffff;
      bfd_putl32 (stub_entry->veneered_insn,
		  stub_sec->contents + stub_entry->stub_offset);
      bfd_putl32 (template[1] | branch_offset,
		  stub_sec->contents + stub_entry->stub_offset + 4);
      break;

    case aarch64_stub_erratum_843419_veneer:
      if (!aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
			     stub_entry->stub_offset + 4, sym_value + 4))
	BFD_FAIL ();
      break;

    default:
      abort ();
    }

  return true;
}

/* As above, but don't actually build the stub.
Just bump offset so
   we know stub section sizes and record the offset for each stub so
   a stub can target another stub (needed for BTI direct branch stub).  */

static bool
aarch64_size_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
{
  struct elf_aarch64_stub_hash_entry *stub_entry;
  struct elf_aarch64_link_hash_table *htab;
  int size;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
  htab = (struct elf_aarch64_link_hash_table *) in_arg;

  switch (stub_entry->stub_type)
    {
    case aarch64_stub_adrp_branch:
      size = sizeof (aarch64_adrp_branch_stub);
      break;
    case aarch64_stub_long_branch:
      size = sizeof (aarch64_long_branch_stub);
      break;
    case aarch64_stub_bti_direct_branch:
      size = sizeof (aarch64_bti_direct_branch_stub);
      break;
    case aarch64_stub_erratum_835769_veneer:
      size = sizeof (aarch64_erratum_835769_stub);
      break;
    case aarch64_stub_erratum_843419_veneer:
      {
	/* The ADR fix is applied in place; no stub section space is
	   consumed for it.  */
	if (htab->fix_erratum_843419 == ERRAT_ADR)
	  return true;
	size = sizeof (aarch64_erratum_843419_stub);
      }
      break;
    default:
      abort ();
    }

  /* Keep each stub 8-byte aligned, matching aarch64_build_one_stub.  */
  size = (size + 7) & ~7;
  stub_entry->stub_offset = stub_entry->stub_sec->size;
  stub_entry->stub_sec->size += size;
  return true;
}

/* Output is BTI compatible.  */

static bool
elf_aarch64_bti_p (bfd *output_bfd)
{
  uint32_t prop = elf_aarch64_tdata (output_bfd)->gnu_and_prop;
  return prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
}

/* External entry points for sizing and building linker stubs.  */

/* Set up various things so that we can make a list of input sections
   for each output section included in the link.  Returns -1 on error,
   0 when no stubs will be needed, and 1 on success.
*/

int
elfNN_aarch64_setup_section_lists (bfd *output_bfd,
				   struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  unsigned int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  size_t amt;
  struct elf_aarch64_link_hash_table *htab =
    elf_aarch64_hash_table (info);

  if (!is_elf_hash_table (&htab->root.root))
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL; input_bfd = input_bfd->link.next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL; section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }
  htab->bfd_count = bfd_count;

  /* stub_group is indexed by input section id, hence top_id + 1
     entries.  */
  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL; section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  /* Only code output sections collect input-section lists; they are
     reset to empty (NULL) here.  */
  for (section = output_bfd->sections;
       section != NULL; section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  return 1;
}

/* Used by elfNN_aarch64_next_input_section and group_sections.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)

/* The linker repeatedly calls this function for each input section,
   in the order that input sections are linked into output sections.
   Build lists of input sections to determine groupings between which
   we may insert linker stubs.  */

void
elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
{
  struct elf_aarch64_link_hash_table *htab =
    elf_aarch64_hash_table (info);

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* bfd_abs_section_ptr marks an output section we skip (set up in
	 elfNN_aarch64_setup_section_lists).  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
	  /* This happens to make the list in reverse order,
	     which is what we want.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}

/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the beginning of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.
*/ 3572 3573 static void 3574 group_sections (struct elf_aarch64_link_hash_table *htab, 3575 bfd_size_type stub_group_size, 3576 bool stubs_always_after_branch) 3577 { 3578 asection **list = htab->input_list; 3579 3580 do 3581 { 3582 asection *tail = *list; 3583 asection *head; 3584 3585 if (tail == bfd_abs_section_ptr) 3586 continue; 3587 3588 /* Reverse the list: we must avoid placing stubs at the 3589 beginning of the section because the beginning of the text 3590 section may be required for an interrupt vector in bare metal 3591 code. */ 3592 #define NEXT_SEC PREV_SEC 3593 head = NULL; 3594 while (tail != NULL) 3595 { 3596 /* Pop from tail. */ 3597 asection *item = tail; 3598 tail = PREV_SEC (item); 3599 3600 /* Push on head. */ 3601 NEXT_SEC (item) = head; 3602 head = item; 3603 } 3604 3605 while (head != NULL) 3606 { 3607 asection *curr; 3608 asection *next; 3609 bfd_vma stub_group_start = head->output_offset; 3610 bfd_vma end_of_next; 3611 3612 curr = head; 3613 while (NEXT_SEC (curr) != NULL) 3614 { 3615 next = NEXT_SEC (curr); 3616 end_of_next = next->output_offset + next->size; 3617 if (end_of_next - stub_group_start >= stub_group_size) 3618 /* End of NEXT is too far from start, so stop. */ 3619 break; 3620 /* Add NEXT to the group. */ 3621 curr = next; 3622 } 3623 3624 /* OK, the size from the start to the start of CURR is less 3625 than stub_group_size and thus can be handled by one stub 3626 section. (Or the head section is itself larger than 3627 stub_group_size, in which case we may be toast.) 3628 We should really be keeping track of the total size of 3629 stubs added here, as stubs contribute to the final output 3630 section size. */ 3631 do 3632 { 3633 next = NEXT_SEC (head); 3634 /* Set up this stub group. */ 3635 htab->stub_group[head->id].link_sec = curr; 3636 } 3637 while (head != curr && (head = next) != NULL); 3638 3639 /* But wait, there's more! Input sections up to stub_group_size 3640 bytes after the stub section can be handled by it too. 
*/ 3641 if (!stubs_always_after_branch) 3642 { 3643 stub_group_start = curr->output_offset + curr->size; 3644 3645 while (next != NULL) 3646 { 3647 end_of_next = next->output_offset + next->size; 3648 if (end_of_next - stub_group_start >= stub_group_size) 3649 /* End of NEXT is too far from stubs, so stop. */ 3650 break; 3651 /* Add NEXT to the stub group. */ 3652 head = next; 3653 next = NEXT_SEC (head); 3654 htab->stub_group[head->id].link_sec = curr; 3655 } 3656 } 3657 head = next; 3658 } 3659 } 3660 while (list++ != htab->input_list + htab->top_index); 3661 3662 free (htab->input_list); 3663 } 3664 3665 #undef PREV_SEC 3666 #undef PREV_SEC 3667 3668 #define AARCH64_HINT(insn) (((insn) & 0xfffff01f) == 0xd503201f) 3669 #define AARCH64_PACIASP 0xd503233f 3670 #define AARCH64_PACIBSP 0xd503237f 3671 #define AARCH64_BTI_C 0xd503245f 3672 #define AARCH64_BTI_J 0xd503249f 3673 #define AARCH64_BTI_JC 0xd50324df 3674 3675 /* True if the inserted stub does not break BTI compatibility. */ 3676 3677 static bool 3678 aarch64_bti_stub_p (struct bfd_link_info *info, 3679 struct elf_aarch64_stub_hash_entry *stub_entry) 3680 { 3681 /* Stubs without indirect branch are BTI compatible. */ 3682 if (stub_entry->stub_type != aarch64_stub_adrp_branch 3683 && stub_entry->stub_type != aarch64_stub_long_branch) 3684 return true; 3685 3686 /* Return true if the target instruction is compatible with BR x16. */ 3687 3688 struct elf_aarch64_link_hash_table *globals = elf_aarch64_hash_table (info); 3689 asection *section = stub_entry->target_section; 3690 bfd_byte loc[4]; 3691 file_ptr off = stub_entry->target_value; 3692 bfd_size_type count = sizeof (loc); 3693 3694 /* PLT code is not generated yet, so treat it specially. 3695 Note: Checking elf_aarch64_obj_tdata.plt_type & PLT_BTI is not 3696 enough because it only implies BTI in the PLT0 and tlsdesc PLT 3697 entries. 
Normal PLT entries don't have BTI in a shared library
     (because such PLT is normally not called indirectly and adding
     the BTI when a stub targets a PLT would change the PLT layout
     and it's too late for that here).  */
  if (section == globals->root.splt)
    memcpy (loc, globals->plt_entry, count);
  else if (!bfd_get_section_contents (section->owner, section, loc, off, count))
    return false;

  uint32_t insn = bfd_getl32 (loc);
  if (!AARCH64_HINT (insn))
    return false;
  /* Any of these HINT-space landing pad / PAC instructions is a valid
     target for BR x16.  */
  return insn == AARCH64_BTI_C
    || insn == AARCH64_PACIASP
    || insn == AARCH64_BTI_JC
    || insn == AARCH64_BTI_J
    || insn == AARCH64_PACIBSP;
}

/* Field extraction helpers for A64 instruction words.  */
#define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))

#define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
#define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
#define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
#define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
#define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
#define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)

#define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
#define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
#define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
#define AARCH64_ZR 0x1f

/* All ld/st ops.  See C4-182 of the ARM ARM.  The encoding space for
   LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops.  */

#define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
#define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
#define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
#define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
#define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
#define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
#define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
#define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
#define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
#define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
#define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
#define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
#define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
#define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
#define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
#define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
#define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
#define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)

/* Classify an INSN if it is indeed a load/store.

   Return TRUE if INSN is a LD/ST instruction otherwise return FALSE.

   For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
   is set equal to RT.

   For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned.  */

static bool
aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
		  bool *pair, bool *load)
{
  uint32_t opcode;
  unsigned int r;
  uint32_t opc = 0;
  uint32_t v = 0;
  uint32_t opc_v = 0;

  /* Bail out quickly if INSN doesn't fall into the load-store
     encoding space.  */
  if (!AARCH64_LDST (insn))
    return false;

  *pair = false;
  *load = false;
  if (AARCH64_LDST_EX (insn))
    {
      /* Load/store exclusive; bit 21 distinguishes the pair forms.  */
      *rt = AARCH64_RT (insn);
      *rt2 = *rt;
      if (AARCH64_BIT (insn, 21) == 1)
	{
	  *pair = true;
	  *rt2 = AARCH64_RT2 (insn);
	}
      *load = AARCH64_LD (insn);
      return true;
    }
  else if (AARCH64_LDST_NAP (insn)
	   || AARCH64_LDSTP_PI (insn)
	   || AARCH64_LDSTP_O (insn)
	   || AARCH64_LDSTP_PRE (insn))
    {
      /* Load/store pair (non-allocating, post/pre-indexed, offset).  */
      *pair = true;
      *rt = AARCH64_RT (insn);
      *rt2 = AARCH64_RT2 (insn);
      *load = AARCH64_LD (insn);
      return true;
    }
  else if (AARCH64_LDST_PCREL (insn)
	   || AARCH64_LDST_UI (insn)
	   || AARCH64_LDST_PIIMM (insn)
	   || AARCH64_LDST_U (insn)
	   || AARCH64_LDST_PREIMM (insn)
	   || AARCH64_LDST_RO (insn)
	   || AARCH64_LDST_UIMM (insn))
    {
      *rt = AARCH64_RT (insn);
      *rt2 = *rt;
      /* NOTE(review): this store is immediately overwritten by the
	 opc_v computation below, so PC-relative loads with opc_v == 0
	 report *load as false — looks like a long-standing quirk;
	 preserved as-is.  */
      if (AARCH64_LDST_PCREL (insn))
	*load = true;
      opc = AARCH64_BITS (insn, 22, 2);
      v = AARCH64_BIT (insn, 26);
      opc_v = opc | (v << 2);
      *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
	       || opc_v == 5 || opc_v == 7);
      return true;
    }
  else if (AARCH64_LDST_SIMD_M (insn)
	   || AARCH64_LDST_SIMD_M_PI (insn))
    {
      /* AdvSIMD multiple-structure forms; RT2 spans the register run
	 implied by the opcode field.  */
      *rt = AARCH64_RT (insn);
      *load = AARCH64_BIT (insn, 22);
      opcode = (insn >> 12) & 0xf;
      switch (opcode)
	{
	case 0:
	case 2:
	  *rt2 = *rt + 3;
	  break;

	case 4:
	case 6:
	  *rt2 = *rt + 2;
	  break;

	case 7:
	  *rt2 = *rt;
	  break;

	case 8:
	case 10:
	  *rt2 = *rt + 1;
	  break;

	default:
	  return false;
	}
      return true;
    }
  else if (AARCH64_LDST_SIMD_S (insn)
	   || AARCH64_LDST_SIMD_S_PI (insn))
    {
      /* AdvSIMD single-structure forms.  */
      *rt = AARCH64_RT (insn);
      r = (insn >> 21) & 1;
      *load = AARCH64_BIT (insn, 22);
      opcode = (insn >> 13) & 0x7;
      switch (opcode)
	{
	case 0:
	case 2:
	case 4:
	  *rt2 = *rt + r;
	  break;

	case 1:
	case 3:
	case 5:
	  *rt2 = *rt + (r == 0 ? 2 : 3);
	  break;

	case 6:
	  *rt2 = *rt + r;
	  break;

	case 7:
	  *rt2 = *rt + (r == 0 ? 2 : 3);
	  break;

	default:
	  return false;
	}
      return true;
    }

  return false;
}

/* Return TRUE if INSN is multiply-accumulate.  */

static bool
aarch64_mlxl_p (uint32_t insn)
{
  uint32_t op31 = AARCH64_OP31 (insn);

  if (AARCH64_MAC (insn)
      && (op31 == 0 || op31 == 1 || op31 == 5)
      /* Exclude MUL instructions which are encoded as a multiple accumulate
	 with RA = XZR.  */
      && AARCH64_RA (insn) != AARCH64_ZR)
    return true;

  return false;
}

/* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
   it is possible for a 64-bit multiply-accumulate instruction to generate an
   incorrect result.  The details are quite complex and hard to
   determine statically, since branches in the code may exist in some
   circumstances, but all cases end with a memory (load, store, or
   prefetch) instruction followed immediately by the multiply-accumulate
   operation.  We employ a linker patching technique, by moving the potentially
   affected multiply-accumulate instruction into a patch region and replacing
   the original instruction with a branch to the patch.  This function checks
   if INSN_1 is the memory operation followed by a multiply-accumulate
   operation (INSN_2).  Return TRUE if an erratum sequence is found, FALSE
   if INSN_1 and INSN_2 are safe.  */

static bool
aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
{
  uint32_t rt;
  uint32_t rt2;
  uint32_t rn;
  uint32_t rm;
  uint32_t ra;
  bool pair;
  bool load;

  if (aarch64_mlxl_p (insn_2)
      && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
    {
      /* Any SIMD memory op is independent of the subsequent MLA
	 by definition of the erratum.  */
      if (AARCH64_BIT (insn_1, 26))
	return true;

      /* If not SIMD, check for integer memory ops and MLA relationship.  */
      rn = AARCH64_RN (insn_2);
      ra = AARCH64_RA (insn_2);
      rm = AARCH64_RM (insn_2);

      /* If this is a load and there's a true(RAW) dependency, we are safe
	 and this is not an erratum sequence.  */
      if (load &&
	  (rt == rn || rt == rm || rt == ra
	   || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
	return false;

      /* We conservatively put out stubs for all other cases (including
	 writebacks).  */
      return true;
    }

  return false;
}

/* Used to order a list of mapping symbols by address.  */

static int
elf_aarch64_compare_mapping (const void *a, const void *b)
{
  const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
  const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;

  if (amap->vma > bmap->vma)
    return 1;
  else if (amap->vma < bmap->vma)
    return -1;
  else if (amap->type > bmap->type)
    /* Ensure results do not depend on the host qsort for objects with
       multiple mapping symbols at the same address by sorting on type
       after vma.
*/ 3975 return 1; 3976 else if (amap->type < bmap->type) 3977 return -1; 3978 else 3979 return 0; 3980 } 3981 3982 3983 static char * 3984 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes) 3985 { 3986 char *stub_name = (char *) bfd_malloc 3987 (strlen ("__erratum_835769_veneer_") + 16); 3988 if (stub_name != NULL) 3989 sprintf (stub_name,"__erratum_835769_veneer_%d", num_fixes); 3990 return stub_name; 3991 } 3992 3993 /* Scan for Cortex-A53 erratum 835769 sequence. 3994 3995 Return TRUE else FALSE on abnormal termination. */ 3996 3997 static bool 3998 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd, 3999 struct bfd_link_info *info, 4000 unsigned int *num_fixes_p) 4001 { 4002 asection *section; 4003 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info); 4004 unsigned int num_fixes = *num_fixes_p; 4005 4006 if (htab == NULL) 4007 return true; 4008 4009 for (section = input_bfd->sections; 4010 section != NULL; 4011 section = section->next) 4012 { 4013 bfd_byte *contents = NULL; 4014 struct _aarch64_elf_section_data *sec_data; 4015 unsigned int span; 4016 4017 if (elf_section_type (section) != SHT_PROGBITS 4018 || (elf_section_flags (section) & SHF_EXECINSTR) == 0 4019 || (section->flags & SEC_EXCLUDE) != 0 4020 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS) 4021 || (section->output_section == bfd_abs_section_ptr)) 4022 continue; 4023 4024 if (elf_section_data (section)->this_hdr.contents != NULL) 4025 contents = elf_section_data (section)->this_hdr.contents; 4026 else if (! 
bfd_malloc_and_get_section (input_bfd, section, &contents)) 4027 return false; 4028 4029 sec_data = elf_aarch64_section_data (section); 4030 4031 if (sec_data->mapcount) 4032 qsort (sec_data->map, sec_data->mapcount, 4033 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping); 4034 4035 for (span = 0; span < sec_data->mapcount; span++) 4036 { 4037 unsigned int span_start = sec_data->map[span].vma; 4038 unsigned int span_end = ((span == sec_data->mapcount - 1) 4039 ? sec_data->map[0].vma + section->size 4040 : sec_data->map[span + 1].vma); 4041 unsigned int i; 4042 char span_type = sec_data->map[span].type; 4043 4044 if (span_type == 'd') 4045 continue; 4046 4047 for (i = span_start; i + 4 < span_end; i += 4) 4048 { 4049 uint32_t insn_1 = bfd_getl32 (contents + i); 4050 uint32_t insn_2 = bfd_getl32 (contents + i + 4); 4051 4052 if (aarch64_erratum_sequence (insn_1, insn_2)) 4053 { 4054 struct elf_aarch64_stub_hash_entry *stub_entry; 4055 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes); 4056 if (! stub_name) 4057 return false; 4058 4059 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name, 4060 section, 4061 htab); 4062 if (! stub_entry) 4063 return false; 4064 4065 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer; 4066 stub_entry->target_section = section; 4067 stub_entry->target_value = i + 4; 4068 stub_entry->veneered_insn = insn_2; 4069 stub_entry->output_name = stub_name; 4070 num_fixes++; 4071 } 4072 } 4073 } 4074 if (elf_section_data (section)->this_hdr.contents == NULL) 4075 free (contents); 4076 } 4077 4078 *num_fixes_p = num_fixes; 4079 4080 return true; 4081 } 4082 4083 4084 /* Test if instruction INSN is ADRP. */ 4085 4086 static bool 4087 _bfd_aarch64_adrp_p (uint32_t insn) 4088 { 4089 return ((insn & AARCH64_ADRP_OP_MASK) == AARCH64_ADRP_OP); 4090 } 4091 4092 4093 /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. 
 */

static bool
_bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
					uint32_t insn_3)
{
  uint32_t rt;
  uint32_t rt2;
  bool pair;
  bool load;

  /* INSN_2 must be a memory op that is not a load of a pair, INSN_3 a
     load/store (unsigned immediate) whose base register RN is the ADRP
     (INSN_1) destination RD.  */
  return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
	  && (!pair
	      || (pair && !load))
	  && AARCH64_LDST_UIMM (insn_3)
	  && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
}


/* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.

   Return TRUE if section CONTENTS at offset I contains one of the
   erratum 843419 sequences, otherwise return FALSE.  If a sequence is
   seen set P_VENEER_I to the offset of the final LOAD/STORE
   instruction in the sequence.
 */

static bool
_bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
			       bfd_vma i, bfd_vma span_end,
			       bfd_vma *p_veneer_i)
{
  uint32_t insn_1 = bfd_getl32 (contents + i);

  /* Every erratum sequence starts with an ADRP.  */
  if (!_bfd_aarch64_adrp_p (insn_1))
    return false;

  /* Need at least three instructions remaining in the span.  */
  if (span_end < i + 12)
    return false;

  uint32_t insn_2 = bfd_getl32 (contents + i + 4);
  uint32_t insn_3 = bfd_getl32 (contents + i + 8);

  /* The ADRP must sit at offset 0xff8 or 0xffc within its 4KB page
     (i.e. one of the last two words of the page).  */
  if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
    return false;

  /* Three-instruction form: dependent load/store is the third insn.  */
  if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
    {
      *p_veneer_i = i + 8;
      return true;
    }

  /* Four-instruction form needs one more instruction in the span.  */
  if (span_end < i + 16)
    return false;

  uint32_t insn_4 = bfd_getl32 (contents + i + 12);

  if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
    {
      *p_veneer_i = i + 12;
      return true;
    }

  return false;
}


/* Resize all stub sections.  */

static void
_bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
{
  asection *section;

  /* OK, we've added some stubs.  Find out the new size of the
     stub sections.  */
  for (section = htab->stub_bfd->sections;
       section != NULL; section = section->next)
    {
      /* Ignore non-stub sections.  */
      if (!strstr (section->name, STUB_SUFFIX))
	continue;

      /* Add space for a branch.  Add 8 bytes to keep section 8 byte aligned,
	 as long branch stubs contain a 64-bit address.  */
      section->size = 8;
    }

  /* Let each stub add its own size on top of the 8-byte header.  */
  bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);

  for (section = htab->stub_bfd->sections;
       section != NULL; section = section->next)
    {
      if (!strstr (section->name, STUB_SUFFIX))
	continue;

      /* Empty stub section.  */
      if (section->size == 8)
	section->size = 0;

      /* Ensure all stub sections have a size which is a multiple of
	 4096.  This is important in order to ensure that the insertion
	 of stub sections does not in itself move existing code around
	 in such a way that new errata sequences are created.  We only do this
	 when the ADRP workaround is enabled.  If only the ADR workaround is
	 enabled then the stubs workaround won't ever be used.  */
      if (htab->fix_erratum_843419 & ERRAT_ADRP)
	if (section->size)
	  section->size = BFD_ALIGN (section->size, 0x1000);
    }
}

/* Construct an erratum 843419 workaround stub name.  */

static char *
_bfd_aarch64_erratum_843419_stub_name (asection *input_section,
				       bfd_vma offset)
{
  /* "e843419@" (8) + 4 hex digits + '_' + 8 hex digits + '_'
     + up to 16 hex digits of offset + NUL.  */
  const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
  char *stub_name = bfd_malloc (len);

  if (stub_name != NULL)
    snprintf (stub_name, len, "e843419@%04x_%08x_%" PRIx64,
	      input_section->owner->id,
	      input_section->id,
	      (uint64_t) offset);
  return stub_name;
}

/* Build a stub_entry structure describing an 843419 fixup.

   The stub_entry constructed is populated with the bit pattern INSN
   of the instruction located at OFFSET within input SECTION.

   Returns TRUE on success.  */

static bool
_bfd_aarch64_erratum_843419_fixup (uint32_t insn,
				   bfd_vma adrp_offset,
				   bfd_vma ldst_offset,
				   asection *section,
				   struct bfd_link_info *info)
{
  struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
  char *stub_name;
  struct elf_aarch64_stub_hash_entry *stub_entry;

  stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
  if (stub_name == NULL)
    return false;
  /* If a veneer for this offset already exists there is nothing to do.  */
  stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
					 false, false);
  if (stub_entry)
    {
      free (stub_name);
      return true;
    }

  /* We always place an 843419 workaround veneer in the stub section
     attached to the input section in which an erratum sequence has
     been found.  This ensures that later in the link process (in
     elfNN_aarch64_write_section) when we copy the veneered
     instruction from the input section into the stub section the
     copied instruction will have had any relocations applied to it.
     If we placed workaround veneers in any other stub section then we
     could not assume that all relocations have been processed on the
     corresponding input section at the point we output the stub
     section.  */

  stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
  if (stub_entry == NULL)
    {
      free (stub_name);
      return false;
    }

  /* Ownership of stub_name passes to the entry via output_name.  */
  stub_entry->adrp_offset = adrp_offset;
  stub_entry->target_value = ldst_offset;
  stub_entry->target_section = section;
  stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
  stub_entry->veneered_insn = insn;
  stub_entry->output_name = stub_name;

  return true;
}


/* Scan an input section looking for the signature of erratum 843419.
4281 4282 Scans input SECTION in INPUT_BFD looking for erratum 843419 4283 signatures, for each signature found a stub_entry is created 4284 describing the location of the erratum for subsequent fixup. 4285 4286 Return TRUE on successful scan, FALSE on failure to scan. 4287 */ 4288 4289 static bool 4290 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section, 4291 struct bfd_link_info *info) 4292 { 4293 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info); 4294 4295 if (htab == NULL) 4296 return true; 4297 4298 if (elf_section_type (section) != SHT_PROGBITS 4299 || (elf_section_flags (section) & SHF_EXECINSTR) == 0 4300 || (section->flags & SEC_EXCLUDE) != 0 4301 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS) 4302 || (section->output_section == bfd_abs_section_ptr)) 4303 return true; 4304 4305 do 4306 { 4307 bfd_byte *contents = NULL; 4308 struct _aarch64_elf_section_data *sec_data; 4309 unsigned int span; 4310 4311 if (elf_section_data (section)->this_hdr.contents != NULL) 4312 contents = elf_section_data (section)->this_hdr.contents; 4313 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents)) 4314 return false; 4315 4316 sec_data = elf_aarch64_section_data (section); 4317 4318 if (sec_data->mapcount) 4319 qsort (sec_data->map, sec_data->mapcount, 4320 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping); 4321 4322 for (span = 0; span < sec_data->mapcount; span++) 4323 { 4324 unsigned int span_start = sec_data->map[span].vma; 4325 unsigned int span_end = ((span == sec_data->mapcount - 1) 4326 ? 
sec_data->map[0].vma + section->size 4327 : sec_data->map[span + 1].vma); 4328 unsigned int i; 4329 char span_type = sec_data->map[span].type; 4330 4331 if (span_type == 'd') 4332 continue; 4333 4334 for (i = span_start; i + 8 < span_end; i += 4) 4335 { 4336 bfd_vma vma = (section->output_section->vma 4337 + section->output_offset 4338 + i); 4339 bfd_vma veneer_i; 4340 4341 if (_bfd_aarch64_erratum_843419_p 4342 (contents, vma, i, span_end, &veneer_i)) 4343 { 4344 uint32_t insn = bfd_getl32 (contents + veneer_i); 4345 4346 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i, 4347 section, info)) 4348 return false; 4349 } 4350 } 4351 } 4352 4353 if (elf_section_data (section)->this_hdr.contents == NULL) 4354 free (contents); 4355 } 4356 while (0); 4357 4358 return true; 4359 } 4360 4361 4362 /* Add stub entries for calls. 4363 4364 The basic idea here is to examine all the relocations looking for 4365 PC-relative calls to a target that is unreachable with a "bl" 4366 instruction. */ 4367 4368 static bool 4369 _bfd_aarch64_add_call_stub_entries (bool *stub_changed, bfd *output_bfd, 4370 struct bfd_link_info *info) 4371 { 4372 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info); 4373 bool need_bti = elf_aarch64_bti_p (output_bfd); 4374 bfd *input_bfd; 4375 4376 for (input_bfd = info->input_bfds; input_bfd != NULL; 4377 input_bfd = input_bfd->link.next) 4378 { 4379 Elf_Internal_Shdr *symtab_hdr; 4380 asection *section; 4381 Elf_Internal_Sym *local_syms = NULL; 4382 4383 if (!is_aarch64_elf (input_bfd) 4384 || (input_bfd->flags & BFD_LINKER_CREATED) != 0) 4385 continue; 4386 4387 /* We'll need the symbol table in a second. */ 4388 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr; 4389 if (symtab_hdr->sh_info == 0) 4390 continue; 4391 4392 /* Walk over each section attached to the input bfd. 
*/
      for (section = input_bfd->sections;
	   section != NULL; section = section->next)
	{
	  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	  /* If there aren't any relocs, then there's nothing more to do.  */
	  if ((section->flags & SEC_RELOC) == 0
	      || section->reloc_count == 0
	      || (section->flags & SEC_CODE) == 0)
	    continue;

	  /* If this section is a link-once section that will be
	     discarded, then don't create any stubs.  */
	  if (section->output_section == NULL
	      || section->output_section->owner != output_bfd)
	    continue;

	  /* Get the relocs.  */
	  internal_relocs
	    = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
					 NULL, info->keep_memory);
	  if (internal_relocs == NULL)
	    goto error_ret_free_local;

	  /* Now examine each relocation.  */
	  irela = internal_relocs;
	  irelaend = irela + section->reloc_count;
	  for (; irela < irelaend; irela++)
	    {
	      unsigned int r_type, r_indx;
	      enum elf_aarch64_stub_type stub_type;
	      struct elf_aarch64_stub_hash_entry *stub_entry;
	      struct elf_aarch64_stub_hash_entry *stub_entry_bti;
	      asection *sym_sec;
	      bfd_vma sym_value;
	      bfd_vma destination;
	      struct elf_aarch64_link_hash_entry *hash;
	      const char *sym_name;
	      char *stub_name;
	      char *stub_name_bti;
	      const asection *id_sec;
	      const asection *id_sec_bti;
	      unsigned char st_type;
	      bfd_size_type len;

	      r_type = ELFNN_R_TYPE (irela->r_info);
	      r_indx = ELFNN_R_SYM (irela->r_info);

	      if (r_type >= (unsigned int) R_AARCH64_end)
		{
		  bfd_set_error (bfd_error_bad_value);
		  /* This label frees the relocs (when locally read)
		     before taking the common error exit.  */
		error_ret_free_internal:
		  if (elf_section_data (section)->relocs == NULL)
		    free (internal_relocs);
		  goto error_ret_free_local;
		}

	      /* Only look for stubs on unconditional branch and
		 branch and link instructions.  */
	      if (r_type != (unsigned int) AARCH64_R (CALL26)
		  && r_type != (unsigned int) AARCH64_R (JUMP26))
		continue;

	      /* Now determine the call target, its name, value,
		 section.  */
	      sym_sec = NULL;
	      sym_value = 0;
	      destination = 0;
	      hash = NULL;
	      sym_name = NULL;
	      if (r_indx < symtab_hdr->sh_info)
		{
		  /* It's a local symbol.  */
		  Elf_Internal_Sym *sym;
		  Elf_Internal_Shdr *hdr;

		  if (local_syms == NULL)
		    {
		      /* Use the cached symbol buffer if present,
			 otherwise read the local symbols now.  */
		      local_syms
			= (Elf_Internal_Sym *) symtab_hdr->contents;
		      if (local_syms == NULL)
			local_syms
			  = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
						  symtab_hdr->sh_info, 0,
						  NULL, NULL, NULL);
		      if (local_syms == NULL)
			goto error_ret_free_internal;
		    }

		  sym = local_syms + r_indx;
		  hdr = elf_elfsections (input_bfd)[sym->st_shndx];
		  sym_sec = hdr->bfd_section;
		  if (!sym_sec)
		    /* This is an undefined symbol.  It can never
		       be resolved.  */
		    continue;

		  if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
		    sym_value = sym->st_value;
		  destination = (sym_value + irela->r_addend
				 + sym_sec->output_offset
				 + sym_sec->output_section->vma);
		  st_type = ELF_ST_TYPE (sym->st_info);
		  sym_name
		    = bfd_elf_string_from_elf_section (input_bfd,
						       symtab_hdr->sh_link,
						       sym->st_name);
		}
	      else
		{
		  /* A global symbol: follow indirect/warning links to
		     the real hash entry.  */
		  int e_indx;

		  e_indx = r_indx - symtab_hdr->sh_info;
		  hash = ((struct elf_aarch64_link_hash_entry *)
			  elf_sym_hashes (input_bfd)[e_indx]);

		  while (hash->root.root.type == bfd_link_hash_indirect
			 || hash->root.root.type == bfd_link_hash_warning)
		    hash = ((struct elf_aarch64_link_hash_entry *)
			    hash->root.root.u.i.link);

		  if (hash->root.root.type == bfd_link_hash_defined
		      || hash->root.root.type == bfd_link_hash_defweak)
		    {
		      struct elf_aarch64_link_hash_table *globals =
			elf_aarch64_hash_table (info);
		      sym_sec = hash->root.root.u.def.section;
		      sym_value = hash->root.root.u.def.value;
		      /* For a destination in a shared library,
			 use the PLT stub as target address to
			 decide whether a branch stub is
			 needed.  */
		      if (globals->root.splt != NULL && hash != NULL
			  && hash->root.plt.offset != (bfd_vma) - 1)
			{
			  sym_sec = globals->root.splt;
			  sym_value = hash->root.plt.offset;
			  if (sym_sec->output_section != NULL)
			    destination = (sym_value
					   + sym_sec->output_offset
					   + sym_sec->output_section->vma);
			}
		      else if (sym_sec->output_section != NULL)
			destination = (sym_value + irela->r_addend
				       + sym_sec->output_offset
				       + sym_sec->output_section->vma);
		    }
		  else if (hash->root.root.type == bfd_link_hash_undefined
			   || (hash->root.root.type
			       == bfd_link_hash_undefweak))
		    {
		      /* For a shared library, use the PLT stub as
			 target address to decide whether a long
			 branch stub is needed.
			 For absolute code, they cannot be handled.  */
		      struct elf_aarch64_link_hash_table *globals =
			elf_aarch64_hash_table (info);

		      if (globals->root.splt != NULL && hash != NULL
			  && hash->root.plt.offset != (bfd_vma) - 1)
			{
			  sym_sec = globals->root.splt;
			  sym_value = hash->root.plt.offset;
			  if (sym_sec->output_section != NULL)
			    destination = (sym_value
					   + sym_sec->output_offset
					   + sym_sec->output_section->vma);
			}
		      else
			continue;
		    }
		  else
		    {
		      bfd_set_error (bfd_error_bad_value);
		      goto error_ret_free_internal;
		    }
		  st_type = ELF_ST_TYPE (hash->root.type);
		  sym_name = hash->root.root.root.string;
		}

	      /* Determine what (if any) linker stub is needed.  */
	      stub_type = aarch64_type_of_stub (section, irela, sym_sec,
						st_type, destination);
	      if (stub_type == aarch64_stub_none)
		continue;

	      /* Support for grouping stub sections.  */
	      id_sec = htab->stub_group[section->id].link_sec;

	      /* Get the name of this stub.  */
	      stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
						   irela);
	      if (!stub_name)
		goto error_ret_free_internal;

	      stub_entry =
		aarch64_stub_hash_lookup (&htab->stub_hash_table,
					  stub_name, false, false);
	      if (stub_entry != NULL)
		{
		  /* The proper stub has already been created.  */
		  free (stub_name);

		  /* Always update this stub's target since it may have
		     changed after layout.  */
		  stub_entry->target_value = sym_value + irela->r_addend;

		  if (stub_entry->double_stub)
		    {
		      /* Update the target of both stubs.  */

		      id_sec_bti = htab->stub_group[sym_sec->id].link_sec;
		      stub_name_bti =
			elfNN_aarch64_stub_name (id_sec_bti, sym_sec, hash,
						 irela);
		      if (!stub_name_bti)
			goto error_ret_free_internal;
		      stub_entry_bti =
			aarch64_stub_hash_lookup (&htab->stub_hash_table,
						  stub_name_bti, false, false);
		      BFD_ASSERT (stub_entry_bti != NULL);
		      free (stub_name_bti);
		      stub_entry_bti->target_value = stub_entry->target_value;
		      stub_entry->target_value = stub_entry_bti->stub_offset;
		    }
		  continue;
		}

	      stub_entry = _bfd_aarch64_add_stub_entry_in_group
		(stub_name, section, htab);
	      if (stub_entry == NULL)
		{
		  free (stub_name);
		  goto error_ret_free_internal;
		}

	      stub_entry->target_value = sym_value + irela->r_addend;
	      stub_entry->target_section = sym_sec;
	      stub_entry->stub_type = stub_type;
	      stub_entry->h = hash;
	      stub_entry->st_type = st_type;

	      if (sym_name == NULL)
		sym_name = "unnamed";
	      len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
	      stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
	      if (stub_entry->output_name == NULL)
		{
		  free (stub_name);
		  goto error_ret_free_internal;
		}

	      snprintf (stub_entry->output_name, len,
			STUB_ENTRY_NAME,
			sym_name);

	      /* A stub with indirect jump may break BTI compatibility, so
		 insert another stub with direct jump near the target then.  */
	      if (need_bti && !aarch64_bti_stub_p (info, stub_entry))
		{
		  id_sec_bti = htab->stub_group[sym_sec->id].link_sec;

		  /* If the stub with indirect jump and the BTI stub are in
		     the same stub group: change the indirect jump stub into
		     a BTI stub since a direct branch can reach the target.
		     The BTI landing pad is still needed in case another
		     stub indirectly jumps to it.  */
		  if (id_sec_bti == id_sec)
		    {
		      stub_entry->stub_type = aarch64_stub_bti_direct_branch;
		      goto skip_double_stub;
		    }

		  stub_entry->double_stub = true;
		  htab->has_double_stub = true;

		  stub_name_bti =
		    elfNN_aarch64_stub_name (id_sec_bti, sym_sec, hash, irela);
		  if (!stub_name_bti)
		    {
		      free (stub_name);
		      goto error_ret_free_internal;
		    }

		  stub_entry_bti =
		    aarch64_stub_hash_lookup (&htab->stub_hash_table,
					      stub_name_bti, false, false);
		  if (stub_entry_bti != NULL)
		    BFD_ASSERT (stub_entry_bti->stub_type
				== aarch64_stub_bti_direct_branch);
		  else
		    {
		      stub_entry_bti =
			_bfd_aarch64_add_stub_entry_in_group (stub_name_bti,
							      sym_sec, htab);
		      if (stub_entry_bti == NULL)
			{
			  free (stub_name);
			  free (stub_name_bti);
			  goto error_ret_free_internal;
			}

		      stub_entry_bti->target_value =
			sym_value + irela->r_addend;
		      stub_entry_bti->target_section = sym_sec;
		      stub_entry_bti->stub_type =
			aarch64_stub_bti_direct_branch;
		      stub_entry_bti->h = hash;
		      stub_entry_bti->st_type = st_type;

		      len = sizeof (BTI_STUB_ENTRY_NAME) + strlen (sym_name);
		      stub_entry_bti->output_name = bfd_alloc (htab->stub_bfd,
							       len);
		      if (stub_entry_bti->output_name == NULL)
			{
			  free (stub_name);
			  free (stub_name_bti);
			  goto error_ret_free_internal;
			}
		      snprintf (stub_entry_bti->output_name, len,
				BTI_STUB_ENTRY_NAME, sym_name);
		    }

		  /* Update the indirect call stub to target the BTI stub.  */
		  stub_entry->target_value = 0;
		  stub_entry->target_section = stub_entry_bti->stub_sec;
		  stub_entry->stub_type = stub_type;
		  stub_entry->h = NULL;
		  stub_entry->st_type = STT_FUNC;
		}
	    skip_double_stub:
	      *stub_changed = true;
	    }

	  /* We're done with the internal relocs, free them.  */
	  if (elf_section_data (section)->relocs == NULL)
	    free (internal_relocs);
	}
      /* NOTE(review): when local_syms was freshly read by
	 bfd_elf_get_elf_syms (i.e. it is not symtab_hdr->contents) it
	 does not appear to be freed on either the success path or at
	 error_ret_free_local below — possible memory leak; confirm
	 against the ownership conventions used elsewhere.  */
    }
  return true;

 error_ret_free_local:
  return false;
}


/* Determine and set the size of the stub section for a final link.  */

bool
elfNN_aarch64_size_stubs (bfd *output_bfd,
			  bfd *stub_bfd,
			  struct bfd_link_info *info,
			  bfd_signed_vma group_size,
			  asection * (*add_stub_section) (const char *,
							  asection *),
			  void (*layout_sections_again) (void))
{
  bfd_size_type stub_group_size;
  bool stubs_always_before_branch;
  struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
  unsigned int num_erratum_835769_fixes = 0;

  /* Propagate mach to stub bfd, because it may not have been
     finalized when we created stub_bfd.  */
  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
		     bfd_get_mach (output_bfd));

  /* Stash our params away.  */
  htab->stub_bfd = stub_bfd;
  htab->add_stub_section = add_stub_section;
  htab->layout_sections_again = layout_sections_again;
  /* A negative group size means stubs are placed before the branches
     they serve; the magnitude is the group size either way.  */
  stubs_always_before_branch = group_size < 0;
  if (group_size < 0)
    stub_group_size = -group_size;
  else
    stub_group_size = group_size;

  if (stub_group_size == 1)
    {
      /* Default values.  */
      /* AArch64 branch range is +-128MB.  The value used is 1MB less.
 */
      stub_group_size = 127 * 1024 * 1024;
    }

  group_sections (htab, stub_group_size, stubs_always_before_branch);

  /* Lay out sections once before scanning for errata.  */
  (*htab->layout_sections_again) ();

  if (htab->fix_erratum_835769)
    {
      bfd *input_bfd;

      /* Scan every real AArch64 input for 835769 sequences.  */
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL; input_bfd = input_bfd->link.next)
	{
	  if (!is_aarch64_elf (input_bfd)
	      || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
	    continue;

	  if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
						 &num_erratum_835769_fixes))
	    return false;
	}

      _bfd_aarch64_resize_stubs (htab);
      (*htab->layout_sections_again) ();
    }

  if (htab->fix_erratum_843419 != ERRAT_NONE)
    {
      bfd *input_bfd;

      /* Scan every code section of every real AArch64 input for
	 843419 sequences.  */
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next)
	{
	  asection *section;

	  if (!is_aarch64_elf (input_bfd)
	      || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
	    continue;

	  for (section = input_bfd->sections;
	       section != NULL;
	       section = section->next)
	    if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
	      return false;
	}

      _bfd_aarch64_resize_stubs (htab);
      (*htab->layout_sections_again) ();
    }

  /* Iterate adding call stubs and re-laying-out until the set of
     stubs reaches a fixed point.  */
  for (;;)
    {
      bool stub_changed = false;

      if (!_bfd_aarch64_add_call_stub_entries (&stub_changed, output_bfd, info))
	return false;

      if (!stub_changed)
	return true;

      _bfd_aarch64_resize_stubs (htab);
      (*htab->layout_sections_again) ();
    }
}

/* Build all the stubs associated with the current output file.  The
   stubs are kept in a hash table attached to the main linker hash
   table.  We also set up the .plt entries for statically linked PIC
   functions here.  This function is called via aarch64_elf_finish in the
   linker.
*/ 4844 4845 bool 4846 elfNN_aarch64_build_stubs (struct bfd_link_info *info) 4847 { 4848 asection *stub_sec; 4849 struct bfd_hash_table *table; 4850 struct elf_aarch64_link_hash_table *htab; 4851 4852 htab = elf_aarch64_hash_table (info); 4853 4854 for (stub_sec = htab->stub_bfd->sections; 4855 stub_sec != NULL; stub_sec = stub_sec->next) 4856 { 4857 bfd_size_type size; 4858 4859 /* Ignore non-stub sections. */ 4860 if (!strstr (stub_sec->name, STUB_SUFFIX)) 4861 continue; 4862 4863 /* Allocate memory to hold the linker stubs. */ 4864 size = stub_sec->size; 4865 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size); 4866 if (stub_sec->contents == NULL && size != 0) 4867 return false; 4868 stub_sec->size = 0; 4869 4870 /* Add a branch around the stub section, and a nop, to keep it 8 byte 4871 aligned, as long branch stubs contain a 64-bit address. */ 4872 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents); 4873 bfd_putl32 (INSN_NOP, stub_sec->contents + 4); 4874 stub_sec->size += 8; 4875 } 4876 4877 /* Build the stubs as directed by the stub hash table. */ 4878 table = &htab->stub_hash_table; 4879 bfd_hash_traverse (table, aarch64_build_one_stub, info); 4880 4881 return true; 4882 } 4883 4884 4885 /* Add an entry to the code/data map for section SEC. 
 */

static void
elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
{
  struct _aarch64_elf_section_data *sec_data =
    elf_aarch64_section_data (sec);
  unsigned int newidx;

  /* Lazily create the map on first use.  */
  if (sec_data->map == NULL)
    {
      sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
      sec_data->mapcount = 0;
      sec_data->mapsize = 1;
    }

  newidx = sec_data->mapcount++;

  /* Grow the map geometrically when it is full.  */
  if (sec_data->mapcount > sec_data->mapsize)
    {
      sec_data->mapsize *= 2;
      sec_data->map = bfd_realloc_or_free
	(sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
    }

  /* NOTE(review): on (re)allocation failure map is NULL here while
     mapcount has already been incremented; later consumers that trust
     mapcount could misbehave — confirm failure handling elsewhere.  */
  if (sec_data->map)
    {
      sec_data->map[newidx].vma = vma;
      sec_data->map[newidx].type = type;
    }
}


/* Initialise maps of insn/data for input BFDs.  */
void
bfd_elfNN_aarch64_init_maps (bfd *abfd)
{
  Elf_Internal_Sym *isymbuf;
  Elf_Internal_Shdr *hdr;
  unsigned int i, localsyms;

  /* Make sure that we are dealing with an AArch64 elf binary.  */
  if (!is_aarch64_elf (abfd))
    return;

  if ((abfd->flags & DYNAMIC) != 0)
    return;

  hdr = &elf_symtab_hdr (abfd);
  localsyms = hdr->sh_info;

  /* Obtain a buffer full of symbols for this BFD.  The hdr->sh_info field
     should contain the number of local symbols, which should come before any
     global symbols.  Mapping symbols are always local.  */
  isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);

  /* No internal symbols read?  Skip this BFD.
 */
  if (isymbuf == NULL)
    return;

  /* Record every local mapping symbol ($x, $d, ...) in its section's
     code/data map, keyed by the type letter after the '$'.  */
  for (i = 0; i < localsyms; i++)
    {
      Elf_Internal_Sym *isym = &isymbuf[i];
      asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
      const char *name;

      if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
	{
	  name = bfd_elf_string_from_elf_section (abfd,
						  hdr->sh_link,
						  isym->st_name);

	  if (bfd_is_aarch64_special_symbol_name
	      (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
	    elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
	}
    }
  /* NOTE(review): isymbuf does not appear to be freed here — confirm
     whether bfd_elf_get_elf_syms caches it or this is a leak.  */
}

/* Select the PLT entry templates and sizes matching PLT_TYPE
   (BTI and/or PAC protected) in the hash table for LINK_INFO.  */
static void
setup_plt_values (struct bfd_link_info *link_info,
		  aarch64_plt_type plt_type)
{
  struct elf_aarch64_link_hash_table *globals;
  globals = elf_aarch64_hash_table (link_info);

  if (plt_type == PLT_BTI_PAC)
    {
      globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry;

      /* Only in ET_EXEC we need PLTn with BTI.  */
      if (bfd_link_pde (link_info))
	{
	  globals->plt_entry_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE;
	  globals->plt_entry = elfNN_aarch64_small_plt_bti_pac_entry;
	}
      else
	{
	  globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE;
	  globals->plt_entry = elfNN_aarch64_small_plt_pac_entry;
	}
    }
  else if (plt_type == PLT_BTI)
    {
      globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry;

      /* Only in ET_EXEC we need PLTn with BTI.  */
      if (bfd_link_pde (link_info))
	{
	  globals->plt_entry_size = PLT_BTI_SMALL_ENTRY_SIZE;
	  globals->plt_entry = elfNN_aarch64_small_plt_bti_entry;
	}
    }
  else if (plt_type == PLT_PAC)
    {
      globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE;
      globals->plt_entry = elfNN_aarch64_small_plt_pac_entry;
    }
}

/* Set option values needed during linking.
*/
void
bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
			       struct bfd_link_info *link_info,
			       int no_enum_warn,
			       int no_wchar_warn, int pic_veneer,
			       int fix_erratum_835769,
			       erratum_84319_opts fix_erratum_843419,
			       int no_apply_dynamic_relocs,
			       aarch64_bti_pac_info bp_info)
{
  struct elf_aarch64_link_hash_table *globals;

  globals = elf_aarch64_hash_table (link_info);
  globals->pic_veneer = pic_veneer;
  globals->fix_erratum_835769 = fix_erratum_835769;
  /* If the default options are used, then ERRAT_ADR will be set by default
     which will enable the ADRP->ADR workaround for the erratum 843419
     workaround.  */
  globals->fix_erratum_843419 = fix_erratum_843419;
  globals->no_apply_dynamic_relocs = no_apply_dynamic_relocs;

  BFD_ASSERT (is_aarch64_elf (output_bfd));
  elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
  elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;

  switch (bp_info.bti_type)
    {
    case BTI_WARN:
      /* Turn on the BTI GNU property and enable warnings about inputs
	 that lack it (no_bti_warn = 0 means "do warn").  */
      elf_aarch64_tdata (output_bfd)->no_bti_warn = 0;
      elf_aarch64_tdata (output_bfd)->gnu_and_prop
	|= GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
      break;

    default:
      break;
    }
  elf_aarch64_tdata (output_bfd)->plt_type = bp_info.plt_type;
  setup_plt_values (link_info, bp_info.plt_type);
}

/* Return the run-time address of the GOT entry for symbol H, or
   (bfd_vma) -1 when H is NULL.  If the symbol resolves locally (static
   link, -Bsymbolic, or hidden undefweak), the GOT slot is filled with
   VALUE here; the low bit of h->got.offset tracks whether the slot has
   already been initialized.  Otherwise *UNRESOLVED_RELOC_P is cleared
   and the fixup is left to the dynamic linker.  */

static bfd_vma
aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
				 struct elf_aarch64_link_hash_table
				 *globals, struct bfd_link_info *info,
				 bfd_vma value, bfd *output_bfd,
				 bool *unresolved_reloc_p)
{
  bfd_vma off = (bfd_vma) - 1;
  asection *basegot = globals->root.sgot;
  bool dyn = globals->root.dynamic_sections_created;

  if (h != NULL)
    {
      BFD_ASSERT (basegot != NULL);
      off = h->got.offset;
      BFD_ASSERT (off != (bfd_vma) - 1);
      if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
	  || (bfd_link_pic (info)
	      && SYMBOL_REFERENCES_LOCAL (info, h))
	  || (ELF_ST_VISIBILITY (h->other)
	      && h->root.type == bfd_link_hash_undefweak))
	{
	  /* This is actually a static link, or it is a -Bsymbolic link
	     and the symbol is defined locally.  We must initialize this
	     entry in the global offset table.  Since the offset must
	     always be a multiple of 8 (4 in the case of ILP32), we use
	     the least significant bit to record whether we have
	     initialized it already.
	     When doing a dynamic link, we create a .rel(a).got relocation
	     entry to initialize the value.  This is done in the
	     finish_dynamic_symbol routine.  */
	  if ((off & 1) != 0)
	    off &= ~1;
	  else
	    {
	      bfd_put_NN (output_bfd, value, basegot->contents + off);
	      h->got.offset |= 1;
	    }
	}
      else
	*unresolved_reloc_p = false;

      /* Convert the section-relative GOT offset into a VMA.  */
      off = off + basegot->output_section->vma + basegot->output_offset;
    }

  return off;
}

/* Change R_TYPE to a more efficient access model where possible,
   return the new reloc type.

   GD and TLSDESC relocs relax to local-exec (LE) forms when the symbol
   binds locally in an executable, and to initial-exec (IE) forms
   otherwise (where an IE counterpart exists).  Relocs with no cheaper
   form are returned unchanged.  */

static bfd_reloc_code_real_type
aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
				      struct elf_link_hash_entry *h,
				      struct bfd_link_info *info)
{
  /* LE is only valid when the symbol's TP offset is known at static
     link time: an executable output and a locally-binding symbol.  */
  bool local_exec = bfd_link_executable (info)
    && SYMBOL_REFERENCES_LOCAL (info, h);

  switch (r_type)
    {
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
      return (local_exec
	      ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
	      : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);

    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
      return (local_exec
	      ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
	      : r_type);

    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
      return (local_exec
	      ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
	      : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);

    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      return (local_exec
	      ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
	      : BFD_RELOC_AARCH64_NONE);

    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      return (local_exec
	      ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
	      : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC);

    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      return (local_exec
	      ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
	      : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1);

    case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
      return (local_exec
	      ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
	      : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);

    /* IE relocs only relax further to LE.  */
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
      return local_exec ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;

    case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
      return local_exec ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
      return r_type;

    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
      return (local_exec
	      ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
	      : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
      /* Instructions with these relocations will become NOPs.  */
      return BFD_RELOC_AARCH64_NONE;

    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
      return local_exec ? BFD_RELOC_AARCH64_NONE : r_type;

#if ARCH_SIZE == 64
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
      return local_exec
	? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
	: BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC;

    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      return local_exec
	? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
	: BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1;
#endif

    default:
      break;
    }

  return r_type;
}

/* Classify R_TYPE by the kind of GOT entry it requires: a normal GOT
   slot, a GD tls_index pair, a TLS descriptor pair, or an IE tpoff
   slot.  Returns GOT_UNKNOWN for relocs needing no GOT entry.  */

static unsigned int
aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
{
  switch (r_type)
    {
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
      return GOT_NORMAL;

    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
      return GOT_TLS_GD;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      return GOT_TLSDESC_GD;

    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
      return GOT_TLS_IE;

    default:
      break;
    }
  return GOT_UNKNOWN;
}

/* Decide whether the TLS reloc R_TYPE against the symbol given by H /
   R_SYMNDX may be relaxed to a cheaper access model.  GD->IE relaxation
   is allowed even in shared objects when the symbol's GOT type is IE;
   anything else requires an executable output and a defined symbol.  */

static bool
aarch64_can_relax_tls (bfd *input_bfd,
		       struct bfd_link_info *info,
		       bfd_reloc_code_real_type r_type,
		       struct elf_link_hash_entry *h,
		       unsigned long r_symndx)
{
  unsigned int symbol_got_type;
  unsigned int reloc_got_type;

  if (! IS_AARCH64_TLS_RELAX_RELOC (r_type))
    return false;

  symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
  reloc_got_type = aarch64_reloc_got_type (r_type);

  if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
    return true;

  if (!bfd_link_executable (info))
    return false;

  /* Undefined weak symbols have no known TP offset; do not relax.  */
  if (h && h->root.type == bfd_link_hash_undefweak)
    return false;

  return true;
}

/* Given the relocation code R_TYPE, return the relaxed bfd reloc
   enumerator.  */

static bfd_reloc_code_real_type
aarch64_tls_transition (bfd *input_bfd,
			struct bfd_link_info *info,
			unsigned int r_type,
			struct elf_link_hash_entry *h,
			unsigned long r_symndx)
{
  bfd_reloc_code_real_type bfd_r_type
    = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);

  if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
    return bfd_r_type;

  return aarch64_tls_transition_without_check (bfd_r_type, h, info);
}

/* Return the base VMA address which should be subtracted from real addresses
   when resolving R_AARCH64_TLS_DTPREL relocation.  */

static bfd_vma
dtpoff_base (struct bfd_link_info *info)
{
  /* If tls_sec is NULL, we should have signalled an error already.  */
  BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
  return elf_hash_table (info)->tls_sec->vma;
}

/* Return the base VMA address which should be subtracted from real addresses
   when resolving R_AARCH64_TLS_GOTTPREL64 relocations.  */

static bfd_vma
tpoff_base (struct bfd_link_info *info)
{
  struct elf_link_hash_table *htab = elf_hash_table (info);

  /* If tls_sec is NULL, we should have signalled an error already.  */
  BFD_ASSERT (htab->tls_sec != NULL);

  /* The TLS block follows the thread control block, aligned to the TLS
     segment's alignment.  */
  bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
			      htab->tls_sec->alignment_power);
  return htab->tls_sec->vma - base;
}

/* Return a pointer to the stored GOT offset for the symbol given by H
   (global) or R_SYMNDX (local, looked up in the per-BFD locals array).  */

static bfd_vma *
symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
		       unsigned long r_symndx)
{
  /* Calculate the address of the GOT entry for symbol
     referred to in h.  */
  if (h != NULL)
    return &h->got.offset;
  else
    {
      /* local symbol */
      struct elf_aarch64_local_symbol *l;

      l = elf_aarch64_locals (input_bfd);
      return &l[r_symndx].got_offset;
    }
}

/* Set the low bit of the stored GOT offset: "entry initialized".  */

static void
symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
			unsigned long r_symndx)
{
  bfd_vma *p;
  p = symbol_got_offset_ref (input_bfd, h, r_symndx);
  *p |= 1;
}

/* Test the "entry initialized" low bit of the stored GOT offset.  */

static int
symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
			  unsigned long r_symndx)
{
  bfd_vma value;
  value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
  return value & 1;
}

/* Return the stored GOT offset with the marker bit stripped.  */

static bfd_vma
symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
		   unsigned long r_symndx)
{
  bfd_vma value;
  value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
  value &= ~1;
  return value;
}

/* As symbol_got_offset_ref, but for the TLSDESC GOT slot pair.  */

static bfd_vma *
symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
			       unsigned long r_symndx)
{
  /* Calculate the address of the GOT entry for symbol
     referred to in h.  */
  if (h != NULL)
    {
      struct elf_aarch64_link_hash_entry *eh;
      eh = (struct elf_aarch64_link_hash_entry *) h;
      return &eh->tlsdesc_got_jump_table_offset;
    }
  else
    {
      /* local symbol */
      struct elf_aarch64_local_symbol *l;

      l = elf_aarch64_locals (input_bfd);
      return &l[r_symndx].tlsdesc_got_jump_table_offset;
    }
}

/* Set the "initialized" low bit of the TLSDESC GOT offset.  */

static void
symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
				unsigned long r_symndx)
{
  bfd_vma *p;
  p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
  *p |= 1;
}

/* Test the "initialized" low bit of the TLSDESC GOT offset.  */

static int
symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
				  struct elf_link_hash_entry *h,
				  unsigned long r_symndx)
{
  bfd_vma value;
  value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
  return value & 1;
}

/* Return the TLSDESC GOT offset with the marker bit stripped.  */

static bfd_vma
symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
			   unsigned long r_symndx)
{
  bfd_vma value;
  value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
  value &= ~1;
  return value;
}

/* Data for make_branch_to_erratum_835769_stub().  */

struct erratum_835769_branch_to_stub_data
{
  struct bfd_link_info *info;
  asection *output_section;
  bfd_byte *contents;
};

/* Helper to insert branches to erratum 835769 stubs in the right
   places for a particular section.
*/

static bool
make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
				    void *in_arg)
{
  struct elf_aarch64_stub_hash_entry *stub_entry;
  struct erratum_835769_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn = 0;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  unsigned int target;
  bfd *abfd;

  stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
  data = (struct erratum_835769_branch_to_stub_data *) in_arg;

  /* Only handle 835769 veneers that live in the section being written.  */
  if (stub_entry->target_section != data->output_section
      || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
    return true;

  contents = data->contents;
  veneered_insn_loc = stub_entry->target_section->output_section->vma
    + stub_entry->target_section->output_offset
    + stub_entry->target_value;
  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
    + stub_entry->stub_sec->output_offset
    + stub_entry->stub_offset;
  branch_offset = veneer_entry_loc - veneered_insn_loc;

  abfd = stub_entry->target_section->owner;
  if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
    _bfd_error_handler
      (_("%pB: error: erratum 835769 stub out "
	 "of range (input file too large)"), abfd);

  /* Overwrite the veneered instruction with an unconditional branch
     (B, opcode 0x14000000) to the stub; the offset field is the
     26-bit word offset.  NOTE(review): the branch is emitted even
     after the out-of-range diagnostic above — presumably the link is
     already doomed at that point; confirm against upstream intent.  */
  target = stub_entry->target_value;
  branch_insn = 0x14000000;
  branch_offset >>= 2;
  branch_offset &= 0x3ffffff;
  branch_insn |= branch_offset;
  bfd_putl32 (branch_insn, &contents[target]);

  return true;
}

/* Hash-table traversal helper applying the erratum 843419 workaround
   to one stub entry: either rewrite the offending ADRP as an ADR
   (when the target fits in the ADR immediate and the ADR workaround
   is enabled), or branch to a full veneer stub.  */

static bool
_bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
					    void *in_arg)
{
  struct elf_aarch64_stub_hash_entry *stub_entry
    = (struct elf_aarch64_stub_hash_entry *) gen_entry;
  struct erratum_835769_branch_to_stub_data *data
    = (struct erratum_835769_branch_to_stub_data *) in_arg;
  struct bfd_link_info *info;
  struct elf_aarch64_link_hash_table *htab;
  bfd_byte *contents;
  asection *section;
  bfd *abfd;
  bfd_vma place;
  uint32_t insn;

  info = data->info;
  contents = data->contents;
  section = data->output_section;

  htab = elf_aarch64_hash_table (info);

  /* Only handle 843419 veneers that live in the section being written.  */
  if (stub_entry->target_section != section
      || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
    return true;

  BFD_ASSERT (((htab->fix_erratum_843419 & ERRAT_ADRP) && stub_entry->stub_sec)
	      || (htab->fix_erratum_843419 & ERRAT_ADR));

  /* Only update the stub section if we have one.  We should always have one if
     we're allowed to use the ADRP errata workaround, otherwise it is not
     required.  */
  if (stub_entry->stub_sec)
    {
      /* Copy the veneered instruction into the stub.  */
      insn = bfd_getl32 (contents + stub_entry->target_value);
      bfd_putl32 (insn,
		  stub_entry->stub_sec->contents + stub_entry->stub_offset);
    }

  place = (section->output_section->vma + section->output_offset
	   + stub_entry->adrp_offset);
  insn = bfd_getl32 (contents + stub_entry->adrp_offset);

  if (!_bfd_aarch64_adrp_p (insn))
    abort ();

  /* Compute the PC-relative byte offset the ADRP resolves to, as seen
     from PLACE: the sign-extended 33-bit page immediate minus the low
     bits of PLACE (ADR is relative to the exact PC, not the page).  */
  bfd_signed_vma imm =
    (_bfd_aarch64_sign_extend
     ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
     - (place & 0xfff));

  if ((htab->fix_erratum_843419 & ERRAT_ADR)
      && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
    {
      /* The offset fits in an ADR: rewrite in place, keeping Rt.  */
      insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
	      | AARCH64_RT (insn));
      bfd_putl32 (insn, contents + stub_entry->adrp_offset);
      /* Stub is not needed, don't map it out.  */
      stub_entry->stub_type = aarch64_stub_none;
    }
  else if (htab->fix_erratum_843419 & ERRAT_ADRP)
    {
      /* Full workaround: replace the veneered instruction with an
	 unconditional branch (B) to the stub.  */
      bfd_vma veneered_insn_loc;
      bfd_vma veneer_entry_loc;
      bfd_signed_vma branch_offset;
      uint32_t branch_insn;

      veneered_insn_loc = stub_entry->target_section->output_section->vma
	+ stub_entry->target_section->output_offset
	+ stub_entry->target_value;
      veneer_entry_loc = stub_entry->stub_sec->output_section->vma
	+ stub_entry->stub_sec->output_offset
	+ stub_entry->stub_offset;
      branch_offset = veneer_entry_loc - veneered_insn_loc;

      abfd = stub_entry->target_section->owner;
      if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
	_bfd_error_handler
	  (_("%pB: error: erratum 843419 stub out "
	     "of range (input file too large)"), abfd);

      branch_insn = 0x14000000;
      branch_offset >>= 2;
      branch_offset &= 0x3ffffff;
      branch_insn |= branch_offset;
      bfd_putl32 (branch_insn, contents + stub_entry->target_value);
    }
  else
    {
      /* ADR-only mode was requested but the immediate is out of ADR
	 range and no stub section exists to branch to.  */
      abfd = stub_entry->target_section->owner;
      _bfd_error_handler
	(_("%pB: error: erratum 843419 immediate 0x%" PRIx64
	   " out of range for ADR (input file too large) and "
	   "--fix-cortex-a53-843419=adr used.  Run the linker with "
	   "--fix-cortex-a53-843419=full instead"),
	 abfd, (uint64_t) (bfd_vma) imm);
      bfd_set_error (bfd_error_bad_value);
      /* This function is called inside a hashtable traversal and the error
	 handlers called above turn into non-fatal errors.  Which means this
	 case ld returns an exit code 0 and also produces a broken object file.
	 To prevent this, issue a hard abort.  */
      BFD_FAIL ();
    }
  return true;
}


/* Section-write hook: patch erratum 835769/843419 branches into the
   contents of SEC before it is written out.  NOTE(review): always
   returns false — in this hook convention that appears to mean "the
   generic code should still write the (modified-in-place) contents";
   confirm against the elf_backend_write_section definition.  */

static bool
elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
			     struct bfd_link_info *link_info,
			     asection *sec,
			     bfd_byte *contents)

{
  struct elf_aarch64_link_hash_table *globals =
    elf_aarch64_hash_table (link_info);

  if (globals == NULL)
    return false;

  /* Fix code to point to erratum 835769 stubs.  */
  if (globals->fix_erratum_835769)
    {
      struct erratum_835769_branch_to_stub_data data;

      data.info = link_info;
      data.output_section = sec;
      data.contents = contents;
      bfd_hash_traverse (&globals->stub_hash_table,
			 make_branch_to_erratum_835769_stub, &data);
    }

  /* Likewise for erratum 843419 stubs.  */
  if (globals->fix_erratum_843419)
    {
      struct erratum_835769_branch_to_stub_data data;

      data.info = link_info;
      data.output_section = sec;
      data.contents = contents;
      bfd_hash_traverse (&globals->stub_hash_table,
			 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
    }

  return false;
}

/* Return TRUE if RELOC is a relocation against the base of GOT table.
   (Function name spells "against" as "aginst" — kept for ABI/caller
   compatibility.)  */

static bool
aarch64_relocation_aginst_gp_p (bfd_reloc_code_real_type reloc)
{
  return (reloc == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14
	  || reloc == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
	  || reloc == BFD_RELOC_AARCH64_LD64_GOTOFF_LO15
	  || reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC
	  || reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G1);
}

/* Perform a relocation as part of a final link.  The input relocation type
   should be TLS relaxed.
*/ 5630 5631 static bfd_reloc_status_type 5632 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto, 5633 bfd *input_bfd, 5634 bfd *output_bfd, 5635 asection *input_section, 5636 bfd_byte *contents, 5637 Elf_Internal_Rela *rel, 5638 bfd_vma value, 5639 struct bfd_link_info *info, 5640 asection *sym_sec, 5641 struct elf_link_hash_entry *h, 5642 bool *unresolved_reloc_p, 5643 bool save_addend, 5644 bfd_vma *saved_addend, 5645 Elf_Internal_Sym *sym) 5646 { 5647 Elf_Internal_Shdr *symtab_hdr; 5648 unsigned int r_type = howto->type; 5649 bfd_reloc_code_real_type bfd_r_type 5650 = elfNN_aarch64_bfd_reloc_from_howto (howto); 5651 unsigned long r_symndx; 5652 bfd_byte *hit_data = contents + rel->r_offset; 5653 bfd_vma place, off, got_entry_addr = 0; 5654 bfd_signed_vma signed_addend; 5655 struct elf_aarch64_link_hash_table *globals; 5656 bool weak_undef_p; 5657 bool relative_reloc; 5658 asection *base_got; 5659 bfd_vma orig_value = value; 5660 bool resolved_to_zero; 5661 bool abs_symbol_p; 5662 5663 globals = elf_aarch64_hash_table (info); 5664 5665 symtab_hdr = &elf_symtab_hdr (input_bfd); 5666 5667 BFD_ASSERT (is_aarch64_elf (input_bfd)); 5668 5669 r_symndx = ELFNN_R_SYM (rel->r_info); 5670 5671 place = input_section->output_section->vma 5672 + input_section->output_offset + rel->r_offset; 5673 5674 /* Get addend, accumulating the addend for consecutive relocs 5675 which refer to the same offset. */ 5676 signed_addend = saved_addend ? *saved_addend : 0; 5677 signed_addend += rel->r_addend; 5678 5679 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak 5680 : bfd_is_und_section (sym_sec)); 5681 abs_symbol_p = h != NULL && bfd_is_abs_symbol (&h->root); 5682 5683 5684 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle 5685 it here if it is defined in a non-shared object. 
*/ 5686 if (h != NULL 5687 && h->type == STT_GNU_IFUNC 5688 && h->def_regular) 5689 { 5690 asection *plt; 5691 const char *name; 5692 bfd_vma addend = 0; 5693 5694 if ((input_section->flags & SEC_ALLOC) == 0) 5695 { 5696 /* If this is a SHT_NOTE section without SHF_ALLOC, treat 5697 STT_GNU_IFUNC symbol as STT_FUNC. */ 5698 if (elf_section_type (input_section) == SHT_NOTE) 5699 goto skip_ifunc; 5700 5701 /* Dynamic relocs are not propagated for SEC_DEBUGGING 5702 sections because such sections are not SEC_ALLOC and 5703 thus ld.so will not process them. */ 5704 if ((input_section->flags & SEC_DEBUGGING) != 0) 5705 return bfd_reloc_ok; 5706 5707 if (h->root.root.string) 5708 name = h->root.root.string; 5709 else 5710 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, NULL); 5711 _bfd_error_handler 5712 /* xgettext:c-format */ 5713 (_("%pB(%pA+%#" PRIx64 "): " 5714 "unresolvable %s relocation against symbol `%s'"), 5715 input_bfd, input_section, (uint64_t) rel->r_offset, 5716 howto->name, name); 5717 bfd_set_error (bfd_error_bad_value); 5718 return bfd_reloc_notsupported; 5719 } 5720 else if (h->plt.offset == (bfd_vma) -1) 5721 goto bad_ifunc_reloc; 5722 5723 /* STT_GNU_IFUNC symbol must go through PLT. */ 5724 plt = globals->root.splt ? 
globals->root.splt : globals->root.iplt; 5725 value = (plt->output_section->vma + plt->output_offset + h->plt.offset); 5726 5727 switch (bfd_r_type) 5728 { 5729 default: 5730 bad_ifunc_reloc: 5731 if (h->root.root.string) 5732 name = h->root.root.string; 5733 else 5734 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, 5735 NULL); 5736 _bfd_error_handler 5737 /* xgettext:c-format */ 5738 (_("%pB: relocation %s against STT_GNU_IFUNC " 5739 "symbol `%s' isn't handled by %s"), input_bfd, 5740 howto->name, name, __func__); 5741 bfd_set_error (bfd_error_bad_value); 5742 return bfd_reloc_notsupported; 5743 5744 case BFD_RELOC_AARCH64_NN: 5745 if (rel->r_addend != 0) 5746 { 5747 if (h->root.root.string) 5748 name = h->root.root.string; 5749 else 5750 name = bfd_elf_sym_name (input_bfd, symtab_hdr, 5751 sym, NULL); 5752 _bfd_error_handler 5753 /* xgettext:c-format */ 5754 (_("%pB: relocation %s against STT_GNU_IFUNC " 5755 "symbol `%s' has non-zero addend: %" PRId64), 5756 input_bfd, howto->name, name, (int64_t) rel->r_addend); 5757 bfd_set_error (bfd_error_bad_value); 5758 return bfd_reloc_notsupported; 5759 } 5760 5761 /* Generate dynamic relocation only when there is a 5762 non-GOT reference in a shared object. */ 5763 if (bfd_link_pic (info) && h->non_got_ref) 5764 { 5765 Elf_Internal_Rela outrel; 5766 asection *sreloc; 5767 5768 /* Need a dynamic relocation to get the real function 5769 address. */ 5770 outrel.r_offset = _bfd_elf_section_offset (output_bfd, 5771 info, 5772 input_section, 5773 rel->r_offset); 5774 if (outrel.r_offset == (bfd_vma) -1 5775 || outrel.r_offset == (bfd_vma) -2) 5776 abort (); 5777 5778 outrel.r_offset += (input_section->output_section->vma 5779 + input_section->output_offset); 5780 5781 if (h->dynindx == -1 5782 || h->forced_local 5783 || bfd_link_executable (info)) 5784 { 5785 /* This symbol is resolved locally. 
*/ 5786 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE)); 5787 outrel.r_addend = (h->root.u.def.value 5788 + h->root.u.def.section->output_section->vma 5789 + h->root.u.def.section->output_offset); 5790 } 5791 else 5792 { 5793 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type); 5794 outrel.r_addend = 0; 5795 } 5796 5797 sreloc = globals->root.irelifunc; 5798 elf_append_rela (output_bfd, sreloc, &outrel); 5799 5800 /* If this reloc is against an external symbol, we 5801 do not want to fiddle with the addend. Otherwise, 5802 we need to include the symbol value so that it 5803 becomes an addend for the dynamic reloc. For an 5804 internal symbol, we have updated addend. */ 5805 return bfd_reloc_ok; 5806 } 5807 /* FALLTHROUGH */ 5808 case BFD_RELOC_AARCH64_CALL26: 5809 case BFD_RELOC_AARCH64_JUMP26: 5810 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 5811 place, value, 5812 signed_addend, 5813 weak_undef_p); 5814 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, 5815 howto, value); 5816 case BFD_RELOC_AARCH64_ADR_GOT_PAGE: 5817 case BFD_RELOC_AARCH64_GOT_LD_PREL19: 5818 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14: 5819 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC: 5820 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15: 5821 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC: 5822 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1: 5823 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15: 5824 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC: 5825 base_got = globals->root.sgot; 5826 off = h->got.offset; 5827 5828 if (base_got == NULL) 5829 abort (); 5830 5831 if (off == (bfd_vma) -1) 5832 { 5833 bfd_vma plt_index; 5834 5835 /* We can't use h->got.offset here to save state, or 5836 even just remember the offset, as finish_dynamic_symbol 5837 would use that as offset into .got. 
*/ 5838 5839 if (globals->root.splt != NULL) 5840 { 5841 plt_index = ((h->plt.offset - globals->plt_header_size) / 5842 globals->plt_entry_size); 5843 off = (plt_index + 3) * GOT_ENTRY_SIZE; 5844 base_got = globals->root.sgotplt; 5845 } 5846 else 5847 { 5848 plt_index = h->plt.offset / globals->plt_entry_size; 5849 off = plt_index * GOT_ENTRY_SIZE; 5850 base_got = globals->root.igotplt; 5851 } 5852 5853 if (h->dynindx == -1 5854 || h->forced_local 5855 || info->symbolic) 5856 { 5857 /* This references the local definition. We must 5858 initialize this entry in the global offset table. 5859 Since the offset must always be a multiple of 8, 5860 we use the least significant bit to record 5861 whether we have initialized it already. 5862 5863 When doing a dynamic link, we create a .rela.got 5864 relocation entry to initialize the value. This 5865 is done in the finish_dynamic_symbol routine. */ 5866 if ((off & 1) != 0) 5867 off &= ~1; 5868 else 5869 { 5870 bfd_put_NN (output_bfd, value, 5871 base_got->contents + off); 5872 /* Note that this is harmless as -1 | 1 still is -1. 
*/ 5873 h->got.offset |= 1; 5874 } 5875 } 5876 value = (base_got->output_section->vma 5877 + base_got->output_offset + off); 5878 } 5879 else 5880 value = aarch64_calculate_got_entry_vma (h, globals, info, 5881 value, output_bfd, 5882 unresolved_reloc_p); 5883 5884 if (aarch64_relocation_aginst_gp_p (bfd_r_type)) 5885 addend = (globals->root.sgot->output_section->vma 5886 + globals->root.sgot->output_offset); 5887 5888 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 5889 place, value, 5890 addend, weak_undef_p); 5891 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value); 5892 case BFD_RELOC_AARCH64_ADD_LO12: 5893 case BFD_RELOC_AARCH64_ADR_HI21_PCREL: 5894 break; 5895 } 5896 } 5897 5898 skip_ifunc: 5899 resolved_to_zero = (h != NULL 5900 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h)); 5901 5902 switch (bfd_r_type) 5903 { 5904 case BFD_RELOC_AARCH64_NONE: 5905 case BFD_RELOC_AARCH64_TLSDESC_ADD: 5906 case BFD_RELOC_AARCH64_TLSDESC_CALL: 5907 case BFD_RELOC_AARCH64_TLSDESC_LDR: 5908 *unresolved_reloc_p = false; 5909 return bfd_reloc_ok; 5910 5911 case BFD_RELOC_AARCH64_NN: 5912 5913 /* When generating a shared library or PIE, these relocations 5914 are copied into the output file to be resolved at run time. */ 5915 if ((bfd_link_pic (info) 5916 && (input_section->flags & SEC_ALLOC) 5917 && (h == NULL 5918 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 5919 && !resolved_to_zero) 5920 || h->root.type != bfd_link_hash_undefweak)) 5921 /* Or we are creating an executable, we may need to keep relocations 5922 for symbols satisfied by a dynamic library if we manage to avoid 5923 copy relocs for the symbol. 
*/ 5924 || (ELIMINATE_COPY_RELOCS 5925 && !bfd_link_pic (info) 5926 && h != NULL 5927 && (input_section->flags & SEC_ALLOC) 5928 && h->dynindx != -1 5929 && !h->non_got_ref 5930 && ((h->def_dynamic 5931 && !h->def_regular) 5932 || h->root.type == bfd_link_hash_undefweak 5933 || h->root.type == bfd_link_hash_undefined))) 5934 { 5935 Elf_Internal_Rela outrel; 5936 bfd_byte *loc; 5937 bool skip, relocate; 5938 asection *sreloc; 5939 5940 *unresolved_reloc_p = false; 5941 5942 skip = false; 5943 relocate = false; 5944 5945 outrel.r_addend = signed_addend; 5946 outrel.r_offset = 5947 _bfd_elf_section_offset (output_bfd, info, input_section, 5948 rel->r_offset); 5949 if (outrel.r_offset == (bfd_vma) - 1) 5950 skip = true; 5951 else if (outrel.r_offset == (bfd_vma) - 2) 5952 { 5953 skip = true; 5954 relocate = true; 5955 } 5956 else if (abs_symbol_p) 5957 { 5958 /* Local absolute symbol. */ 5959 skip = (h->forced_local || (h->dynindx == -1)); 5960 relocate = skip; 5961 } 5962 5963 outrel.r_offset += (input_section->output_section->vma 5964 + input_section->output_offset); 5965 5966 if (skip) 5967 memset (&outrel, 0, sizeof outrel); 5968 else if (h != NULL 5969 && h->dynindx != -1 5970 && (!bfd_link_pic (info) 5971 || !(bfd_link_pie (info) || SYMBOLIC_BIND (info, h)) 5972 || !h->def_regular)) 5973 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type); 5974 else 5975 { 5976 int symbol; 5977 5978 /* On SVR4-ish systems, the dynamic loader cannot 5979 relocate the text and data segments independently, 5980 so the symbol does not matter. 
*/ 5981 symbol = 0; 5982 relocate = !globals->no_apply_dynamic_relocs; 5983 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE)); 5984 outrel.r_addend += value; 5985 } 5986 5987 sreloc = elf_section_data (input_section)->sreloc; 5988 if (sreloc == NULL || sreloc->contents == NULL) 5989 return bfd_reloc_notsupported; 5990 5991 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals); 5992 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc); 5993 5994 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size) 5995 { 5996 /* Sanity to check that we have previously allocated 5997 sufficient space in the relocation section for the 5998 number of relocations we actually want to emit. */ 5999 abort (); 6000 } 6001 6002 /* If this reloc is against an external symbol, we do not want to 6003 fiddle with the addend. Otherwise, we need to include the symbol 6004 value so that it becomes an addend for the dynamic reloc. */ 6005 if (!relocate) 6006 return bfd_reloc_ok; 6007 6008 return _bfd_final_link_relocate (howto, input_bfd, input_section, 6009 contents, rel->r_offset, value, 6010 signed_addend); 6011 } 6012 else 6013 value += signed_addend; 6014 break; 6015 6016 case BFD_RELOC_AARCH64_CALL26: 6017 case BFD_RELOC_AARCH64_JUMP26: 6018 { 6019 asection *splt = globals->root.splt; 6020 bool via_plt_p = 6021 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1; 6022 6023 /* A call to an undefined weak symbol is converted to a jump to 6024 the next instruction unless a PLT entry will be created. 6025 The jump to the next instruction is optimized as a NOP. 6026 Do the same for local undefined symbols. */ 6027 if (weak_undef_p && ! via_plt_p) 6028 { 6029 bfd_putl32 (INSN_NOP, hit_data); 6030 return bfd_reloc_ok; 6031 } 6032 6033 /* If the call goes through a PLT entry, make sure to 6034 check distance to the right destination address. 
*/ 6035 if (via_plt_p) 6036 value = (splt->output_section->vma 6037 + splt->output_offset + h->plt.offset); 6038 6039 /* Check if a stub has to be inserted because the destination 6040 is too far away. */ 6041 struct elf_aarch64_stub_hash_entry *stub_entry = NULL; 6042 6043 /* If the branch destination is directed to plt stub, "value" will be 6044 the final destination, otherwise we should plus signed_addend, it may 6045 contain non-zero value, for example call to local function symbol 6046 which are turned into "sec_sym + sec_off", and sec_off is kept in 6047 signed_addend. */ 6048 if (! aarch64_valid_branch_p (via_plt_p ? value : value + signed_addend, 6049 place)) 6050 /* The target is out of reach, so redirect the branch to 6051 the local stub for this function. */ 6052 stub_entry = elfNN_aarch64_get_stub_entry (input_section, sym_sec, h, 6053 rel, globals); 6054 if (stub_entry != NULL) 6055 { 6056 value = (stub_entry->stub_offset 6057 + stub_entry->stub_sec->output_offset 6058 + stub_entry->stub_sec->output_section->vma); 6059 6060 /* We have redirected the destination to stub entry address, 6061 so ignore any addend record in the original rela entry. 
*/ 6062 signed_addend = 0; 6063 } 6064 } 6065 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6066 place, value, 6067 signed_addend, weak_undef_p); 6068 *unresolved_reloc_p = false; 6069 break; 6070 6071 case BFD_RELOC_AARCH64_16_PCREL: 6072 case BFD_RELOC_AARCH64_32_PCREL: 6073 case BFD_RELOC_AARCH64_64_PCREL: 6074 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL: 6075 case BFD_RELOC_AARCH64_ADR_HI21_PCREL: 6076 case BFD_RELOC_AARCH64_ADR_LO21_PCREL: 6077 case BFD_RELOC_AARCH64_LD_LO19_PCREL: 6078 case BFD_RELOC_AARCH64_MOVW_PREL_G0: 6079 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC: 6080 case BFD_RELOC_AARCH64_MOVW_PREL_G1: 6081 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC: 6082 case BFD_RELOC_AARCH64_MOVW_PREL_G2: 6083 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC: 6084 case BFD_RELOC_AARCH64_MOVW_PREL_G3: 6085 if (bfd_link_pic (info) 6086 && (input_section->flags & SEC_ALLOC) != 0 6087 && (input_section->flags & SEC_READONLY) != 0 6088 && !_bfd_elf_symbol_refs_local_p (h, info, 1)) 6089 { 6090 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; 6091 6092 _bfd_error_handler 6093 /* xgettext:c-format */ 6094 (_("%pB: relocation %s against symbol `%s' which may bind " 6095 "externally can not be used when making a shared object; " 6096 "recompile with -fPIC"), 6097 input_bfd, elfNN_aarch64_howto_table[howto_index].name, 6098 h->root.root.string); 6099 bfd_set_error (bfd_error_bad_value); 6100 return bfd_reloc_notsupported; 6101 } 6102 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6103 place, value, 6104 signed_addend, 6105 weak_undef_p); 6106 break; 6107 6108 case BFD_RELOC_AARCH64_BRANCH19: 6109 case BFD_RELOC_AARCH64_TSTBR14: 6110 if (h && h->root.type == bfd_link_hash_undefined) 6111 { 6112 _bfd_error_handler 6113 /* xgettext:c-format */ 6114 (_("%pB: conditional branch to undefined symbol `%s' " 6115 "not allowed"), input_bfd, h->root.root.string); 6116 bfd_set_error (bfd_error_bad_value); 6117 return bfd_reloc_notsupported; 6118 } 
6119 /* Fall through. */ 6120 6121 case BFD_RELOC_AARCH64_16: 6122 #if ARCH_SIZE == 64 6123 case BFD_RELOC_AARCH64_32: 6124 #endif 6125 case BFD_RELOC_AARCH64_ADD_LO12: 6126 case BFD_RELOC_AARCH64_LDST128_LO12: 6127 case BFD_RELOC_AARCH64_LDST16_LO12: 6128 case BFD_RELOC_AARCH64_LDST32_LO12: 6129 case BFD_RELOC_AARCH64_LDST64_LO12: 6130 case BFD_RELOC_AARCH64_LDST8_LO12: 6131 case BFD_RELOC_AARCH64_MOVW_G0: 6132 case BFD_RELOC_AARCH64_MOVW_G0_NC: 6133 case BFD_RELOC_AARCH64_MOVW_G0_S: 6134 case BFD_RELOC_AARCH64_MOVW_G1: 6135 case BFD_RELOC_AARCH64_MOVW_G1_NC: 6136 case BFD_RELOC_AARCH64_MOVW_G1_S: 6137 case BFD_RELOC_AARCH64_MOVW_G2: 6138 case BFD_RELOC_AARCH64_MOVW_G2_NC: 6139 case BFD_RELOC_AARCH64_MOVW_G2_S: 6140 case BFD_RELOC_AARCH64_MOVW_G3: 6141 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6142 place, value, 6143 signed_addend, weak_undef_p); 6144 break; 6145 6146 case BFD_RELOC_AARCH64_ADR_GOT_PAGE: 6147 case BFD_RELOC_AARCH64_GOT_LD_PREL19: 6148 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14: 6149 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC: 6150 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15: 6151 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC: 6152 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15: 6153 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC: 6154 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1: 6155 if (globals->root.sgot == NULL) 6156 BFD_ASSERT (h != NULL); 6157 6158 relative_reloc = false; 6159 if (h != NULL) 6160 { 6161 bfd_vma addend = 0; 6162 6163 /* If a symbol is not dynamic and is not undefined weak, bind it 6164 locally and generate a RELATIVE relocation under PIC mode. 6165 6166 NOTE: one symbol may be referenced by several relocations, we 6167 should only generate one RELATIVE relocation for that symbol. 6168 Therefore, check GOT offset mark first. 
*/ 6169 if (h->dynindx == -1 6170 && !h->forced_local 6171 && h->root.type != bfd_link_hash_undefweak 6172 && bfd_link_pic (info) 6173 && !symbol_got_offset_mark_p (input_bfd, h, r_symndx)) 6174 relative_reloc = true; 6175 6176 value = aarch64_calculate_got_entry_vma (h, globals, info, value, 6177 output_bfd, 6178 unresolved_reloc_p); 6179 /* Record the GOT entry address which will be used when generating 6180 RELATIVE relocation. */ 6181 if (relative_reloc) 6182 got_entry_addr = value; 6183 6184 if (aarch64_relocation_aginst_gp_p (bfd_r_type)) 6185 addend = (globals->root.sgot->output_section->vma 6186 + globals->root.sgot->output_offset); 6187 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6188 place, value, 6189 addend, weak_undef_p); 6190 } 6191 else 6192 { 6193 bfd_vma addend = 0; 6194 struct elf_aarch64_local_symbol *locals 6195 = elf_aarch64_locals (input_bfd); 6196 6197 if (locals == NULL) 6198 { 6199 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; 6200 _bfd_error_handler 6201 /* xgettext:c-format */ 6202 (_("%pB: local symbol descriptor table be NULL when applying " 6203 "relocation %s against local symbol"), 6204 input_bfd, elfNN_aarch64_howto_table[howto_index].name); 6205 abort (); 6206 } 6207 6208 off = symbol_got_offset (input_bfd, h, r_symndx); 6209 base_got = globals->root.sgot; 6210 got_entry_addr = (base_got->output_section->vma 6211 + base_got->output_offset + off); 6212 6213 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx)) 6214 { 6215 bfd_put_64 (output_bfd, value, base_got->contents + off); 6216 6217 /* For local symbol, we have done absolute relocation in static 6218 linking stage. While for shared library, we need to update the 6219 content of GOT entry according to the shared object's runtime 6220 base address. So, we need to generate a R_AARCH64_RELATIVE reloc 6221 for dynamic linker. 
*/ 6222 if (bfd_link_pic (info)) 6223 relative_reloc = true; 6224 6225 symbol_got_offset_mark (input_bfd, h, r_symndx); 6226 } 6227 6228 /* Update the relocation value to GOT entry addr as we have transformed 6229 the direct data access into indirect data access through GOT. */ 6230 value = got_entry_addr; 6231 6232 if (aarch64_relocation_aginst_gp_p (bfd_r_type)) 6233 addend = base_got->output_section->vma + base_got->output_offset; 6234 6235 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6236 place, value, 6237 addend, weak_undef_p); 6238 } 6239 6240 if (relative_reloc) 6241 { 6242 asection *s; 6243 Elf_Internal_Rela outrel; 6244 6245 s = globals->root.srelgot; 6246 if (s == NULL) 6247 abort (); 6248 6249 outrel.r_offset = got_entry_addr; 6250 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE)); 6251 outrel.r_addend = orig_value; 6252 elf_append_rela (output_bfd, s, &outrel); 6253 } 6254 break; 6255 6256 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC: 6257 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21: 6258 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21: 6259 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 6260 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC: 6261 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 6262 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19: 6263 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC: 6264 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21: 6265 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21: 6266 if (globals->root.sgot == NULL) 6267 return bfd_reloc_notsupported; 6268 6269 value = (symbol_got_offset (input_bfd, h, r_symndx) 6270 + globals->root.sgot->output_section->vma 6271 + globals->root.sgot->output_offset); 6272 6273 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6274 place, value, 6275 0, weak_undef_p); 6276 *unresolved_reloc_p = false; 6277 break; 6278 6279 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC: 6280 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1: 6281 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: 6282 case 
BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1: 6283 if (globals->root.sgot == NULL) 6284 return bfd_reloc_notsupported; 6285 6286 value = symbol_got_offset (input_bfd, h, r_symndx); 6287 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6288 place, value, 6289 0, weak_undef_p); 6290 *unresolved_reloc_p = false; 6291 break; 6292 6293 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12: 6294 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12: 6295 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: 6296 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12: 6297 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC: 6298 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12: 6299 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC: 6300 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12: 6301 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC: 6302 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12: 6303 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC: 6304 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0: 6305 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: 6306 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1: 6307 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC: 6308 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2: 6309 { 6310 if (!(weak_undef_p || elf_hash_table (info)->tls_sec)) 6311 { 6312 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; 6313 _bfd_error_handler 6314 /* xgettext:c-format */ 6315 (_("%pB: TLS relocation %s against undefined symbol `%s'"), 6316 input_bfd, elfNN_aarch64_howto_table[howto_index].name, 6317 h->root.root.string); 6318 bfd_set_error (bfd_error_bad_value); 6319 return bfd_reloc_notsupported; 6320 } 6321 6322 bfd_vma def_value 6323 = weak_undef_p ? 
0 : signed_addend - dtpoff_base (info); 6324 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6325 place, value, 6326 def_value, weak_undef_p); 6327 break; 6328 } 6329 6330 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12: 6331 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12: 6332 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC: 6333 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12: 6334 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC: 6335 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12: 6336 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC: 6337 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12: 6338 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC: 6339 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12: 6340 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC: 6341 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0: 6342 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC: 6343 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1: 6344 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC: 6345 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2: 6346 { 6347 if (!(weak_undef_p || elf_hash_table (info)->tls_sec)) 6348 { 6349 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; 6350 _bfd_error_handler 6351 /* xgettext:c-format */ 6352 (_("%pB: TLS relocation %s against undefined symbol `%s'"), 6353 input_bfd, elfNN_aarch64_howto_table[howto_index].name, 6354 h->root.root.string); 6355 bfd_set_error (bfd_error_bad_value); 6356 return bfd_reloc_notsupported; 6357 } 6358 6359 bfd_vma def_value 6360 = weak_undef_p ? 
0 : signed_addend - tpoff_base (info); 6361 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6362 place, value, 6363 def_value, weak_undef_p); 6364 *unresolved_reloc_p = false; 6365 break; 6366 } 6367 6368 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12: 6369 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21: 6370 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21: 6371 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC: 6372 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12: 6373 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19: 6374 if (globals->root.sgot == NULL) 6375 return bfd_reloc_notsupported; 6376 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx) 6377 + globals->root.sgotplt->output_section->vma 6378 + globals->root.sgotplt->output_offset 6379 + globals->sgotplt_jump_table_size); 6380 6381 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6382 place, value, 6383 0, weak_undef_p); 6384 *unresolved_reloc_p = false; 6385 break; 6386 6387 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC: 6388 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1: 6389 if (globals->root.sgot == NULL) 6390 return bfd_reloc_notsupported; 6391 6392 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx) 6393 + globals->root.sgotplt->output_section->vma 6394 + globals->root.sgotplt->output_offset 6395 + globals->sgotplt_jump_table_size); 6396 6397 value -= (globals->root.sgot->output_section->vma 6398 + globals->root.sgot->output_offset); 6399 6400 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type, 6401 place, value, 6402 0, weak_undef_p); 6403 *unresolved_reloc_p = false; 6404 break; 6405 6406 default: 6407 return bfd_reloc_notsupported; 6408 } 6409 6410 if (saved_addend) 6411 *saved_addend = value; 6412 6413 /* Only apply the final relocation in a sequence. 
*/ 6414 if (save_addend) 6415 return bfd_reloc_continue; 6416 6417 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, 6418 howto, value); 6419 } 6420 6421 /* LP64 and ILP32 operates on x- and w-registers respectively. 6422 Next definitions take into account the difference between 6423 corresponding machine codes. R means x-register if the target 6424 arch is LP64, and w-register if the target is ILP32. */ 6425 6426 #if ARCH_SIZE == 64 6427 # define add_R0_R0 (0x91000000) 6428 # define add_R0_R0_R1 (0x8b000020) 6429 # define add_R0_R1 (0x91400020) 6430 # define ldr_R0 (0x58000000) 6431 # define ldr_R0_mask(i) (i & 0xffffffe0) 6432 # define ldr_R0_x0 (0xf9400000) 6433 # define ldr_hw_R0 (0xf2a00000) 6434 # define movk_R0 (0xf2800000) 6435 # define movz_R0 (0xd2a00000) 6436 # define movz_hw_R0 (0xd2c00000) 6437 #else /*ARCH_SIZE == 32 */ 6438 # define add_R0_R0 (0x11000000) 6439 # define add_R0_R0_R1 (0x0b000020) 6440 # define add_R0_R1 (0x11400020) 6441 # define ldr_R0 (0x18000000) 6442 # define ldr_R0_mask(i) (i & 0xbfffffe0) 6443 # define ldr_R0_x0 (0xb9400000) 6444 # define ldr_hw_R0 (0x72a00000) 6445 # define movk_R0 (0x72800000) 6446 # define movz_R0 (0x52a00000) 6447 # define movz_hw_R0 (0x52c00000) 6448 #endif 6449 6450 /* Structure to hold payload for _bfd_aarch64_erratum_843419_clear_stub, 6451 it is used to identify the stub information to reset. */ 6452 6453 struct erratum_843419_branch_to_stub_clear_data 6454 { 6455 bfd_vma adrp_offset; 6456 asection *output_section; 6457 }; 6458 6459 /* Clear the erratum information for GEN_ENTRY if the ADRP_OFFSET and 6460 section inside IN_ARG matches. The clearing is done by setting the 6461 stub_type to none. 
*/ 6462 6463 static bool 6464 _bfd_aarch64_erratum_843419_clear_stub (struct bfd_hash_entry *gen_entry, 6465 void *in_arg) 6466 { 6467 struct elf_aarch64_stub_hash_entry *stub_entry 6468 = (struct elf_aarch64_stub_hash_entry *) gen_entry; 6469 struct erratum_843419_branch_to_stub_clear_data *data 6470 = (struct erratum_843419_branch_to_stub_clear_data *) in_arg; 6471 6472 if (stub_entry->target_section != data->output_section 6473 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer 6474 || stub_entry->adrp_offset != data->adrp_offset) 6475 return true; 6476 6477 /* Change the stub type instead of removing the entry, removing from the hash 6478 table would be slower and we have already reserved the memory for the entry 6479 so there wouldn't be much gain. Changing the stub also keeps around a 6480 record of what was there before. */ 6481 stub_entry->stub_type = aarch64_stub_none; 6482 6483 /* We're done and there could have been only one matching stub at that 6484 particular offset, so abort further traversal. */ 6485 return false; 6486 } 6487 6488 /* TLS Relaxations may relax an adrp sequence that matches the erratum 843419 6489 sequence. In this case the erratum no longer applies and we need to remove 6490 the entry from the pending stub generation. This clears matching adrp insn 6491 at ADRP_OFFSET in INPUT_SECTION in the stub table defined in GLOBALS. */ 6492 6493 static void 6494 clear_erratum_843419_entry (struct elf_aarch64_link_hash_table *globals, 6495 bfd_vma adrp_offset, asection *input_section) 6496 { 6497 if (globals->fix_erratum_843419 & ERRAT_ADRP) 6498 { 6499 struct erratum_843419_branch_to_stub_clear_data data; 6500 data.adrp_offset = adrp_offset; 6501 data.output_section = input_section; 6502 6503 bfd_hash_traverse (&globals->stub_hash_table, 6504 _bfd_aarch64_erratum_843419_clear_stub, &data); 6505 } 6506 } 6507 6508 /* Handle TLS relaxations. 
   Relaxing is possible for symbols that use
   R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
   link.

   Relaxation rewrites the TLS access sequence in place in CONTENTS and,
   where needed, retargets or kills the companion relocations that follow
   REL (the caller must therefore pass a pointer into the full relocation
   array, not a copy of a single entry).

   Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
   is to then call final_link_relocate.  Return other values in the
   case of error.  */

static bfd_reloc_status_type
elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
			 bfd *input_bfd, asection *input_section,
			 bfd_byte *contents, Elf_Internal_Rela *rel,
			 struct elf_link_hash_entry *h,
			 struct bfd_link_info *info)
{
  /* A symbol that resolves locally in an executable can be relaxed all the
     way to a Local-Exec (LE) access; otherwise only an Initial-Exec (IE)
     access is possible.  */
  bool local_exec = bfd_link_executable (info)
    && SYMBOL_REFERENCES_LOCAL (info, h);
  unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
  unsigned long insn;

  BFD_ASSERT (globals && input_bfd && contents && rel);

  switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type))
    {
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
      if (local_exec)
	{
	  /* GD->LE relaxation:
	     adrp x0, :tlsgd:var     =>	movz R0, :tprel_g1:var
	     or
	     adrp x0, :tlsdesc:var   =>	movz R0, :tprel_g1:var

	     Where R is x for LP64, and w for ILP32.  */
	  bfd_putl32 (movz_R0, contents + rel->r_offset);
	  /* We have relaxed the adrp into a mov, we may have to clear any
	     pending erratum fixes.  */
	  clear_erratum_843419_entry (globals, rel->r_offset, input_section);
	  return bfd_reloc_continue;
	}
      else
	{
	  /* GD->IE relaxation:
	     adrp x0, :tlsgd:var     =>	adrp x0, :gottprel:var
	     or
	     adrp x0, :tlsdesc:var   =>	adrp x0, :gottprel:var

	     The instruction is unchanged; only the relocation applied to it
	     later differs, so leave the patching to the caller.  */
	  return bfd_reloc_continue;
	}

    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
      /* The tiny-model TLSDESC adr is always consumed while handling the
	 preceding TLSDESC_LD_PREL19 below; reaching it directly is a bug.  */
      BFD_ASSERT (0);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
      if (local_exec)
	{
	  /* Tiny TLSDESC->LE relaxation:
	     ldr   x1, :tlsdesc:var      =>  movz  R0, #:tprel_g1:var
	     adr   x0, :tlsdesc:var      =>  movk  R0, #:tprel_g0_nc:var
	     .tlsdesccall var
	     blr   x1                    =>  nop

	     Where R is x for LP64, and w for ILP32.  */
	  BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
	  BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));

	  /* Retarget the adr's relocation at the new movk, and kill the
	     relocation on the blr.  */
	  rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
					AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
	  rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);

	  bfd_putl32 (movz_R0, contents + rel->r_offset);
	  bfd_putl32 (movk_R0, contents + rel->r_offset + 4);
	  bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
	  return bfd_reloc_continue;
	}
      else
	{
	  /* Tiny TLSDESC->IE relaxation:
	     ldr   x1, :tlsdesc:var      =>  ldr   x0, :gottprel:var
	     adr   x0, :tlsdesc:var      =>  nop
	     .tlsdesccall var
	     blr   x1                    =>  nop
	   */
	  BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
	  BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));

	  /* The adr and blr become nops, so their relocations go away.  */
	  rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
	  rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);

	  bfd_putl32 (ldr_R0, contents + rel->r_offset);
	  bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
	  bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
	  return bfd_reloc_continue;
	}

    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
      if (local_exec)
	{
	  /* Tiny GD->LE relaxation:
	     adr x0, :tlsgd:var      =>   mrs  x1, tpidr_el0
	     bl   __tls_get_addr     =>   add  R0, R1, #:tprel_hi12:x, lsl #12
	     nop                     =>   add  R0, R0, #:tprel_lo12_nc:x

	     Where R is x for LP64, and x for Ilp32.  */

	  /* The reloc on the bl is not dropped: it is repurposed below to
	     apply the tprel_lo12 to the final add.  First check the bl
	     immediately follows the adr.  */
	  BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);

	  bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);  /* mrs x1, tpidr_el0.  */
	  bfd_putl32 (add_R0_R1, contents + rel->r_offset + 4);
	  bfd_putl32 (add_R0_R0, contents + rel->r_offset + 8);

	  rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
					AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
	  rel[1].r_offset = rel->r_offset + 8;

	  /* Move the current relocation to the second instruction in
	     the sequence.  */
	  rel->r_offset += 4;
	  rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
				      AARCH64_R (TLSLE_ADD_TPREL_HI12));
	  return bfd_reloc_continue;
	}
      else
	{
	  /* Tiny GD->IE relaxation:
	     adr x0, :tlsgd:var      =>   ldr  R0, :gottprel:var
	     bl   __tls_get_addr     =>   mrs  x1, tpidr_el0
	     nop                     =>   add  R0, R0, R1

	     Where R is x for LP64, and w for Ilp32.  */

	  /* First kill the tls_get_addr reloc on the bl instruction.  */
	  BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
	  rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);

	  bfd_putl32 (ldr_R0, contents + rel->r_offset);
	  bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);  /* mrs x1, tpidr_el0.  */
	  bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8);
	  return bfd_reloc_continue;
	}

#if ARCH_SIZE == 64
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      /* Large-model GD: a fixed five-instruction sequence is expected.  */
      BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSGD_MOVW_G0_NC));
      BFD_ASSERT (rel->r_offset + 12 == rel[2].r_offset);
      BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (CALL26));

      if (local_exec)
	{
	  /* Large GD->LE relaxation:
	     movz x0, #:tlsgd_g1:var    => movz x0, #:tprel_g2:var, lsl #32
	     movk x0, #:tlsgd_g0_nc:var => movk x0, #:tprel_g1_nc:var, lsl #16
	     add x0, gp, x0             => movk x0, #:tprel_g0_nc:var
	     bl __tls_get_addr          => mrs x1, tpidr_el0
	     nop                        => add x0, x0, x1
	   */
	  /* Repurpose the CALL26 reloc as the tprel_g0_nc on the new movk.  */
	  rel[2].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
					AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
	  rel[2].r_offset = rel->r_offset + 8;

	  bfd_putl32 (movz_hw_R0, contents + rel->r_offset + 0);
	  bfd_putl32 (ldr_hw_R0, contents + rel->r_offset + 4);
	  bfd_putl32 (movk_R0, contents + rel->r_offset + 8);
	  bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);  /* mrs x1, tpidr_el0.  */
	  bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16);
	}
      else
	{
	  /* Large GD->IE relaxation:
	     movz x0, #:tlsgd_g1:var    => movz x0, #:gottprel_g1:var, lsl #16
	     movk x0, #:tlsgd_g0_nc:var => movk x0, #:gottprel_g0_nc:var
	     add x0, gp, x0             => ldr x0, [gp, x0]
	     bl __tls_get_addr          => mrs x1, tpidr_el0
	     nop                        => add x0, x0, x1
	   */
	  rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
	  bfd_putl32 (0xd2a80000, contents + rel->r_offset + 0);  /* movz x0, #:gottprel_g1:var, lsl #16.  */
	  bfd_putl32 (ldr_R0, contents + rel->r_offset + 8);
	  bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);  /* mrs x1, tpidr_el0.  */
	  bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16);
	}
      return bfd_reloc_continue;

    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
      /* Rewritten as part of the TLSGD_MOVW_G1 sequence above; only the
	 relocation type applied later changes.  */
      return bfd_reloc_continue;
#endif

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
      return bfd_reloc_continue;

    case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
      if (local_exec)
	{
	  /* GD->LE relaxation:
	     ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var

	     Where R is x for lp64 mode, and w for ILP32 mode.  */
	  bfd_putl32 (movk_R0, contents + rel->r_offset);
	  return bfd_reloc_continue;
	}
      else
	{
	  /* GD->IE relaxation:
	     ldr xd, [x0, #:tlsdesc_lo12:var] => ldr R0, [x0, #:gottprel_lo12:var]

	     Where R is x for lp64 mode, and w for ILP32 mode.  */
	  /* The mask forces the destination register to R0 while keeping
	     the rest of the original load encoding.  */
	  insn = bfd_getl32 (contents + rel->r_offset);
	  bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset);
	  return bfd_reloc_continue;
	}

    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
      if (local_exec)
	{
	  /* GD->LE relaxation
	     add  x0, #:tlsgd_lo12:var  => movk R0, :tprel_g0_nc:var
	     bl   __tls_get_addr        => mrs  x1, tpidr_el0
	     nop                        => add  R0, R1, R0

	     Where R is x for lp64 mode, and w for ILP32 mode.  */

	  /* First kill the tls_get_addr reloc on the bl instruction.  */
	  BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
	  rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);

	  bfd_putl32 (movk_R0, contents + rel->r_offset);
	  bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);  /* mrs x1, tpidr_el0.  */
	  bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8);
	  return bfd_reloc_continue;
	}
      else
	{
	  /* GD->IE relaxation
	     ADD  x0, #:tlsgd_lo12:var  => ldr  R0, [x0, #:gottprel_lo12:var]
	     BL   __tls_get_addr        => mrs  x1, tpidr_el0
	       R_AARCH64_CALL26
	     NOP                        => add  R0, R1, R0

	     Where R is x for lp64 mode, and w for ilp32 mode.  */

	  BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));

	  /* Remove the relocation on the BL instruction.  */
	  rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);

	  /* We choose to fixup the BL and NOP instructions using the
	     offset from the second relocation to allow flexibility in
	     scheduling instructions between the ADD and BL.  */
	  bfd_putl32 (ldr_R0_x0, contents + rel->r_offset);
	  bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);  /* mrs x1, tpidr_el0.  */
	  bfd_putl32 (add_R0_R0_R1, contents + rel[1].r_offset + 4);
	  return bfd_reloc_continue;
	}

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
      /* GD->IE/LE relaxation:
	 add x0, x0, #:tlsdesc_lo12:var	  =>   nop
	 blr xd				  =>   nop

	 The instruction carries no payload once relaxed, so no further
	 relocation processing is needed: return bfd_reloc_ok.  */
      bfd_putl32 (INSN_NOP, contents + rel->r_offset);
      return bfd_reloc_ok;

    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      if (local_exec)
	{
	  /* GD->LE relaxation:
	     ldr xd, [gp, xn]   =>   movk R0, #:tprel_g0_nc:var

	     Where R is x for lp64 mode, and w for ILP32 mode.  */
	  bfd_putl32 (movk_R0, contents + rel->r_offset);
	  return bfd_reloc_continue;
	}
      else
	{
	  /* GD->IE relaxation:
	     ldr xd, [gp, xn]   =>   ldr R0, [gp, xn]

	     Where R is x for lp64 mode, and w for ILP32 mode.  */
	  insn = bfd_getl32 (contents + rel->r_offset);
	  bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset);
	  return bfd_reloc_ok;
	}

    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      /* GD->LE relaxation:
	 movk xd, #:tlsdesc_off_g0_nc:var => movk R0, #:tprel_g1_nc:var, lsl #16
	 GD->IE relaxation:
	 movk xd, #:tlsdesc_off_g0_nc:var => movk Rd, #:gottprel_g0_nc:var

	 Where R is x for lp64 mode, and w for ILP32 mode.  */
      if (local_exec)
	bfd_putl32 (ldr_hw_R0, contents + rel->r_offset);
      return bfd_reloc_continue;

    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      if (local_exec)
	{
	  /* GD->LE relaxation:
	     movz xd, #:tlsdesc_off_g1:var => movz R0, #:tprel_g2:var, lsl #32

	     Where R is x for lp64 mode, and w for ILP32 mode.  */
	  bfd_putl32 (movz_hw_R0, contents + rel->r_offset);
	  return bfd_reloc_continue;
	}
      else
	{
	  /* GD->IE relaxation:
	     movz xd, #:tlsdesc_off_g1:var => movz Rd, #:gottprel_g1:var, lsl #16

	     Where R is x for lp64 mode, and w for ILP32 mode.  */
	  /* (insn & 0x1f) keeps the original destination register Rd.  */
	  insn = bfd_getl32 (contents + rel->r_offset);
	  bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset);
	  return bfd_reloc_continue;
	}

    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
      /* IE->LE relaxation:
	 adrp xd, :gottprel:var   =>   movz Rd, :tprel_g1:var

	 Where R is x for lp64 mode, and w for ILP32 mode.  */
      if (local_exec)
	{
	  insn = bfd_getl32 (contents + rel->r_offset);
	  bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset);
	  /* We have relaxed the adrp into a mov, we may have to clear any
	     pending erratum fixes.  */
	  clear_erratum_843419_entry (globals, rel->r_offset, input_section);
	}
      return bfd_reloc_continue;

    case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
      /* IE->LE relaxation:
	 ldr xd, [xm, #:gottprel_lo12:var]   =>   movk Rd, :tprel_g0_nc:var

	 Where R is x for lp64 mode, and w for ILP32 mode.  */
      if (local_exec)
	{
	  insn = bfd_getl32 (contents + rel->r_offset);
	  bfd_putl32 (movk_R0 | (insn & 0x1f), contents + rel->r_offset);
	}
      return bfd_reloc_continue;

    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
      /* LD->LE relaxation (tiny):
	 adr  x0, :tlsldm:x  => mrs x0, tpidr_el0
	 bl   __tls_get_addr => add R0, R0, TCB_SIZE

	 Where R is x for lp64 mode, and w for ilp32 mode.  */
      if (local_exec)
	{
	  BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
	  BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
	  /* No need of CALL26 relocation for tls_get_addr.  */
	  rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
	  bfd_putl32 (0xd53bd040, contents + rel->r_offset + 0);  /* mrs x0, tpidr_el0.  */
	  bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
		      contents + rel->r_offset + 4);
	  return bfd_reloc_ok;
	}
      return bfd_reloc_continue;

    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
      /* LD->LE relaxation (small):
	 adrp  x0, :tlsldm:x    => mrs x0, tpidr_el0
       */
      if (local_exec)
	{
	  bfd_putl32 (0xd53bd040, contents + rel->r_offset);  /* mrs x0, tpidr_el0.  */
	  return bfd_reloc_ok;
	}
      return bfd_reloc_continue;

    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
      /* LD->LE relaxation (small):
	 add   x0, #:tlsldm_lo12:x   => add R0, R0, TCB_SIZE
	 bl   __tls_get_addr         => nop

	 Where R is x for lp64 mode, and w for ilp32 mode.  */
      if (local_exec)
	{
	  BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
	  BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
	  /* No need of CALL26 relocation for tls_get_addr.  */
	  rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
	  bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
		      contents + rel->r_offset + 0);
	  bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
	  return bfd_reloc_ok;
	}
      return bfd_reloc_continue;

    default:
      return bfd_reloc_continue;
    }

  return bfd_reloc_ok;
}

/* Relocate an AArch64 ELF section. 
*/

static int
elfNN_aarch64_relocate_section (bfd *output_bfd,
				struct bfd_link_info *info,
				bfd *input_bfd,
				asection *input_section,
				bfd_byte *contents,
				Elf_Internal_Rela *relocs,
				Elf_Internal_Sym *local_syms,
				asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf_aarch64_link_hash_table *globals;
  /* When consecutive relocs share an offset, the result of one reloc
     is carried into the next via ADDEND; SAVE_ADDEND flags that case.  */
  bool save_addend = false;
  bfd_vma addend = 0;

  globals = elf_aarch64_hash_table (info);

  symtab_hdr = &elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      unsigned int r_type;
      bfd_reloc_code_real_type bfd_r_type;
      bfd_reloc_code_real_type relaxed_bfd_r_type;
      reloc_howto_type *howto;
      unsigned long r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      bfd_vma relocation;
      bfd_reloc_status_type r;
      arelent bfd_reloc;
      char sym_type;
      bool unresolved_reloc = false;
      char *error_message = NULL;

      r_symndx = ELFNN_R_SYM (rel->r_info);
      r_type = ELFNN_R_TYPE (rel->r_info);

      bfd_reloc.howto = elfNN_aarch64_howto_from_type (input_bfd, r_type);
      howto = bfd_reloc.howto;

      if (howto == NULL)
	return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);

      bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);

      h = NULL;
      sym = NULL;
      sec = NULL;

      /* Resolve the symbol the reloc is against: local symbols are
	 resolved here, globals via RELOC_FOR_GLOBAL_SYMBOL below.  */
      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sym_type = ELFNN_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  NONE and NULL
	     relocations do not use the symbol and are explicitly
	     allowed to use an undefined one, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    (*info->callbacks->undefined_symbol)
	      (info, bfd_elf_string_from_elf_section
	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
	       input_bfd, input_section, rel->r_offset, true);

	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);

	  /* Relocate against local STT_GNU_IFUNC symbol.  */
	  if (!bfd_link_relocatable (info)
	      && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
	    {
	      h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
						    rel, false);
	      if (h == NULL)
		abort ();

	      /* Set STT_GNU_IFUNC symbol value.  */
	      h->root.u.def.value = sym->st_value;
	      h->root.u.def.section = sec;
	    }
	}
      else
	{
	  bool warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	continue;

      /* Pick a symbol name for diagnostics; fall back to the section
	 name for anonymous local symbols.  */
      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (sec);
	}

      /* Diagnose a TLS reloc applied to a non-TLS symbol and vice
	 versa (warning only; processing continues).  */
      if (r_symndx != 0
	  && r_type != R_AARCH64_NONE
	  && r_type != R_AARCH64_NULL
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
	{
	  _bfd_error_handler
	    ((sym_type == STT_TLS
	      /* xgettext:c-format */
	      ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
	      /* xgettext:c-format */
	      : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section, (uint64_t) rel->r_offset, howto->name, name);
	}

      /* We relax only if we can see that there can be a valid transition
	 from a reloc type to another.
	 We call elfNN_aarch64_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want.  */

      relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
						   h, r_symndx);
      if (relaxed_bfd_r_type != bfd_r_type)
	{
	  /* A TLS transition applies: rewrite the instruction stream
	     and continue with the relaxed reloc type.  */
	  bfd_r_type = relaxed_bfd_r_type;
	  howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
	  BFD_ASSERT (howto != NULL);
	  r_type = howto->type;
	  r = elfNN_aarch64_tls_relax (globals, input_bfd, input_section,
				       contents, rel, h, info);
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      /* There may be multiple consecutive relocations for the
	 same offset.  In that case we are supposed to treat the
	 output of each relocation as the addend for the next.  */
      if (rel + 1 < relend
	  && rel->r_offset == rel[1].r_offset
	  && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
	  && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
	save_addend = true;
      else
	save_addend = false;

      if (r == bfd_reloc_continue)
	r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
					       input_section, contents, rel,
					       relocation, info, sec,
					       h, &unresolved_reloc,
					       save_addend, &addend, sym);

      /* Fill in GOT / TLSDESC entries (and emit the matching dynamic
	 relocations) for TLS relocs, once per symbol — the *_mark
	 helpers prevent double processing.  */
      switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type))
	{
	case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
	case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
	case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
	case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
	case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
	case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
	case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
	case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
	  /* GD/LD: double GOT entry holding { DTPMOD, DTPREL }.  */
	  if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
	    {
	      bool need_relocs = false;
	      bfd_byte *loc;
	      int indx;
	      bfd_vma off;

	      off = symbol_got_offset (input_bfd, h, r_symndx);
	      /* INDX is the dynamic symbol index, or 0 when the symbol
		 resolves locally.  */
	      indx = h && h->dynindx != -1 ? h->dynindx : 0;

	      need_relocs =
		(!bfd_link_executable (info) || indx != 0) &&
		(h == NULL
		 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		 || h->root.type != bfd_link_hash_undefweak);

	      BFD_ASSERT (globals->root.srelgot != NULL);

	      if (need_relocs)
		{
		  Elf_Internal_Rela rela;
		  rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
		  rela.r_addend = 0;
		  rela.r_offset = globals->root.sgot->output_section->vma +
		    globals->root.sgot->output_offset + off;


		  loc = globals->root.srelgot->contents;
		  /* NOTE(review): RELOC_SIZE is passed 'htab' here but
		     'globals' a few lines below; presumably the macro
		     ignores its argument — verify before changing.  */
		  loc += globals->root.srelgot->reloc_count++
		    * RELOC_SIZE (htab);
		  bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);

		  bfd_reloc_code_real_type real_type =
		    elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);

		  if (real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21
		      || real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21
		      || real_type == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC)
		    {
		      /* For local dynamic, don't generate DTPREL in any case.
			 Initialize the DTPREL slot into zero, so we get module
			 base address when invoke runtime TLS resolver.  */
		      bfd_put_NN (output_bfd, 0,
				  globals->root.sgot->contents + off
				  + GOT_ENTRY_SIZE);
		    }
		  else if (indx == 0)
		    {
		      /* Locally-resolved GD symbol: the linker can fix
			 the DTP offset itself.  */
		      bfd_put_NN (output_bfd,
				  relocation - dtpoff_base (info),
				  globals->root.sgot->contents + off
				  + GOT_ENTRY_SIZE);
		    }
		  else
		    {
		      /* This TLS symbol is global.  We emit a
			 relocation to fixup the tls offset at load
			 time.  */
		      rela.r_info =
			ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
		      rela.r_addend = 0;
		      rela.r_offset =
			(globals->root.sgot->output_section->vma
			 + globals->root.sgot->output_offset + off
			 + GOT_ENTRY_SIZE);

		      loc = globals->root.srelgot->contents;
		      loc += globals->root.srelgot->reloc_count++
			* RELOC_SIZE (globals);
		      bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
		      bfd_put_NN (output_bfd, (bfd_vma) 0,
				  globals->root.sgot->contents + off
				  + GOT_ENTRY_SIZE);
		    }
		}
	      else
		{
		  /* No dynamic relocs needed: statically resolve the
		     pair as module id 1 and the known DTP offset.  */
		  bfd_put_NN (output_bfd, (bfd_vma) 1,
			      globals->root.sgot->contents + off);
		  bfd_put_NN (output_bfd,
			      relocation - dtpoff_base (info),
			      globals->root.sgot->contents + off
			      + GOT_ENTRY_SIZE);
		}

	      symbol_got_offset_mark (input_bfd, h, r_symndx);
	    }
	  break;

	case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
	case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
	case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
	case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
	case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
	  /* IE: single GOT entry holding the TP-relative offset.  */
	  if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
	    {
	      bool need_relocs = false;
	      bfd_byte *loc;
	      int indx;
	      bfd_vma off;

	      off = symbol_got_offset (input_bfd, h, r_symndx);

	      indx = h && h->dynindx != -1 ? h->dynindx : 0;

	      need_relocs =
		(!bfd_link_executable (info) || indx != 0) &&
		(h == NULL
		 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		 || h->root.type != bfd_link_hash_undefweak);

	      BFD_ASSERT (globals->root.srelgot != NULL);

	      if (need_relocs)
		{
		  Elf_Internal_Rela rela;

		  if (indx == 0)
		    rela.r_addend = relocation - dtpoff_base (info);
		  else
		    rela.r_addend = 0;

		  rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
		  rela.r_offset = globals->root.sgot->output_section->vma +
		    globals->root.sgot->output_offset + off;

		  loc = globals->root.srelgot->contents;
		  loc += globals->root.srelgot->reloc_count++
		    * RELOC_SIZE (htab);

		  bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);

		  bfd_put_NN (output_bfd, rela.r_addend,
			      globals->root.sgot->contents + off);
		}
	      else
		bfd_put_NN (output_bfd, relocation - tpoff_base (info),
			    globals->root.sgot->contents + off);

	      symbol_got_offset_mark (input_bfd, h, r_symndx);
	    }
	  break;

	case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
	case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
	case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
	case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
	case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
	case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
	case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
	  /* TLSDESC: double entry in .got.plt, fixed up at load time
	     by an R_AARCH64_TLSDESC reloc placed in .rela.plt.  */
	  if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
	    {
	      bool need_relocs = false;
	      int indx = h && h->dynindx != -1 ? h->dynindx : 0;
	      bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);

	      need_relocs = (h == NULL
			     || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			     || h->root.type != bfd_link_hash_undefweak);

	      BFD_ASSERT (globals->root.srelgot != NULL);
	      BFD_ASSERT (globals->root.sgot != NULL);

	      if (need_relocs)
		{
		  bfd_byte *loc;
		  Elf_Internal_Rela rela;
		  rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));

		  rela.r_addend = 0;
		  rela.r_offset = (globals->root.sgotplt->output_section->vma
				   + globals->root.sgotplt->output_offset
				   + off + globals->sgotplt_jump_table_size);

		  if (indx == 0)
		    rela.r_addend = relocation - dtpoff_base (info);

		  /* Allocate the next available slot in the PLT reloc
		     section to hold our R_AARCH64_TLSDESC, the next
		     available slot is determined from reloc_count,
		     which we step.  But note, reloc_count was
		     artifically moved down while allocating slots for
		     real PLT relocs such that all of the PLT relocs
		     will fit above the initial reloc_count and the
		     extra stuff will fit below.  */
		  loc = globals->root.srelplt->contents;
		  loc += globals->root.srelplt->reloc_count++
		    * RELOC_SIZE (globals);

		  bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);

		  bfd_put_NN (output_bfd, (bfd_vma) 0,
			      globals->root.sgotplt->contents + off +
			      globals->sgotplt_jump_table_size);
		  bfd_put_NN (output_bfd, (bfd_vma) 0,
			      globals->root.sgotplt->contents + off +
			      globals->sgotplt_jump_table_size +
			      GOT_ENTRY_SIZE);
		}

	      symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
	    }
	  break;
	default:
	  break;
	}

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      +rel->r_offset) != (bfd_vma) - 1)
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unresolvable %s relocation against symbol `%s'"),
	     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
	     h->root.root.string);
	  return false;
	}

      if (r != bfd_reloc_ok && r != bfd_reloc_continue)
	{
	  bfd_reloc_code_real_type real_r_type
	    = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      (*info->callbacks->reloc_overflow)
		(info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
		 input_bfd, input_section, rel->r_offset);
	      if (real_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
		  || real_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
		{
		  (*info->callbacks->warning)
		    (info,
		     _("too many GOT entries for -fpic, "
		       "please recompile with -fPIC"),
		     name, input_bfd, input_section, rel->r_offset);
		  return false;
		}
	      /* Overflow can occur when a variable is referenced with a type
		 that has a larger alignment than the type with which it was
		 declared.  eg:
		   file1.c: extern int foo; int a (void) { return foo; }
		   file2.c: char bar, foo, baz;
		 If the variable is placed into a data section at an offset
		 that is incompatible with the larger alignment requirement
		 overflow will occur.  (Strictly speaking this is not overflow
		 but rather an alignment problem, but the bfd_reloc_ error
		 enum does not have a value to cover that situation).

		 Try to catch this situation here and provide a more helpful
		 error message to the user.  */
	      if (addend & (((bfd_vma) 1 << howto->rightshift) - 1)
		  /* FIXME: Are we testing all of the appropriate reloc
		     types here ?  */
		  && (real_r_type == BFD_RELOC_AARCH64_LD_LO19_PCREL
		      || real_r_type == BFD_RELOC_AARCH64_LDST16_LO12
		      || real_r_type == BFD_RELOC_AARCH64_LDST32_LO12
		      || real_r_type == BFD_RELOC_AARCH64_LDST64_LO12
		      || real_r_type == BFD_RELOC_AARCH64_LDST128_LO12))
		{
		  info->callbacks->warning
		    (info, _("one possible cause of this error is that the \
symbol is being referenced in the indicated code as if it had a larger \
alignment than was declared where it was defined"),
		     name, input_bfd, input_section, rel->r_offset);
		}
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, name, input_bfd, input_section, rel->r_offset, true);
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      (*info->callbacks->reloc_dangerous)
		(info, error_message, input_bfd, input_section, rel->r_offset);
	      break;
	    }
	}

      /* Only carry the addend forward when the next reloc shares this
	 reloc's offset (see SAVE_ADDEND above).  */
      if (!save_addend)
	addend = 0;
    }

  return true;
}

/* Set the right machine number.  */

static bool
elfNN_aarch64_object_p (bfd *abfd)
{
#if ARCH_SIZE == 32
  bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
#else
  bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
#endif
  return true;
}

/* Function to keep AArch64 specific flags in the ELF header.
*/

static bool
elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
{
  if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
    {
      /* Flags already initialised to a different value: deliberately
	 left unchanged (mismatch is ignored, not an error).  */
    }
  else
    {
      elf_elfheader (abfd)->e_flags = flags;
      elf_flags_init (abfd) = true;
    }

  return true;
}

/* Merge backend specific data from an object file to the output
   object file when linking.  */

static bool
elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  flagword out_flags;
  flagword in_flags;
  bool flags_compatible = true;
  asection *sec;

  /* Check if we have the same endianess.  */
  if (!_bfd_generic_verify_endian_match (ibfd, info))
    return false;

  if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
    return true;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return true;

      elf_flags_init (obfd) = true;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
				  bfd_get_mach (ibfd));

      return true;
    }

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return true;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatiblity.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatability
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bool null_input_bfd = true;
      bool only_data_sections = true;

      /* NOTE(review): the unconditional break means only the first
	 section is ever examined here — confirm against upstream
	 whether that is intended.  */
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  if ((bfd_section_flags (sec)
	       & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
	      == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
	    only_data_sections = false;

	  null_input_bfd = false;
	  break;
	}

      if (null_input_bfd || only_data_sections)
	return true;
    }

  return flags_compatible;
}

/* Display the flags field.  */

static bool
elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
{
  FILE *file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  /* xgettext:c-format */
  fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags);

  /* No AArch64-specific flag bits are recognised; any non-zero bits
     are reported as unknown.  */
  if (flags)
    fprintf (file, _(" <Unrecognised flag bits set>"));

  fputc ('\n', file);

  return true;
}

/* Return true if we need copy relocation against EH.  */

static bool
need_copy_relocation_p (struct elf_aarch64_link_hash_entry *eh)
{
  struct elf_dyn_relocs *p;
  asection *s;

  for (p = eh->root.dyn_relocs; p != NULL; p = p->next)
    {
      /* If there is any pc-relative reference, we need to keep copy relocation
	 to avoid propagating the relocation into runtime that current glibc
	 does not support.  */
      if (p->pc_count)
	return true;

      s = p->sec->output_section;
      /* Need copy relocation if it's against read-only section.  */
      if (s != NULL && (s->flags & SEC_READONLY) != 0)
	return true;
    }

  return false;
}

/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.  */

static bool
elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
				     struct elf_link_hash_entry *h)
{
  struct elf_aarch64_link_hash_table *htab;
  asection *s, *srel;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a CALL26 reloc in
	     an input file, but the symbol wasn't referred to
	     by a dynamic object or all references were
	     garbage collected.  In which case we can end up
	     resolving.  */
	  h->plt.offset = (bfd_vma) - 1;
	  h->needs_plt = 0;
	}

      return true;
    }
  else
    /* Otherwise, reset to -1.  */
    h->plt.offset = (bfd_vma) - 1;


  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->is_weakalias)
    {
      struct elf_link_hash_entry *def = weakdef (h);
      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
      h->root.u.def.section = def->root.u.def.section;
      h->root.u.def.value = def->root.u.def.value;
      if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
	h->non_got_ref = def->non_got_ref;
      return true;
    }

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  */
  if (bfd_link_pic (info))
    return true;

  /* If there are no references to this symbol that do not use the
     GOT, we don't need to generate a copy reloc.  */
  if (!h->non_got_ref)
    return true;

  /* If -z nocopyreloc was given, we won't generate them either.  */
  if (info->nocopyreloc)
    {
      h->non_got_ref = 0;
      return true;
    }

  if (ELIMINATE_COPY_RELOCS)
    {
      struct elf_aarch64_link_hash_entry *eh;
      /* If we don't find any dynamic relocs in read-only sections, then
	 we'll be keeping the dynamic relocs and avoiding the copy reloc.  */
      eh = (struct elf_aarch64_link_hash_entry *) h;
      if (!need_copy_relocation_p (eh))
	{
	  h->non_got_ref = 0;
	  return true;
	}
    }

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */

  htab = elf_aarch64_hash_table (info);

  /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
     to copy the initial value out of the dynamic object and into the
     runtime process image.  */
  if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
    {
      /* Read-only definitions land in .data.rel.ro via .dynbss'
	 relro counterpart.  */
      s = htab->root.sdynrelro;
      srel = htab->root.sreldynrelro;
    }
  else
    {
      s = htab->root.sdynbss;
      srel = htab->root.srelbss;
    }
  if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
    {
      srel->size += RELOC_SIZE (htab);
      h->needs_copy = 1;
    }

  return _bfd_elf_adjust_dynamic_copy (info, h, s);

}

/* Allocate the per-bfd array of NUMBER local-symbol records on first
   use; subsequent calls are no-ops.  Returns false on allocation
   failure.  NOTE(review): NUMBER * sizeof is unchecked for overflow —
   presumably bounded by the symbol table size; confirm.  */

static bool
elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
{
  struct elf_aarch64_local_symbol *locals;
  locals = elf_aarch64_locals (abfd);
  if (locals == NULL)
    {
      locals = (struct elf_aarch64_local_symbol *)
	bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
      if (locals == NULL)
	return false;
      elf_aarch64_locals (abfd) = locals;
    }
  return true;
}

/* Create the .got section to hold the global offset table.  */

static bool
aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed = get_elf_backend_data (abfd);
  flagword flags;
  asection *s;
  struct elf_link_hash_entry *h;
  struct elf_link_hash_table *htab = elf_hash_table (info);

  /* This function may be called more than once.  */
  if (htab->sgot != NULL)
    return true;

  flags = bed->dynamic_sec_flags;

  /* Create the dynamic relocation section for the GOT first.  */
  s = bfd_make_section_anyway_with_flags (abfd,
					  (bed->rela_plts_and_copies_p
					   ? ".rela.got" : ".rel.got"),
					  (bed->dynamic_sec_flags
					   | SEC_READONLY));
  if (s == NULL
      || !bfd_set_section_alignment (s, bed->s->log_file_align))
    return false;
  htab->srelgot = s;

  s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
  if (s == NULL
      || !bfd_set_section_alignment (s, bed->s->log_file_align))
    return false;
  htab->sgot = s;
  /* Reserve one entry in .got up front.  */
  htab->sgot->size += GOT_ENTRY_SIZE;

  if (bed->want_got_sym)
    {
      /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
	 (or .got.plt) section.  We don't do this in the linker script
	 because we don't want to define the symbol if we are not creating
	 a global offset table.  */
      h = _bfd_elf_define_linkage_sym (abfd, info, s,
				       "_GLOBAL_OFFSET_TABLE_");
      elf_hash_table (info)->hgot = h;
      if (h == NULL)
	return false;
    }

  if (bed->want_got_plt)
    {
      s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
      if (s == NULL
	  || !bfd_set_section_alignment (s, bed->s->log_file_align))
	return false;
      htab->sgotplt = s;
    }

  /* The first bit of the global offset table is the header.
     S is .got.plt when want_got_plt, otherwise still .got.  */
  s->size += bed->got_header_size;

  return true;
}

/* Look through the relocs for a section during the first phase.
*/ 7785 7786 static bool 7787 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info, 7788 asection *sec, const Elf_Internal_Rela *relocs) 7789 { 7790 Elf_Internal_Shdr *symtab_hdr; 7791 struct elf_link_hash_entry **sym_hashes; 7792 const Elf_Internal_Rela *rel; 7793 const Elf_Internal_Rela *rel_end; 7794 asection *sreloc; 7795 7796 struct elf_aarch64_link_hash_table *htab; 7797 7798 if (bfd_link_relocatable (info)) 7799 return true; 7800 7801 BFD_ASSERT (is_aarch64_elf (abfd)); 7802 7803 htab = elf_aarch64_hash_table (info); 7804 sreloc = NULL; 7805 7806 symtab_hdr = &elf_symtab_hdr (abfd); 7807 sym_hashes = elf_sym_hashes (abfd); 7808 7809 rel_end = relocs + sec->reloc_count; 7810 for (rel = relocs; rel < rel_end; rel++) 7811 { 7812 struct elf_link_hash_entry *h; 7813 unsigned int r_symndx; 7814 unsigned int r_type; 7815 bfd_reloc_code_real_type bfd_r_type; 7816 Elf_Internal_Sym *isym; 7817 7818 r_symndx = ELFNN_R_SYM (rel->r_info); 7819 r_type = ELFNN_R_TYPE (rel->r_info); 7820 7821 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr)) 7822 { 7823 /* xgettext:c-format */ 7824 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd, r_symndx); 7825 return false; 7826 } 7827 7828 if (r_symndx < symtab_hdr->sh_info) 7829 { 7830 /* A local symbol. */ 7831 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache, 7832 abfd, r_symndx); 7833 if (isym == NULL) 7834 return false; 7835 7836 /* Check relocation against local STT_GNU_IFUNC symbol. */ 7837 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC) 7838 { 7839 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, 7840 true); 7841 if (h == NULL) 7842 return false; 7843 7844 /* Fake a STT_GNU_IFUNC symbol. 
*/ 7845 h->type = STT_GNU_IFUNC; 7846 h->def_regular = 1; 7847 h->ref_regular = 1; 7848 h->forced_local = 1; 7849 h->root.type = bfd_link_hash_defined; 7850 } 7851 else 7852 h = NULL; 7853 } 7854 else 7855 { 7856 h = sym_hashes[r_symndx - symtab_hdr->sh_info]; 7857 while (h->root.type == bfd_link_hash_indirect 7858 || h->root.type == bfd_link_hash_warning) 7859 h = (struct elf_link_hash_entry *) h->root.u.i.link; 7860 } 7861 7862 /* Could be done earlier, if h were already available. */ 7863 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx); 7864 7865 if (h != NULL) 7866 { 7867 /* If a relocation refers to _GLOBAL_OFFSET_TABLE_, create the .got. 7868 This shows up in particular in an R_AARCH64_PREL64 in large model 7869 when calculating the pc-relative address to .got section which is 7870 used to initialize the gp register. */ 7871 if (h->root.root.string 7872 && strcmp (h->root.root.string, "_GLOBAL_OFFSET_TABLE_") == 0) 7873 { 7874 if (htab->root.dynobj == NULL) 7875 htab->root.dynobj = abfd; 7876 7877 if (! aarch64_elf_create_got_section (htab->root.dynobj, info)) 7878 return false; 7879 7880 BFD_ASSERT (h == htab->root.hgot); 7881 } 7882 7883 /* Create the ifunc sections for static executables. If we 7884 never see an indirect function symbol nor we are building 7885 a static executable, those sections will be empty and 7886 won't appear in output. 
*/ 7887 switch (bfd_r_type) 7888 { 7889 default: 7890 break; 7891 7892 case BFD_RELOC_AARCH64_ADD_LO12: 7893 case BFD_RELOC_AARCH64_ADR_GOT_PAGE: 7894 case BFD_RELOC_AARCH64_ADR_HI21_PCREL: 7895 case BFD_RELOC_AARCH64_CALL26: 7896 case BFD_RELOC_AARCH64_GOT_LD_PREL19: 7897 case BFD_RELOC_AARCH64_JUMP26: 7898 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14: 7899 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC: 7900 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15: 7901 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15: 7902 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC: 7903 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC: 7904 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1: 7905 case BFD_RELOC_AARCH64_NN: 7906 if (htab->root.dynobj == NULL) 7907 htab->root.dynobj = abfd; 7908 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info)) 7909 return false; 7910 break; 7911 } 7912 7913 /* It is referenced by a non-shared object. */ 7914 h->ref_regular = 1; 7915 } 7916 7917 switch (bfd_r_type) 7918 { 7919 case BFD_RELOC_AARCH64_16: 7920 #if ARCH_SIZE == 64 7921 case BFD_RELOC_AARCH64_32: 7922 #endif 7923 if (bfd_link_pic (info) && (sec->flags & SEC_ALLOC) != 0) 7924 { 7925 if (h != NULL 7926 /* This is an absolute symbol. It represents a value instead 7927 of an address. */ 7928 && (bfd_is_abs_symbol (&h->root) 7929 /* This is an undefined symbol. */ 7930 || h->root.type == bfd_link_hash_undefined)) 7931 break; 7932 7933 /* For local symbols, defined global symbols in a non-ABS section, 7934 it is assumed that the value is an address. */ 7935 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; 7936 _bfd_error_handler 7937 /* xgettext:c-format */ 7938 (_("%pB: relocation %s against `%s' can not be used when making " 7939 "a shared object"), 7940 abfd, elfNN_aarch64_howto_table[howto_index].name, 7941 (h) ? 
h->root.root.string : "a local symbol"); 7942 bfd_set_error (bfd_error_bad_value); 7943 return false; 7944 } 7945 else 7946 break; 7947 7948 case BFD_RELOC_AARCH64_MOVW_G0_NC: 7949 case BFD_RELOC_AARCH64_MOVW_G1_NC: 7950 case BFD_RELOC_AARCH64_MOVW_G2_NC: 7951 case BFD_RELOC_AARCH64_MOVW_G3: 7952 if (bfd_link_pic (info)) 7953 { 7954 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; 7955 _bfd_error_handler 7956 /* xgettext:c-format */ 7957 (_("%pB: relocation %s against `%s' can not be used when making " 7958 "a shared object; recompile with -fPIC"), 7959 abfd, elfNN_aarch64_howto_table[howto_index].name, 7960 (h) ? h->root.root.string : "a local symbol"); 7961 bfd_set_error (bfd_error_bad_value); 7962 return false; 7963 } 7964 /* Fall through. */ 7965 7966 case BFD_RELOC_AARCH64_16_PCREL: 7967 case BFD_RELOC_AARCH64_32_PCREL: 7968 case BFD_RELOC_AARCH64_64_PCREL: 7969 case BFD_RELOC_AARCH64_ADD_LO12: 7970 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL: 7971 case BFD_RELOC_AARCH64_ADR_HI21_PCREL: 7972 case BFD_RELOC_AARCH64_ADR_LO21_PCREL: 7973 case BFD_RELOC_AARCH64_LDST128_LO12: 7974 case BFD_RELOC_AARCH64_LDST16_LO12: 7975 case BFD_RELOC_AARCH64_LDST32_LO12: 7976 case BFD_RELOC_AARCH64_LDST64_LO12: 7977 case BFD_RELOC_AARCH64_LDST8_LO12: 7978 case BFD_RELOC_AARCH64_LD_LO19_PCREL: 7979 if (h == NULL || bfd_link_pic (info)) 7980 break; 7981 /* Fall through. */ 7982 7983 case BFD_RELOC_AARCH64_NN: 7984 7985 /* We don't need to handle relocs into sections not going into 7986 the "real" output. */ 7987 if ((sec->flags & SEC_ALLOC) == 0) 7988 break; 7989 7990 if (h != NULL) 7991 { 7992 if (!bfd_link_pic (info)) 7993 h->non_got_ref = 1; 7994 7995 h->plt.refcount += 1; 7996 h->pointer_equality_needed = 1; 7997 } 7998 7999 /* No need to do anything if we're not creating a shared 8000 object. 
*/ 8001 if (!(bfd_link_pic (info) 8002 /* If on the other hand, we are creating an executable, we 8003 may need to keep relocations for symbols satisfied by a 8004 dynamic library if we manage to avoid copy relocs for the 8005 symbol. 8006 8007 NOTE: Currently, there is no support of copy relocs 8008 elimination on pc-relative relocation types, because there is 8009 no dynamic relocation support for them in glibc. We still 8010 record the dynamic symbol reference for them. This is 8011 because one symbol may be referenced by both absolute 8012 relocation (for example, BFD_RELOC_AARCH64_NN) and 8013 pc-relative relocation. We need full symbol reference 8014 information to make correct decision later in 8015 elfNN_aarch64_adjust_dynamic_symbol. */ 8016 || (ELIMINATE_COPY_RELOCS 8017 && !bfd_link_pic (info) 8018 && h != NULL 8019 && (h->root.type == bfd_link_hash_defweak 8020 || !h->def_regular)))) 8021 break; 8022 8023 { 8024 struct elf_dyn_relocs *p; 8025 struct elf_dyn_relocs **head; 8026 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START; 8027 8028 /* We must copy these reloc types into the output file. 8029 Create a reloc section in dynobj and make room for 8030 this reloc. */ 8031 if (sreloc == NULL) 8032 { 8033 if (htab->root.dynobj == NULL) 8034 htab->root.dynobj = abfd; 8035 8036 sreloc = _bfd_elf_make_dynamic_reloc_section 8037 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ true); 8038 8039 if (sreloc == NULL) 8040 return false; 8041 } 8042 8043 /* If this is a global symbol, we count the number of 8044 relocations we need for this symbol. */ 8045 if (h != NULL) 8046 { 8047 head = &h->dyn_relocs; 8048 } 8049 else 8050 { 8051 /* Track dynamic relocs needed for local syms too. 8052 We really need local syms available to do this 8053 easily. Oh well. 
*/ 8054 8055 asection *s; 8056 void **vpp; 8057 8058 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache, 8059 abfd, r_symndx); 8060 if (isym == NULL) 8061 return false; 8062 8063 s = bfd_section_from_elf_index (abfd, isym->st_shndx); 8064 if (s == NULL) 8065 s = sec; 8066 8067 /* Beware of type punned pointers vs strict aliasing 8068 rules. */ 8069 vpp = &(elf_section_data (s)->local_dynrel); 8070 head = (struct elf_dyn_relocs **) vpp; 8071 } 8072 8073 p = *head; 8074 if (p == NULL || p->sec != sec) 8075 { 8076 size_t amt = sizeof *p; 8077 p = ((struct elf_dyn_relocs *) 8078 bfd_zalloc (htab->root.dynobj, amt)); 8079 if (p == NULL) 8080 return false; 8081 p->next = *head; 8082 *head = p; 8083 p->sec = sec; 8084 } 8085 8086 p->count += 1; 8087 8088 if (elfNN_aarch64_howto_table[howto_index].pc_relative) 8089 p->pc_count += 1; 8090 } 8091 break; 8092 8093 /* RR: We probably want to keep a consistency check that 8094 there are no dangling GOT_PAGE relocs. */ 8095 case BFD_RELOC_AARCH64_ADR_GOT_PAGE: 8096 case BFD_RELOC_AARCH64_GOT_LD_PREL19: 8097 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14: 8098 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC: 8099 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15: 8100 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15: 8101 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC: 8102 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC: 8103 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1: 8104 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12: 8105 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21: 8106 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21: 8107 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC: 8108 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12: 8109 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19: 8110 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC: 8111 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1: 8112 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC: 8113 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21: 8114 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21: 8115 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC: 8116 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1: 8117 case 
BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 8118 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC: 8119 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 8120 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19: 8121 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: 8122 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1: 8123 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC: 8124 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21: 8125 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21: 8126 { 8127 unsigned got_type; 8128 unsigned old_got_type; 8129 8130 got_type = aarch64_reloc_got_type (bfd_r_type); 8131 8132 if (h) 8133 { 8134 h->got.refcount += 1; 8135 old_got_type = elf_aarch64_hash_entry (h)->got_type; 8136 } 8137 else 8138 { 8139 struct elf_aarch64_local_symbol *locals; 8140 8141 if (!elfNN_aarch64_allocate_local_symbols 8142 (abfd, symtab_hdr->sh_info)) 8143 return false; 8144 8145 locals = elf_aarch64_locals (abfd); 8146 BFD_ASSERT (r_symndx < symtab_hdr->sh_info); 8147 locals[r_symndx].got_refcount += 1; 8148 old_got_type = locals[r_symndx].got_type; 8149 } 8150 8151 /* If a variable is accessed with both general dynamic TLS 8152 methods, two slots may be created. */ 8153 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type)) 8154 got_type |= old_got_type; 8155 8156 /* We will already have issued an error message if there 8157 is a TLS/non-TLS mismatch, based on the symbol type. 8158 So just combine any TLS types needed. */ 8159 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL 8160 && got_type != GOT_NORMAL) 8161 got_type |= old_got_type; 8162 8163 /* If the symbol is accessed by both IE and GD methods, we 8164 are able to relax. Turn off the GD flag, without 8165 messing up with any other kind of TLS types that may be 8166 involved. 
	     */
	  if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
	    got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);

	  /* Record the merged GOT type if it changed: in the hash entry
	     for global symbols, or in the per-bfd local-symbol table for
	     locals.  */
	  if (old_got_type != got_type)
	    {
	      if (h != NULL)
		elf_aarch64_hash_entry (h)->got_type = got_type;
	      else
		{
		  struct elf_aarch64_local_symbol *locals;
		  locals = elf_aarch64_locals (abfd);
		  BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
		  locals[r_symndx].got_type = got_type;
		}
	    }

	  /* Any GOT-using relocation needs a GOT section; create it now
	     (and pick a dynobj) if it does not exist yet.  */
	  if (htab->root.dynobj == NULL)
	    htab->root.dynobj = abfd;
	  if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
	    return false;
	  break;
	}

	case BFD_RELOC_AARCH64_CALL26:
	case BFD_RELOC_AARCH64_JUMP26:
	  /* If this is a local symbol then we resolve it
	     directly without creating a PLT entry.  */
	  if (h == NULL)
	    continue;

	  h->needs_plt = 1;
	  if (h->plt.refcount <= 0)
	    h->plt.refcount = 1;
	  else
	    h->plt.refcount += 1;
	  break;

	default:
	  break;
	}
    }

  return true;
}

/* Treat mapping symbols as special target symbols.  */

static bool
elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
					asymbol *sym)
{
  return bfd_is_aarch64_special_symbol_name (sym->name,
					     BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
}

/* If the ELF symbol SYM might be a function in SEC, return the
   function size and set *CODE_OFF to the function's entry point,
   otherwise return zero.  */

static bfd_size_type
elfNN_aarch64_maybe_function_sym (const asymbol *sym, asection *sec,
				  bfd_vma *code_off)
{
  bfd_size_type size;
  elf_symbol_type * elf_sym = (elf_symbol_type *) sym;

  /* Reject symbols that clearly cannot be functions (section, file,
     object, TLS and complex-relocation symbols) and symbols belonging
     to a different section.  */
  if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
		     | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
      || sym->section != sec)
    return 0;

  /* Synthetic symbols carry no ELF st_size.  */
  size = (sym->flags & BSF_SYNTHETIC) ? 0 : elf_sym->internal_elf_sym.st_size;

  if (!(sym->flags & BSF_SYNTHETIC))
    switch (ELF_ST_TYPE (elf_sym->internal_elf_sym.st_info))
      {
      case STT_NOTYPE:
	/* Ignore symbols created by the annobin plugin for gcc and clang.
	   These symbols are hidden, local, notype and have a size of 0.  */
	if (size == 0
	    && sym->flags & BSF_LOCAL
	    && ELF_ST_VISIBILITY (elf_sym->internal_elf_sym.st_other) == STV_HIDDEN)
	  return 0;
	/* Fall through.  */
      case STT_FUNC:
	/* FIXME: Allow STT_GNU_IFUNC as well ?  */
	break;
      default:
	return 0;
      }

  /* Local mapping symbols ($x, $d, ...) are not functions.  */
  if ((sym->flags & BSF_LOCAL)
      && bfd_is_aarch64_special_symbol_name (sym->name,
					     BFD_AARCH64_SPECIAL_SYM_TYPE_ANY))
    return 0;

  *code_off = sym->value;

  /* Do not return 0 for the function's size.  */
  return size ? size : 1;
}

/* Look up DWARF2 inlined-call information for the current location in
   ABFD, filling in *FILENAME_PTR, *FUNCTIONNAME_PTR and *LINE_PTR on
   success.  Thin wrapper around _bfd_dwarf2_find_inliner_info using the
   DWARF state cached in the bfd's tdata.  */

static bool
elfNN_aarch64_find_inliner_info (bfd *abfd,
				 const char **filename_ptr,
				 const char **functionname_ptr,
				 unsigned int *line_ptr)
{
  bool found;
  found = _bfd_dwarf2_find_inliner_info
    (abfd, filename_ptr,
     functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
  return found;
}


/* Initialise the ELF file header for ABFD: run the generic
   initialisation, then stamp the AArch64 ABI version into
   e_ident[EI_ABIVERSION].  */

static bool
elfNN_aarch64_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
{
  Elf_Internal_Ehdr *i_ehdrp;	/* ELF file header, internal form.  */

  if (!_bfd_elf_init_file_header (abfd, link_info))
    return false;

  i_ehdrp = elf_elfheader (abfd);
  i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
  return true;
}

/* Classify the dynamic relocation RELA for the purposes of sorting the
   dynamic relocation sections.  Relocations against STT_GNU_IFUNC
   symbols are reported as reloc_class_ifunc.  */

static enum elf_reloc_type_class
elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
				const asection *rel_sec ATTRIBUTE_UNUSED,
				const Elf_Internal_Rela *rela)
{
  struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);

  if (htab->root.dynsym != NULL
      && htab->root.dynsym->contents != NULL)
    {
      /* Check relocation against STT_GNU_IFUNC symbol if there are
	 dynamic symbols.  */
      bfd *abfd = info->output_bfd;
      const struct elf_backend_data *bed = get_elf_backend_data (abfd);
      unsigned long r_symndx = ELFNN_R_SYM (rela->r_info);
      if (r_symndx != STN_UNDEF)
	{
	  Elf_Internal_Sym sym;
	  if (!bed->s->swap_symbol_in (abfd,
				       (htab->root.dynsym->contents
					+ r_symndx * bed->s->sizeof_sym),
				       0, &sym))
	    {
	      /* xgettext:c-format */
	      _bfd_error_handler (_("%pB symbol number %lu references"
				    " nonexistent SHT_SYMTAB_SHNDX section"),
				  abfd, r_symndx);
	      /* Ideally an error class should be returned here.  */
	    }
	  else if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
	    return reloc_class_ifunc;
	}
    }

  switch ((int) ELFNN_R_TYPE (rela->r_info))
    {
    case AARCH64_R (IRELATIVE):
      return reloc_class_ifunc;
    case AARCH64_R (RELATIVE):
      return reloc_class_relative;
    case AARCH64_R (JUMP_SLOT):
      return reloc_class_plt;
    case AARCH64_R (COPY):
      return reloc_class_copy;
    default:
      return reloc_class_normal;
    }
}

/* Handle an AArch64 specific section when reading an object file.  This is
   called when bfd_section_from_shdr finds a section with an unknown
   type.
   */

static bool
elfNN_aarch64_section_from_shdr (bfd *abfd,
				 Elf_Internal_Shdr *hdr,
				 const char *name, int shindex)
{
  /* There ought to be a place to keep ELF backend specific flags, but
     at the moment there isn't one.  We just keep track of the
     sections by their name, instead.  Fortunately, the ABI gives
     names for all the AArch64 specific sections, so we will probably get
     away with this.  */
  switch (hdr->sh_type)
    {
    case SHT_AARCH64_ATTRIBUTES:
      break;

    default:
      /* Not a section type this backend knows about.  */
      return false;
    }

  if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
    return false;

  return true;
}

/* Process any AArch64-specific program segment types.  */

static bool
elfNN_aarch64_section_from_phdr (bfd *abfd ATTRIBUTE_UNUSED,
				 Elf_Internal_Phdr *hdr,
				 int hdr_index ATTRIBUTE_UNUSED,
				 const char *name ATTRIBUTE_UNUSED)
{
  /* Right now we only handle the PT_AARCH64_MEMTAG_MTE segment type.  */
  if (hdr == NULL || hdr->p_type != PT_AARCH64_MEMTAG_MTE)
    return false;

  if (hdr->p_filesz > 0)
    {
      /* Sections created from memory tag p_type's are always named
	 "memtag".  This makes it easier for tools (for example, GDB)
	 to find them.  */
      asection *newsect = bfd_make_section_anyway (abfd, "memtag");

      if (newsect == NULL)
	return false;

      unsigned int opb = bfd_octets_per_byte (abfd, NULL);

      /* p_vaddr holds the original start address of the tagged memory
	 range.  */
      newsect->vma = hdr->p_vaddr / opb;

      /* p_filesz holds the storage size of the packed tags.  */
      newsect->size = hdr->p_filesz;
      newsect->filepos = hdr->p_offset;

      /* p_memsz holds the size of the memory range that contains tags.  The
	 section's rawsize field is reused for this purpose.  */
      newsect->rawsize = hdr->p_memsz;

      /* Make sure the section's flags has SEC_HAS_CONTENTS set, otherwise
	 BFD will return all zeroes when attempting to get contents from this
	 section.  */
      newsect->flags |= SEC_HAS_CONTENTS;
    }

  return true;
}

/* Implements the bfd_elf_modify_headers hook for aarch64.  */

static bool
elfNN_aarch64_modify_headers (bfd *abfd,
			      struct bfd_link_info *info)
{
  struct elf_segment_map *m;
  unsigned int segment_count = 0;
  Elf_Internal_Phdr *p;

  for (m = elf_seg_map (abfd); m != NULL; m = m->next, segment_count++)
    {
      /* We are only interested in the memory tag segment that will be dumped
	 to a core file.  If we have no memory tags or this isn't a core file we
	 are dealing with, just skip this segment.  */
      if (m->p_type != PT_AARCH64_MEMTAG_MTE
	  || bfd_get_format (abfd) != bfd_core)
	continue;

      /* For memory tag segments in core files, the size of the file contents
	 is smaller than the size of the memory range.  Adjust the memory size
	 accordingly.  The real memory size is held in the section's rawsize
	 field.  */
      if (m->count > 0)
	{
	  p = elf_tdata (abfd)->phdr;
	  p += m->idx;
	  p->p_memsz = m->sections[0]->rawsize;
	  p->p_flags = 0;
	  p->p_paddr = 0;
	  p->p_align = 0;
	}
    }

  /* Give the generic code a chance to handle the headers.  */
  return _bfd_elf_modify_headers (abfd, info);
}

/* A structure used to record a list of sections, independently
   of the next and prev fields in the asection structure.  */
typedef struct section_list
{
  asection *sec;
  struct section_list *next;
  struct section_list *prev;
}
section_list;

/* Unfortunately we need to keep a list of sections for which
   an _aarch64_elf_section_data structure has been allocated.  This
   is because it is possible for functions like elfNN_aarch64_write_section
   to be called on a section which has had an elf_data_structure
   allocated for it (and so the used_by_bfd field is valid) but
   for which the AArch64 extended version of this structure - the
   _aarch64_elf_section_data structure - has not been allocated.  */
static section_list *sections_with_aarch64_elf_section_data = NULL;

/* Push SEC onto the front of the doubly-linked list above.  An
   allocation failure is silently ignored - the section simply is not
   recorded.  */

static void
record_section_with_aarch64_elf_section_data (asection *sec)
{
  struct section_list *entry;

  entry = bfd_malloc (sizeof (*entry));
  if (entry == NULL)
    return;
  entry->sec = sec;
  entry->next = sections_with_aarch64_elf_section_data;
  entry->prev = NULL;
  if (entry->next != NULL)
    entry->next->prev = entry;
  sections_with_aarch64_elf_section_data = entry;
}

/* Return the list entry recorded for SEC, or NULL if none exists.
   Caches its position between calls to speed up repeated lookups.  */

static struct section_list *
find_aarch64_elf_section_entry (asection *sec)
{
  struct section_list *entry;
  static struct section_list *last_entry = NULL;

  /* This is a short cut for the typical case where the sections are added
     to the sections_with_aarch64_elf_section_data list in forward order and
     then looked up here in backwards order.  This makes a real difference
     to the ld-srec/sec64k.exp linker test.  */
  entry = sections_with_aarch64_elf_section_data;
  if (last_entry != NULL)
    {
      if (last_entry->sec == sec)
	entry = last_entry;
      else if (last_entry->next != NULL && last_entry->next->sec == sec)
	entry = last_entry->next;
    }

  for (; entry; entry = entry->next)
    if (entry->sec == sec)
      break;

  if (entry)
    /* Record the entry prior to this one - it is the entry we are
       most likely to want to locate next time.  Also this way if we
       have been called from
       unrecord_section_with_aarch64_elf_section_data () we will not
       be caching a pointer that is about to be freed.
       */
    last_entry = entry->prev;

  return entry;
}

/* Remove SEC's entry (if any) from the recorded-sections list and free
   it.  */

static void
unrecord_section_with_aarch64_elf_section_data (asection *sec)
{
  struct section_list *entry;

  entry = find_aarch64_elf_section_entry (sec);

  if (entry)
    {
      /* Unlink from the doubly-linked list, fixing up the list head if
	 the entry is first.  */
      if (entry->prev != NULL)
	entry->prev->next = entry->next;
      if (entry->next != NULL)
	entry->next->prev = entry->prev;
      if (entry == sections_with_aarch64_elf_section_data)
	sections_with_aarch64_elf_section_data = entry->next;
      free (entry);
    }
}


/* Bundle of state passed to the symbol-output callbacks below.  */
typedef struct
{
  void *finfo;
  struct bfd_link_info *info;
  asection *sec;
  int sec_shndx;
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;

/* Mapping symbol kinds; the values index the names[] array in
   elfNN_aarch64_output_map_sym.  */
enum map_symbol_type
{
  AARCH64_MAP_INSN,
  AARCH64_MAP_DATA
};


/* Output a single mapping symbol.  */

static bool
elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
			      enum map_symbol_type type, bfd_vma offset)
{
  /* "$x" marks instructions, "$d" marks data, per map_symbol_type.  */
  static const char *names[2] = { "$x", "$d" };
  Elf_Internal_Sym sym;

  sym.st_value = (osi->sec->output_section->vma
		  + osi->sec->output_offset + offset);
  sym.st_size = 0;
  sym.st_other = 0;
  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
  sym.st_shndx = osi->sec_shndx;
  return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
}

/* Output a single local symbol for a generated stub.  */

static bool
elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
			       bfd_vma offset, bfd_vma size)
{
  Elf_Internal_Sym sym;

  sym.st_value = (osi->sec->output_section->vma
		  + osi->sec->output_offset + offset);
  sym.st_size = size;
  sym.st_other = 0;
  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  sym.st_shndx = osi->sec_shndx;
  return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
}

/* Called via bfd_hash_traverse over the stub hash table: emit a local
   symbol and the appropriate mapping symbols for one stub.  */

static bool
aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
{
  struct elf_aarch64_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd_vma addr;
  char *stub_name;
  output_arch_syminfo *osi;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
  osi = (output_arch_syminfo *) in_arg;

  stub_sec = stub_entry->stub_sec;

  /* Ensure this stub is attached to the current section being
     processed.
     */
  if (stub_sec != osi->sec)
    return true;

  addr = (bfd_vma) stub_entry->stub_offset;

  stub_name = stub_entry->output_name;

  switch (stub_entry->stub_type)
    {
    case aarch64_stub_adrp_branch:
      if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
					  sizeof (aarch64_adrp_branch_stub)))
	return false;
      if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
	return false;
      break;
    case aarch64_stub_long_branch:
      if (!elfNN_aarch64_output_stub_sym
	  (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
	return false;
      if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
	return false;
      /* The long-branch stub carries data at offset 16; mark it with a
	 $d mapping symbol.  */
      if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
	return false;
      break;
    case aarch64_stub_bti_direct_branch:
      if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
					  sizeof (aarch64_bti_direct_branch_stub)))
	return false;
      if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
	return false;
      break;
    case aarch64_stub_erratum_835769_veneer:
      if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
					  sizeof (aarch64_erratum_835769_stub)))
	return false;
      if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
	return false;
      break;
    case aarch64_stub_erratum_843419_veneer:
      if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
					  sizeof (aarch64_erratum_843419_stub)))
	return false;
      if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
	return false;
      break;
    case aarch64_stub_none:
      break;

    default:
      /* Unknown stub type - should be unreachable.  */
      abort ();
    }

  return true;
}

/* Output mapping symbols for linker generated sections.  */

static bool
elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
				      struct bfd_link_info *info,
				      void *finfo,
				      int (*func) (void *, const char *,
						   Elf_Internal_Sym *,
						   asection *,
						   struct elf_link_hash_entry
						   *))
{
  output_arch_syminfo osi;
  struct elf_aarch64_link_hash_table *htab;

  /* Nothing to do when all symbols are being stripped.  */
  if (info->strip == strip_all
      && !info->emitrelocations
      && !bfd_link_relocatable (info))
    return true;

  htab = elf_aarch64_hash_table (info);

  osi.finfo = finfo;
  osi.info = info;
  osi.func = func;

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection *stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL; stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  /* The first instruction in a stub is always a branch.  */
	  if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
	    return false;

	  bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
			     &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (!htab->root.splt || htab->root.splt->size == 0)
    return true;

  osi.sec_shndx = _bfd_elf_section_from_bfd_section
    (output_bfd, htab->root.splt->output_section);
  osi.sec = htab->root.splt;

  elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0);

  return true;

}

/* Allocate target specific section data.
*/ 8738 8739 static bool 8740 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec) 8741 { 8742 if (!sec->used_by_bfd) 8743 { 8744 _aarch64_elf_section_data *sdata; 8745 size_t amt = sizeof (*sdata); 8746 8747 sdata = bfd_zalloc (abfd, amt); 8748 if (sdata == NULL) 8749 return false; 8750 sec->used_by_bfd = sdata; 8751 } 8752 8753 record_section_with_aarch64_elf_section_data (sec); 8754 8755 return _bfd_elf_new_section_hook (abfd, sec); 8756 } 8757 8758 8759 static void 8760 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED, 8761 asection *sec, 8762 void *ignore ATTRIBUTE_UNUSED) 8763 { 8764 unrecord_section_with_aarch64_elf_section_data (sec); 8765 } 8766 8767 static bool 8768 elfNN_aarch64_bfd_free_cached_info (bfd *abfd) 8769 { 8770 if (abfd->sections) 8771 bfd_map_over_sections (abfd, 8772 unrecord_section_via_map_over_sections, NULL); 8773 8774 return _bfd_elf_free_cached_info (abfd); 8775 } 8776 8777 /* Create dynamic sections. This is different from the ARM backend in that 8778 the got, plt, gotplt and their relocation sections are all created in the 8779 standard part of the bfd elf backend. */ 8780 8781 static bool 8782 elfNN_aarch64_create_dynamic_sections (bfd *dynobj, 8783 struct bfd_link_info *info) 8784 { 8785 /* We need to create .got section. */ 8786 if (!aarch64_elf_create_got_section (dynobj, info)) 8787 return false; 8788 8789 return _bfd_elf_create_dynamic_sections (dynobj, info); 8790 } 8791 8792 8793 /* Allocate space in .plt, .got and associated reloc sections for 8794 dynamic relocs. */ 8795 8796 static bool 8797 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf) 8798 { 8799 struct bfd_link_info *info; 8800 struct elf_aarch64_link_hash_table *htab; 8801 struct elf_aarch64_link_hash_entry *eh; 8802 struct elf_dyn_relocs *p; 8803 8804 /* An example of a bfd_link_hash_indirect symbol is versioned 8805 symbol. 
     For example: __gxx_personality_v0(bfd_link_hash_indirect)
     -> __gxx_personality_v0(bfd_link_hash_defined)

     There is no need to process bfd_link_hash_indirect symbols here
     because we will also be presented with the concrete instance of
     the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
     called to copy all relevant data from the generic to the concrete
     symbol instance.  */
  if (h->root.type == bfd_link_hash_indirect)
    return true;

  if (h->root.type == bfd_link_hash_warning)
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  info = (struct bfd_link_info *) inf;
  htab = elf_aarch64_hash_table (info);

  /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
     here if it is defined and referenced in a non-shared object.  */
  if (h->type == STT_GNU_IFUNC
      && h->def_regular)
    return true;
  else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
    {
      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1 && !h->forced_local
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (!bfd_elf_link_record_dynamic_symbol (info, h))
	    return false;
	}

      if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
	{
	  asection *s = htab->root.splt;

	  /* If this is the first .plt entry, make room for the special
	     first entry.  */
	  if (s->size == 0)
	    s->size += htab->plt_header_size;

	  h->plt.offset = s->size;

	  /* If this symbol is not defined in a regular file, and we are
	     not generating a shared library, then set the symbol to this
	     location in the .plt.  This is required to make function
	     pointers compare as equal between the normal executable and
	     the shared library.  */
	  if (!bfd_link_pic (info) && !h->def_regular)
	    {
	      h->root.u.def.section = s;
	      h->root.u.def.value = h->plt.offset;
	    }

	  /* Make room for this entry. For now we only create the
	     small model PLT entries. We later need to find a way
	     of relaxing into these from the large model PLT entries.  */
	  s->size += htab->plt_entry_size;

	  /* We also need to make an entry in the .got.plt section, which
	     will be placed in the .got section by the linker script.  */
	  htab->root.sgotplt->size += GOT_ENTRY_SIZE;

	  /* We also need to make an entry in the .rela.plt section.  */
	  htab->root.srelplt->size += RELOC_SIZE (htab);

	  /* We need to ensure that all GOT entries that serve the PLT
	     are consecutive with the special GOT slots [0] [1] and
	     [2].  Any addtional relocations, such as
	     R_AARCH64_TLSDESC, must be placed after the PLT related
	     entries.  We abuse the reloc_count such that during
	     sizing we adjust reloc_count to indicate the number of
	     PLT related reserved entries.  In subsequent phases when
	     filling in the contents of the reloc entries, PLT related
	     entries are placed by computing their PLT index (0
	     .. reloc_count).  While other none PLT relocs are placed
	     at the slot indicated by reloc_count and reloc_count is
	     updated.  */

	  htab->root.srelplt->reloc_count++;

	  /* Mark the DSO in case R_<CLS>_JUMP_SLOT relocs against
	     variant PCS symbols are present.  */
	  if (h->other & STO_AARCH64_VARIANT_PCS)
	    htab->variant_pcs = 1;

	}
      else
	{
	  h->plt.offset = (bfd_vma) - 1;
	  h->needs_plt = 0;
	}
    }
  else
    {
      h->plt.offset = (bfd_vma) - 1;
      h->needs_plt = 0;
    }

  eh = (struct elf_aarch64_link_hash_entry *) h;
  eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;

  if (h->got.refcount > 0)
    {
      bool dyn;
      unsigned got_type = elf_aarch64_hash_entry (h)->got_type;

      h->got.offset = (bfd_vma) - 1;

      dyn = htab->root.dynamic_sections_created;

      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (dyn && h->dynindx == -1 && !h->forced_local
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (!bfd_elf_link_record_dynamic_symbol (info, h))
	    return false;
	}

      if (got_type == GOT_UNKNOWN)
	{
	  /* No GOT entry required.  */
	}
      else if (got_type == GOT_NORMAL)
	{
	  /* Single GOT slot, plus a dynamic relocation when the value
	     cannot be fixed at static link time.  */
	  h->got.offset = htab->root.sgot->size;
	  htab->root.sgot->size += GOT_ENTRY_SIZE;
	  if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
	       || h->root.type != bfd_link_hash_undefweak)
	      && (bfd_link_pic (info)
		  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h))
	      /* Undefined weak symbol in static PIE resolves to 0 without
		 any dynamic relocations.  */
	      && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
	    {
	      htab->root.srelgot->size += RELOC_SIZE (htab);
	    }
	}
      else
	{
	  /* TLS GOT types; a symbol may carry several (TLSDESC, GD,
	     IE) simultaneously and gets a slot pair/slot for each.  */
	  int indx;
	  if (got_type & GOT_TLSDESC_GD)
	    {
	      eh->tlsdesc_got_jump_table_offset =
		(htab->root.sgotplt->size
		 - aarch64_compute_jump_table_size (htab));
	      htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
	      h->got.offset = (bfd_vma) - 2;
	    }

	  if (got_type & GOT_TLS_GD)
	    {
	      h->got.offset = htab->root.sgot->size;
	      htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
	    }

	  if (got_type & GOT_TLS_IE)
	    {
	      h->got.offset = htab->root.sgot->size;
	      htab->root.sgot->size += GOT_ENTRY_SIZE;
	    }

	  indx = h && h->dynindx != -1 ? h->dynindx : 0;
	  if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
	       || h->root.type != bfd_link_hash_undefweak)
	      && (!bfd_link_executable (info)
		  || indx != 0
		  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
	    {
	      if (got_type & GOT_TLSDESC_GD)
		{
		  htab->root.srelplt->size += RELOC_SIZE (htab);
		  /* Note reloc_count not incremented here! We have
		     already adjusted reloc_count for this relocation
		     type.  */

		  /* TLSDESC PLT is now needed, but not yet determined.  */
		  htab->root.tlsdesc_plt = (bfd_vma) - 1;
		}

	      if (got_type & GOT_TLS_GD)
		htab->root.srelgot->size += RELOC_SIZE (htab) * 2;

	      if (got_type & GOT_TLS_IE)
		htab->root.srelgot->size += RELOC_SIZE (htab);
	    }
	}
    }
  else
    {
      h->got.offset = (bfd_vma) - 1;
    }

  if (h->dyn_relocs == NULL)
    return true;

  for (p = h->dyn_relocs; p != NULL; p = p->next)
    if (eh->def_protected)
      {
	/* Disallow copy relocations against protected symbol.  */
	asection *s = p->sec->output_section;
	if (s != NULL && (s->flags & SEC_READONLY) != 0)
	  {
	    info->callbacks->einfo
	      /* xgettext:c-format */
	      (_ ("%F%P: %pB: copy relocation against non-copyable "
		  "protected symbol `%s'\n"),
	       p->sec->owner, h->root.root.string);
	    return false;
	  }
      }

  /* In the shared -Bsymbolic case, discard space allocated for
     dynamic pc-relative relocs against symbols which turn out to be
     defined in regular objects.  For the normal shared case, discard
     space for pc-relative relocs that have become local due to symbol
     visibility changes.  */

  if (bfd_link_pic (info))
    {
      /* Relocs that use pc_count are those that appear on a call
	 insn, or certain REL relocs that can generated via assembly.
	 We want calls to protected symbols to resolve directly to the
	 function rather than going via the plt.  If people want
	 function pointer comparisons to work as expected then they
	 should avoid writing weird assembly.  */
      if (SYMBOL_CALLS_LOCAL (info, h))
	{
	  struct elf_dyn_relocs **pp;

	  for (pp = &h->dyn_relocs; (p = *pp) != NULL;)
	    {
	      p->count -= p->pc_count;
	      p->pc_count = 0;
	      if (p->count == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      /* Also discard relocs on undefined weak syms with non-default
	 visibility.  */
      if (h->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
	{
	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
	      || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
	    h->dyn_relocs = NULL;

	  /* Make sure undefined weak symbols are output as a dynamic
	     symbol in PIEs.  */
	  else if (h->dynindx == -1
		   && !h->forced_local
		   && h->root.type == bfd_link_hash_undefweak
		   && !bfd_elf_link_record_dynamic_symbol (info, h))
	    return false;
	}

    }
  else if (ELIMINATE_COPY_RELOCS)
    {
      /* For the non-shared case, discard space for relocs against
	 symbols which turn out to need copy relocs or are not
	 dynamic.  */

      if (!h->non_got_ref
	  && ((h->def_dynamic
	       && !h->def_regular)
	      || (htab->root.dynamic_sections_created
		  && (h->root.type == bfd_link_hash_undefweak
		      || h->root.type == bfd_link_hash_undefined))))
	{
	  /* Make sure this symbol is output as a dynamic symbol.
	     Undefined weak syms won't yet be marked as dynamic.  */
	  if (h->dynindx == -1
	      && !h->forced_local
	      && h->root.type == bfd_link_hash_undefweak
	      && !bfd_elf_link_record_dynamic_symbol (info, h))
	    return false;

	  /* If that succeeded, we know we'll be keeping all the
	     relocs.  */
	  if (h->dynindx != -1)
	    goto keep;
	}

      h->dyn_relocs = NULL;

    keep:;
    }

  /* Finally, allocate space.  */
  for (p = h->dyn_relocs; p != NULL; p = p->next)
    {
      asection *sreloc;

      sreloc = elf_section_data (p->sec)->sreloc;

      BFD_ASSERT (sreloc != NULL);

      sreloc->size += p->count * RELOC_SIZE (htab);
    }

  return true;
}

/* Allocate space in .plt, .got and associated reloc sections for
   ifunc dynamic relocs.  */

static bool
elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
					void *inf)
{
  struct bfd_link_info *info;
  struct elf_aarch64_link_hash_table *htab;

  /* An example of a bfd_link_hash_indirect symbol is versioned
     symbol.
     For example: __gxx_personality_v0(bfd_link_hash_indirect)
     -> __gxx_personality_v0(bfd_link_hash_defined)

     There is no need to process bfd_link_hash_indirect symbols here
     because we will also be presented with the concrete instance of
     the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
     called to copy all relevant data from the generic to the concrete
     symbol instance.  */
  if (h->root.type == bfd_link_hash_indirect)
    return true;

  if (h->root.type == bfd_link_hash_warning)
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  info = (struct bfd_link_info *) inf;
  htab = elf_aarch64_hash_table (info);

  /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
     here if it is defined and referenced in a non-shared object.  */
  if (h->type == STT_GNU_IFUNC
      && h->def_regular)
    return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
					       &h->dyn_relocs,
					       htab->plt_entry_size,
					       htab->plt_header_size,
					       GOT_ENTRY_SIZE,
					       false);
  return true;
}

/* Allocate space in .plt, .got and associated reloc sections for
   local ifunc dynamic relocs.  */

static int
elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
{
  struct elf_link_hash_entry *h
    = (struct elf_link_hash_entry *) *slot;

  /* Entries in the local ifunc hash table must all be locally
     defined STT_GNU_IFUNC symbols; anything else means the table
     has been corrupted.  */
  if (h->type != STT_GNU_IFUNC
      || !h->def_regular
      || !h->ref_regular
      || !h->forced_local
      || h->root.type != bfd_link_hash_defined)
    abort ();

  return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
}

/* This is the most important function of all.  Innocuously named
   though!
   */

static bool
elfNN_aarch64_late_size_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
				  struct bfd_link_info *info)
{
  struct elf_aarch64_link_hash_table *htab;
  bfd *dynobj;
  asection *s;
  bool relocs;
  bfd *ibfd;

  htab = elf_aarch64_hash_table ((info));
  dynobj = htab->root.dynobj;

  if (dynobj == NULL)
    return true;

  if (htab->root.dynamic_sections_created)
    {
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  /* The dynamic executable needs a .interp naming the
	     program interpreter.  */
	  s = bfd_get_linker_section (dynobj, ".interp");
	  if (s == NULL)
	    abort ();
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      struct elf_aarch64_local_symbol *locals = NULL;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      unsigned int i;

      if (!is_aarch64_elf (ibfd))
	continue;

      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
	       (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  srel->size += p->count * RELOC_SIZE (htab);
		  /* A dynamic reloc applied to a read-only output
		     section requires DF_TEXTREL so the loader can
		     remap the page writable.  */
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      locals = elf_aarch64_locals (ibfd);
      if (!locals)
	continue;

      symtab_hdr = &elf_symtab_hdr (ibfd);
      srel = htab->root.srelgot;
      /* Mirror the global-symbol GOT sizing done in
	 elfNN_aarch64_allocate_dynrelocs, but for local symbols.  */
      for (i = 0; i < symtab_hdr->sh_info; i++)
	{
	  locals[i].got_offset = (bfd_vma) - 1;
	  locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
	  if (locals[i].got_refcount > 0)
	    {
	      unsigned got_type = locals[i].got_type;
	      if (got_type & GOT_TLSDESC_GD)
		{
		  locals[i].tlsdesc_got_jump_table_offset =
		    (htab->root.sgotplt->size
		     - aarch64_compute_jump_table_size (htab));
		  htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
		  /* -2 marks "accessed via TLS descriptor"; see the
		     global symbol handling.  */
		  locals[i].got_offset = (bfd_vma) - 2;
		}

	      if (got_type & GOT_TLS_GD)
		{
		  locals[i].got_offset = htab->root.sgot->size;
		  htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
		}

	      if (got_type & GOT_TLS_IE
		  || got_type & GOT_NORMAL)
		{
		  locals[i].got_offset = htab->root.sgot->size;
		  htab->root.sgot->size += GOT_ENTRY_SIZE;
		}

	      if (got_type == GOT_UNKNOWN)
		{
		}

	      if (bfd_link_pic (info))
		{
		  if (got_type & GOT_TLSDESC_GD)
		    {
		      htab->root.srelplt->size += RELOC_SIZE (htab);
		      /* Note RELOC_COUNT not incremented here! */
		      htab->root.tlsdesc_plt = (bfd_vma) - 1;
		    }

		  if (got_type & GOT_TLS_GD)
		    htab->root.srelgot->size += RELOC_SIZE (htab) * 2;

		  if (got_type & GOT_TLS_IE
		      || got_type & GOT_NORMAL)
		    htab->root.srelgot->size += RELOC_SIZE (htab);
		}
	    }
	  else
	    {
	      locals[i].got_refcount = (bfd_vma) - 1;
	    }
	}
    }


  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.
   */
  elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
			  info);

  /* Allocate global ifunc sym .plt and .got entries, and space for global
     ifunc sym dynamic relocs.  */
  elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
			  info);

  /* Allocate .plt and .got entries, and space for local ifunc symbols.  */
  htab_traverse (htab->loc_hash_table,
		 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
		 info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */

  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);

  if (htab->root.tlsdesc_plt)
    {
      /* The TLSDESC resolver stub sits in .plt, so a PLT header is
	 needed even if no regular PLT entries were allocated.  */
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 GOT and PLT entry required.  */
      if ((info->flags & DF_BIND_NOW))
	htab->root.tlsdesc_plt = 0;
      else
	{
	  htab->root.tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += htab->tlsdesc_plt_entry_size;

	  htab->root.tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += GOT_ENTRY_SIZE;
	}
    }

  /* Init mapping symbols information to use later to distinguish between
     code and data while scanning for errata.  */
  if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
    for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
      {
	if (!is_aarch64_elf (ibfd))
	  continue;
	bfd_elfNN_aarch64_init_maps (ibfd);
      }

  /* We now have determined the sizes of the various dynamic sections.
     Allocate memory for them.  */
  relocs = false;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      if (s == htab->root.splt
	  || s == htab->root.sgot
	  || s == htab->root.sgotplt
	  || s == htab->root.iplt
	  || s == htab->root.igotplt
	  || s == htab->root.sdynbss
	  || s == htab->root.sdynrelro)
	{
	  /* Strip this section if we don't need it; see the
	     comment below.  */
	}
      else if (startswith (bfd_section_name (s), ".rela"))
	{
	  if (s->size != 0 && s != htab->root.srelplt)
	    relocs = true;

	  /* We use the reloc_count field as a counter if we need
	     to copy relocs into the output file.  */
	  if (s != htab->root.srelplt)
	    s->reloc_count = 0;
	}
      else
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rela.bss and
	     .rela.plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  We use bfd_zalloc
	 here in case unused entries are not reclaimed before the
	 section's contents are written out.  This should not happen,
	 but this way if it does, we get a R_AARCH64_NONE reloc instead
	 of garbage.
	 */
      s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return false;
    }

  if (htab->root.dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elfNN_aarch64_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL)			\
      _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (!_bfd_elf_add_dynamic_tags (output_bfd, info, relocs))
	return false;

      if (htab->root.splt->size != 0)
	{
	  if (htab->variant_pcs
	      && !add_dynamic_entry (DT_AARCH64_VARIANT_PCS, 0))
	    return false;

	  /* Advertise the PLT protection scheme (BTI landing pads
	     and/or PAC-signed return addresses) to the loader.  */
	  if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI_PAC)
	      && (!add_dynamic_entry (DT_AARCH64_BTI_PLT, 0)
		  || !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0)))
	    return false;

	  else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI)
		   && !add_dynamic_entry (DT_AARCH64_BTI_PLT, 0))
	    return false;

	  else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_PAC)
		   && !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0))
	    return false;
	}
    }
#undef add_dynamic_entry

  return true;
}

/* Patch the (already copied) PLT instruction at PLT_ENTRY by applying
   relocation R_TYPE with the given VALUE.  */

static inline void
elf_aarch64_update_plt_entry (bfd *output_bfd,
			      bfd_reloc_code_real_type r_type,
			      bfd_byte *plt_entry, bfd_vma value)
{
  reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);

  /* FIXME: We should check the return value from this function call.
     */
  (void) _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
}

/* Emit the PLTn stub, the matching .got.plt slot and the .rela.plt
   (or .rela.iplt) relocation for symbol H.  */

static void
elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
				       struct elf_aarch64_link_hash_table
				       *htab, bfd *output_bfd,
				       struct bfd_link_info *info)
{
  bfd_byte *plt_entry;
  bfd_vma plt_index;
  bfd_vma got_offset;
  bfd_vma gotplt_entry_address;
  bfd_vma plt_entry_address;
  Elf_Internal_Rela rela;
  bfd_byte *loc;
  asection *plt, *gotplt, *relplt;

  /* When building a static executable, use .iplt, .igot.plt and
     .rela.iplt sections for STT_GNU_IFUNC symbols.  */
  if (htab->root.splt != NULL)
    {
      plt = htab->root.splt;
      gotplt = htab->root.sgotplt;
      relplt = htab->root.srelplt;
    }
  else
    {
      plt = htab->root.iplt;
      gotplt = htab->root.igotplt;
      relplt = htab->root.irelplt;
    }

  /* Get the index in the procedure linkage table which
     corresponds to this symbol.  This is the index of this symbol
     in all the symbols for which we are making plt entries.  The
     first entry in the procedure linkage table is reserved.

     Get the offset into the .got table of the entry that
     corresponds to this function.  Each .got entry is GOT_ENTRY_SIZE
     bytes.  The first three are reserved for the dynamic linker.

     For static executables, we don't reserve anything.  */

  if (plt == htab->root.splt)
    {
      plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
      got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
    }
  else
    {
      plt_index = h->plt.offset / htab->plt_entry_size;
      got_offset = plt_index * GOT_ENTRY_SIZE;
    }

  plt_entry = plt->contents + h->plt.offset;
  plt_entry_address = plt->output_section->vma
    + plt->output_offset + h->plt.offset;
  gotplt_entry_address = gotplt->output_section->vma +
    gotplt->output_offset + got_offset;

  /* Copy in the boiler-plate for the PLTn entry.  */
  memcpy (plt_entry, htab->plt_entry, htab->plt_entry_size);

  /* First instruction in BTI enabled PLT stub is a BTI
     instruction so skip it.  */
  if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI
      && elf_elfheader (output_bfd)->e_type == ET_EXEC)
    plt_entry = plt_entry + 4;

  /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
     ADRP:   ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
  elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
				plt_entry,
				PG (gotplt_entry_address) -
				PG (plt_entry_address));

  /* Fill in the lo12 bits for the load from the pltgot.  */
  elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
				plt_entry + 4,
				PG_OFFSET (gotplt_entry_address));

  /* Fill in the lo12 bits for the add from the pltgot entry.  */
  elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
				plt_entry + 8,
				PG_OFFSET (gotplt_entry_address));

  /* All the GOTPLT Entries are essentially initialized to PLT0, so
     the first call through the stub falls into the lazy resolver.  */
  bfd_put_NN (output_bfd,
	      plt->output_section->vma + plt->output_offset,
	      gotplt->contents + got_offset);

  rela.r_offset = gotplt_entry_address;

  if (h->dynindx == -1
      || ((bfd_link_executable (info)
	   || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
	  && h->def_regular
	  && h->type == STT_GNU_IFUNC))
    {
      /* If an STT_GNU_IFUNC symbol is locally defined, generate
	 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT.  */
      rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
      rela.r_addend = (h->root.u.def.value
		       + h->root.u.def.section->output_section->vma
		       + h->root.u.def.section->output_offset);
    }
  else
    {
      /* Fill in the entry in the .rela.plt section.  */
      rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
      rela.r_addend = 0;
    }

  /* Compute the relocation entry to use based on PLT index and do
     not adjust reloc_count.  The reloc_count has already been adjusted
     to account for this entry.  */
  loc = relplt->contents + plt_index * RELOC_SIZE (htab);
  bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
}

/* Size sections even though they're not dynamic.  We use it to setup
   _TLS_MODULE_BASE_, if needed.
   */

static bool
elfNN_aarch64_early_size_sections (bfd *output_bfd,
				   struct bfd_link_info *info)
{
  asection *tls_sec;

  if (bfd_link_relocatable (info))
    return true;

  tls_sec = elf_hash_table (info)->tls_sec;

  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      tlsbase = elf_link_hash_lookup (elf_hash_table (info),
				      "_TLS_MODULE_BASE_", true, true, false);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *h = NULL;
	  const struct elf_backend_data *bed =
	    get_elf_backend_data (output_bfd);

	  /* Define _TLS_MODULE_BASE_ at the start of the TLS segment
	     and hide it so it never escapes to the dynamic symbol
	     table.  */
	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, false, bed->collect, &h)))
	    return false;

	  tlsbase->type = STT_TLS;
	  tlsbase = (struct elf_link_hash_entry *) h;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
	}
    }

  return true;
}

/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.  */

static bool
elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
				     struct bfd_link_info *info,
				     struct elf_link_hash_entry *h,
				     Elf_Internal_Sym *sym)
{
  struct elf_aarch64_link_hash_table *htab;
  htab = elf_aarch64_hash_table (info);

  if (h->plt.offset != (bfd_vma) - 1)
    {
      asection *plt, *gotplt, *relplt;

      /* This symbol has an entry in the procedure linkage table.  Set
	 it up.  */

      /* When building a static executable, use .iplt, .igot.plt and
	 .rela.iplt sections for STT_GNU_IFUNC symbols.
	 */
      if (htab->root.splt != NULL)
	{
	  plt = htab->root.splt;
	  gotplt = htab->root.sgotplt;
	  relplt = htab->root.srelplt;
	}
      else
	{
	  plt = htab->root.iplt;
	  gotplt = htab->root.igotplt;
	  relplt = htab->root.irelplt;
	}

      /* This symbol has an entry in the procedure linkage table.  Set
	 it up.  */
      if ((h->dynindx == -1
	   && !((h->forced_local || bfd_link_executable (info))
		&& h->def_regular
		&& h->type == STT_GNU_IFUNC))
	  || plt == NULL
	  || gotplt == NULL
	  || relplt == NULL)
	abort ();

      elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
    }

  if (h->got.offset != (bfd_vma) - 1
      && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL
      /* Undefined weak symbol in static PIE resolves to 0 without
	 any dynamic relocations.  */
      && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
    {
      Elf_Internal_Rela rela;
      bfd_byte *loc;

      /* This symbol has an entry in the global offset table.  Set it
	 up.  */
      if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
	abort ();

      /* Bit 0 of got.offset is used as a "relative reloc" marker
	 below, so mask it off when forming the address.  */
      rela.r_offset = (htab->root.sgot->output_section->vma
		       + htab->root.sgot->output_offset
		       + (h->got.offset & ~(bfd_vma) 1));

      if (h->def_regular
	  && h->type == STT_GNU_IFUNC)
	{
	  if (bfd_link_pic (info))
	    {
	      /* Generate R_AARCH64_GLOB_DAT.  */
	      goto do_glob_dat;
	    }
	  else
	    {
	      asection *plt;

	      if (!h->pointer_equality_needed)
		abort ();

	      /* For non-shared object, we can't use .got.plt, which
		 contains the real function address if we need pointer
		 equality.  We load the GOT entry with the PLT entry.  */
	      plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
	      bfd_put_NN (output_bfd, (plt->output_section->vma
				       + plt->output_offset
				       + h->plt.offset),
			  htab->root.sgot->contents
			  + (h->got.offset & ~(bfd_vma) 1));
	      return true;
	    }
	}
      else if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h))
	{
	  if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
	    return false;
	  BFD_ASSERT ((h->got.offset & 1) != 0);
	  /* Locally-resolved symbol in a shared object: a RELATIVE
	     reloc with the link-time value as addend is enough.  */
	  rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
	  rela.r_addend = (h->root.u.def.value
			   + h->root.u.def.section->output_section->vma
			   + h->root.u.def.section->output_offset);
	}
      else
	{
	do_glob_dat:
	  BFD_ASSERT ((h->got.offset & 1) == 0);
	  bfd_put_NN (output_bfd, (bfd_vma) 0,
		      htab->root.sgot->contents + h->got.offset);
	  rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
	  rela.r_addend = 0;
	}

      loc = htab->root.srelgot->contents;
      loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
      bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
    }

  if (h->needs_copy)
    {
      Elf_Internal_Rela rela;
      asection *s;
      bfd_byte *loc;

      /* This symbol needs a copy reloc.  Set it up.
	 */
      if (h->dynindx == -1
	  || (h->root.type != bfd_link_hash_defined
	      && h->root.type != bfd_link_hash_defweak)
	  || htab->root.srelbss == NULL)
	abort ();

      rela.r_offset = (h->root.u.def.value
		       + h->root.u.def.section->output_section->vma
		       + h->root.u.def.section->output_offset);
      rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
      rela.r_addend = 0;
      /* Copy-relocated read-only data lives in .data.rel.ro; its
	 COPY reloc goes to sreldynrelro instead of srelbss.  */
      if (h->root.u.def.section == htab->root.sdynrelro)
	s = htab->root.sreldynrelro;
      else
	s = htab->root.srelbss;
      loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
      bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  SYM may
     be NULL for local symbols.  */
  if (sym != NULL
      && (h == elf_hash_table (info)->hdynamic
	  || h == elf_hash_table (info)->hgot))
    sym->st_shndx = SHN_ABS;

  return true;
}

/* Finish up local dynamic symbol handling.  We set the contents of
   various dynamic sections here.  */

static int
elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
{
  struct elf_link_hash_entry *h
    = (struct elf_link_hash_entry *) *slot;
  struct bfd_link_info *info
    = (struct bfd_link_info *) inf;

  return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
					      info, h, NULL);
}

/* Initialise the reserved first entry (PLT0) of the procedure
   linkage table.  */

static void
elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
				     struct elf_aarch64_link_hash_table
				     *htab)
{
  /* Fill in PLT0.  Fixme:RR Note this doesn't distinguish between
     small and large plts and at the minute just generates
     the small PLT.  */

  /* PLT0 of the small PLT looks like this in ELF64 -
     stp x16, x30, [sp, #-16]!		// Save the reloc and lr on stack.
     adrp x16, PLT_GOT + 16		// Get the page base of the GOTPLT
     ldr  x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
					// symbol resolver
     add  x16, x16, #:lo12:PLT_GOT+16	// Load the lo12 bits of the
					// GOTPLT entry for this.
     br   x17
     PLT0 will be slightly different in ELF32 due to different got entry
     size.  */
  bfd_vma plt_got_2nd_ent;	/* Address of GOT[2].  */
  bfd_vma plt_base;


  memcpy (htab->root.splt->contents, htab->plt0_entry,
	  htab->plt_header_size);

  /* PR 26312: Explicitly set the sh_entsize to 0 so that
     consumers do not think that the section contains fixed
     sized objects.  */
  elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize = 0;

  plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
		     + htab->root.sgotplt->output_offset
		     + GOT_ENTRY_SIZE * 2);

  plt_base = htab->root.splt->output_section->vma +
    htab->root.splt->output_offset;

  /* First instruction in BTI enabled PLT stub is a BTI
     instruction so skip it.  */
  bfd_byte *plt0_entry = htab->root.splt->contents;
  if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI)
    plt0_entry = plt0_entry + 4;

  /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
     ADRP:   ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
  elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
				plt0_entry + 4,
				PG (plt_got_2nd_ent) - PG (plt_base + 4));

  /* Fill in the lo12 bits of the load from the GOTPLT.  */
  elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
				plt0_entry + 8,
				PG_OFFSET (plt_got_2nd_ent));

  /* Fill in the lo12 bits of the add of the GOTPLT address.  */
  elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
				plt0_entry + 12,
				PG_OFFSET (plt_got_2nd_ent));
}

/* Write out the dynamic sections at the end of the link: patch the
   .dynamic entries, emit PLT0 and the TLSDESC resolver stub, and
   initialise the reserved GOT slots.  */

static bool
elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
				       struct bfd_link_info *info)
{
  struct elf_aarch64_link_hash_table *htab;
  bfd *dynobj;
  asection *sdyn;

  htab = elf_aarch64_hash_table (info);
  dynobj = htab->root.dynobj;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (htab->root.dynamic_sections_created)
    {
      ElfNN_External_Dyn *dyncon, *dynconend;

      if (sdyn == NULL || htab->root.sgot == NULL)
	abort ();

      /* Walk the .dynamic section and fill in the values that were
	 left blank when the entries were added during sizing.  */
      dyncon = (ElfNN_External_Dyn *) sdyn->contents;
      dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  asection *s;

	  bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	    default:
	      continue;

	    case DT_PLTGOT:
	      s = htab->root.sgotplt;
	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      break;

	    case DT_JMPREL:
	      s = htab->root.srelplt;
	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      dyn.d_un.d_val = s->size;
	      break;

	    case DT_TLSDESC_PLT:
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
		+ htab->root.tlsdesc_plt;
	      break;

	    case DT_TLSDESC_GOT:
	      s = htab->root.sgot;
	      BFD_ASSERT (htab->root.tlsdesc_got != (bfd_vma)-1);
	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
		+ htab->root.tlsdesc_got;
	      break;
	    }

	  bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
	}

    }

  /* Fill in the special first entry in the procedure linkage table.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);

      if (htab->root.tlsdesc_plt && !(info->flags & DF_BIND_NOW))
	{
	  /* The lazy TLSDESC resolver's GOT slot starts out zero; the
	     dynamic linker fills it in at load time.  */
	  BFD_ASSERT (htab->root.tlsdesc_got != (bfd_vma)-1);
	  bfd_put_NN (output_bfd, (bfd_vma) 0,
		      htab->root.sgot->contents + htab->root.tlsdesc_got);

	  const bfd_byte *entry = elfNN_aarch64_tlsdesc_small_plt_entry;
	  htab->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE;

	  aarch64_plt_type type = elf_aarch64_tdata (output_bfd)->plt_type;
	  if (type == PLT_BTI || type == PLT_BTI_PAC)
	    {
	      entry = elfNN_aarch64_tlsdesc_small_plt_bti_entry;
	    }

	  memcpy (htab->root.splt->contents + htab->root.tlsdesc_plt,
		  entry, htab->tlsdesc_plt_entry_size);

	  {
	    bfd_vma adrp1_addr =
	      htab->root.splt->output_section->vma
	      + htab->root.splt->output_offset
	      + htab->root.tlsdesc_plt + 4;

	    bfd_vma adrp2_addr = adrp1_addr + 4;

	    bfd_vma got_addr =
	      htab->root.sgot->output_section->vma
	      + htab->root.sgot->output_offset;

	    bfd_vma pltgot_addr =
	      htab->root.sgotplt->output_section->vma
	      + htab->root.sgotplt->output_offset;

	    bfd_vma dt_tlsdesc_got = got_addr + htab->root.tlsdesc_got;

	    bfd_byte *plt_entry =
	      htab->root.splt->contents + htab->root.tlsdesc_plt;

	    /* First instruction in BTI enabled PLT stub is a BTI
	       instruction so skip it.  */
	    if (type & PLT_BTI)
	      {
		plt_entry = plt_entry + 4;
		adrp1_addr = adrp1_addr + 4;
		adrp2_addr = adrp2_addr + 4;
	      }

	    /* adrp x2, DT_TLSDESC_GOT */
	    elf_aarch64_update_plt_entry (output_bfd,
					  BFD_RELOC_AARCH64_ADR_HI21_PCREL,
					  plt_entry + 4,
					  (PG (dt_tlsdesc_got)
					   - PG (adrp1_addr)));

	    /* adrp x3, 0 */
	    elf_aarch64_update_plt_entry (output_bfd,
					  BFD_RELOC_AARCH64_ADR_HI21_PCREL,
					  plt_entry + 8,
					  (PG (pltgot_addr)
					   - PG (adrp2_addr)));

	    /* ldr x2, [x2, #0] */
	    elf_aarch64_update_plt_entry (output_bfd,
					  BFD_RELOC_AARCH64_LDSTNN_LO12,
					  plt_entry + 12,
					  PG_OFFSET (dt_tlsdesc_got));

	    /* add x3, x3, 0 */
	    elf_aarch64_update_plt_entry (output_bfd,
					  BFD_RELOC_AARCH64_ADD_LO12,
					  plt_entry + 16,
					  PG_OFFSET (pltgot_addr));
	  }
	}
    }

  if (htab->root.sgotplt)
    {
      if (bfd_is_abs_section (htab->root.sgotplt->output_section))
	{
	  _bfd_error_handler
	    (_("discarded output section: `%pA'"), htab->root.sgotplt);
	  return false;
	}

      /* Fill in the first three entries in the global offset table.  */
      if (htab->root.sgotplt->size > 0)
	{
	  bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);

	  /* Write GOT[1] and GOT[2], needed for the dynamic linker.  */
	  bfd_put_NN (output_bfd,
		      (bfd_vma) 0,
		      htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
	  bfd_put_NN (output_bfd,
		      (bfd_vma) 0,
		      htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
	}

      if (htab->root.sgot)
	{
	  if (htab->root.sgot->size > 0)
	    {
	      /* GOT[0] holds the runtime address of .dynamic.  */
	      bfd_vma addr =
		sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
	      bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
	    }
	}

      elf_section_data (htab->root.sgotplt->output_section)->
	this_hdr.sh_entsize = GOT_ENTRY_SIZE;
    }

  if (htab->root.sgot && htab->root.sgot->size > 0)
    elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
      = GOT_ENTRY_SIZE;

  /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols.  */
  htab_traverse (htab->loc_hash_table,
		 elfNN_aarch64_finish_local_dynamic_symbol,
		 info);

  return true;
}

/* Check if BTI enabled PLTs are needed.  Returns the type needed.  */
static aarch64_plt_type
get_plt_type (bfd *abfd)
{
  aarch64_plt_type ret = PLT_NORMAL;
  bfd_byte *contents, *extdyn, *extdynend;
  asection *sec = bfd_get_section_by_name (abfd, ".dynamic");
  /* No (readable, large enough) .dynamic section means a normal PLT.  */
  if (!sec
      || (sec->flags & SEC_HAS_CONTENTS) == 0
      || sec->size < sizeof (ElfNN_External_Dyn)
      || !bfd_malloc_and_get_section (abfd, sec, &contents))
    return ret;
  extdyn = contents;
  extdynend = contents + sec->size - sizeof (ElfNN_External_Dyn);
  for (; extdyn <= extdynend; extdyn += sizeof (ElfNN_External_Dyn))
    {
      Elf_Internal_Dyn dyn;
      bfd_elfNN_swap_dyn_in (abfd, extdyn, &dyn);

      /* Let's check the processor specific dynamic array tags.
*/ 10092 bfd_vma tag = dyn.d_tag; 10093 if (tag < DT_LOPROC || tag > DT_HIPROC) 10094 continue; 10095 10096 switch (tag) 10097 { 10098 case DT_AARCH64_BTI_PLT: 10099 ret |= PLT_BTI; 10100 break; 10101 10102 case DT_AARCH64_PAC_PLT: 10103 ret |= PLT_PAC; 10104 break; 10105 10106 default: break; 10107 } 10108 } 10109 free (contents); 10110 return ret; 10111 } 10112 10113 static long 10114 elfNN_aarch64_get_synthetic_symtab (bfd *abfd, 10115 long symcount, 10116 asymbol **syms, 10117 long dynsymcount, 10118 asymbol **dynsyms, 10119 asymbol **ret) 10120 { 10121 elf_aarch64_tdata (abfd)->plt_type = get_plt_type (abfd); 10122 return _bfd_elf_get_synthetic_symtab (abfd, symcount, syms, 10123 dynsymcount, dynsyms, ret); 10124 } 10125 10126 /* Return address for Ith PLT stub in section PLT, for relocation REL 10127 or (bfd_vma) -1 if it should not be included. */ 10128 10129 static bfd_vma 10130 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt, 10131 const arelent *rel ATTRIBUTE_UNUSED) 10132 { 10133 size_t plt0_size = PLT_ENTRY_SIZE; 10134 size_t pltn_size = PLT_SMALL_ENTRY_SIZE; 10135 10136 if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI_PAC) 10137 { 10138 if (elf_elfheader (plt->owner)->e_type == ET_EXEC) 10139 pltn_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE; 10140 else 10141 pltn_size = PLT_PAC_SMALL_ENTRY_SIZE; 10142 } 10143 else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI) 10144 { 10145 if (elf_elfheader (plt->owner)->e_type == ET_EXEC) 10146 pltn_size = PLT_BTI_SMALL_ENTRY_SIZE; 10147 } 10148 else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_PAC) 10149 { 10150 pltn_size = PLT_PAC_SMALL_ENTRY_SIZE; 10151 } 10152 10153 return plt->vma + plt0_size + i * pltn_size; 10154 } 10155 10156 /* Returns TRUE if NAME is an AArch64 mapping symbol. 10157 The ARM ELF standard defines $x (for A64 code) and $d (for data). 10158 It also allows a period initiated suffix to be added to the symbol, ie: 10159 "$[adtx]\.[:sym_char]+". 
   */

static bool
is_aarch64_mapping_symbol (const char * name)
{
  return name != NULL /* Paranoia.  */
    && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
			 the mapping symbols could have acquired a prefix.
			 We do not support this here, since such symbols no
			 longer conform to the ARM ELF ABI.  */
    && (name[1] == 'd' || name[1] == 'x')
    && (name[2] == 0 || name[2] == '.');
  /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
     any characters that follow the period are legal characters for the body
     of a symbol's name.  For now we just assume that this is the case.  */
}

/* Make sure that mapping symbols in object files are not removed via the
   "strip --strip-unneeded" tool.  These symbols might be needed in order to
   correctly generate linked files.  Once an object file has been linked,
   it should be safe to remove them.  */

static void
elfNN_aarch64_backend_symbol_processing (bfd *abfd, asymbol *sym)
{
  /* Only protect mapping symbols in relocatable objects: a BFD with
     EXEC_P or DYNAMIC set has already been linked.  */
  if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
      && sym->section != bfd_abs_section_ptr
      && is_aarch64_mapping_symbol (sym->name))
    sym->flags |= BSF_KEEP;
}

/* Implement elf_backend_setup_gnu_properties for AArch64.  It serves as a
   wrapper function for _bfd_aarch64_elf_link_setup_gnu_properties to account
   for the effect of GNU properties of the output_bfd.  */

static bfd *
elfNN_aarch64_link_setup_gnu_properties (struct bfd_link_info *info)
{
  uint32_t prop = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop;
  bfd *pbfd = _bfd_aarch64_elf_link_setup_gnu_properties (info, &prop);
  /* Store the merged property word back, and upgrade the PLT type to a
     BTI-protected one if the merged properties require BTI.  */
  elf_aarch64_tdata (info->output_bfd)->gnu_and_prop = prop;
  elf_aarch64_tdata (info->output_bfd)->plt_type
    |= (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) ? PLT_BTI : 0;
  setup_plt_values (info, elf_aarch64_tdata (info->output_bfd)->plt_type);
  return pbfd;
}

/* Implement elf_backend_merge_gnu_properties for AArch64.  It serves as a
   wrapper function for _bfd_aarch64_elf_merge_gnu_properties to account
   for the effect of GNU properties of the output_bfd.  */

static bool
elfNN_aarch64_merge_gnu_properties (struct bfd_link_info *info,
				    bfd *abfd, bfd *bbfd,
				    elf_property *aprop,
				    elf_property *bprop)
{
  uint32_t prop
    = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop;

  /* If output has been marked with BTI using command line argument, give out
     warning if necessary.  */
  /* Properties are merged per type, hence only check for warnings when merging
     GNU_PROPERTY_AARCH64_FEATURE_1_AND.  */
  if (((aprop && aprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND)
       || (bprop && bprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND))
      && (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
      && (!elf_aarch64_tdata (info->output_bfd)->no_bti_warn))
    {
      /* Warn for each input (ABFD / BBFD) whose property note is absent or
	 lacks the BTI feature bit.  */
      if ((aprop && !(aprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI))
	  || !aprop)
	{
	  _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when "
				"all inputs do not have BTI in NOTE section."),
			      abfd);
	}
      if ((bprop && !(bprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI))
	  || !bprop)
	{
	  _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when "
				"all inputs do not have BTI in NOTE section."),
			      bbfd);
	}
    }

  return _bfd_aarch64_elf_merge_gnu_properties (info, abfd, aprop,
						bprop, prop);
}

/* We use this so we can override certain functions
   (though currently we don't).
 */

const struct elf_size_info elfNN_aarch64_size_info =
{
  sizeof (ElfNN_External_Ehdr),
  sizeof (ElfNN_External_Phdr),
  sizeof (ElfNN_External_Shdr),
  sizeof (ElfNN_External_Rel),
  sizeof (ElfNN_External_Rela),
  sizeof (ElfNN_External_Sym),
  sizeof (ElfNN_External_Dyn),
  sizeof (Elf_External_Note),
  4,			/* Hash table entry size.  */
  1,			/* Internal relocs per external relocs.  */
  ARCH_SIZE,		/* Arch size.  */
  LOG_FILE_ALIGN,	/* Log_file_align.  */
  ELFCLASSNN, EV_CURRENT,
  bfd_elfNN_write_out_phdrs,
  bfd_elfNN_write_shdrs_and_ehdr,
  bfd_elfNN_checksum_contents,
  bfd_elfNN_write_relocs,
  bfd_elfNN_swap_symbol_in,
  bfd_elfNN_swap_symbol_out,
  bfd_elfNN_slurp_reloc_table,
  bfd_elfNN_slurp_symbol_table,
  bfd_elfNN_swap_dyn_in,
  bfd_elfNN_swap_dyn_out,
  bfd_elfNN_swap_reloc_in,
  bfd_elfNN_swap_reloc_out,
  bfd_elfNN_swap_reloca_in,
  bfd_elfNN_swap_reloca_out
};

/* Target identification for the default (GNU/Linux-style) AArch64 ELF
   vectors; the elfNN-target.h include at the end instantiates them.  */

#define ELF_ARCH			bfd_arch_aarch64
#define ELF_MACHINE_CODE		EM_AARCH64
#define ELF_MAXPAGESIZE			0x10000
#define ELF_COMMONPAGESIZE		0x1000

/* BFD-level entry points overridden by this backend.  */

#define bfd_elfNN_bfd_free_cached_info		\
  elfNN_aarch64_bfd_free_cached_info

#define bfd_elfNN_bfd_is_target_special_symbol	\
  elfNN_aarch64_is_target_special_symbol

#define bfd_elfNN_bfd_link_hash_table_create	\
  elfNN_aarch64_link_hash_table_create

#define bfd_elfNN_bfd_merge_private_bfd_data	\
  elfNN_aarch64_merge_private_bfd_data

#define bfd_elfNN_bfd_print_private_bfd_data	\
  elfNN_aarch64_print_private_bfd_data

#define bfd_elfNN_bfd_reloc_type_lookup		\
  elfNN_aarch64_reloc_type_lookup

#define bfd_elfNN_bfd_reloc_name_lookup		\
  elfNN_aarch64_reloc_name_lookup

#define bfd_elfNN_bfd_set_private_flags		\
  elfNN_aarch64_set_private_flags

#define bfd_elfNN_find_inliner_info		\
  elfNN_aarch64_find_inliner_info

#define bfd_elfNN_get_synthetic_symtab		\
  elfNN_aarch64_get_synthetic_symtab

#define bfd_elfNN_mkobject			\
  elfNN_aarch64_mkobject

#define bfd_elfNN_new_section_hook		\
  elfNN_aarch64_new_section_hook

/* elf_backend_* hooks consumed by the generic ELF linker code.  */

#define elf_backend_adjust_dynamic_symbol	\
  elfNN_aarch64_adjust_dynamic_symbol

#define elf_backend_early_size_sections		\
  elfNN_aarch64_early_size_sections

#define elf_backend_check_relocs		\
  elfNN_aarch64_check_relocs

#define elf_backend_copy_indirect_symbol	\
  elfNN_aarch64_copy_indirect_symbol

#define elf_backend_merge_symbol_attribute	\
  elfNN_aarch64_merge_symbol_attribute

/* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
   to them in our hash.  */
#define elf_backend_create_dynamic_sections	\
  elfNN_aarch64_create_dynamic_sections

#define elf_backend_init_index_section		\
  _bfd_elf_init_2_index_sections

#define elf_backend_finish_dynamic_sections	\
  elfNN_aarch64_finish_dynamic_sections

#define elf_backend_finish_dynamic_symbol	\
  elfNN_aarch64_finish_dynamic_symbol

#define elf_backend_object_p			\
  elfNN_aarch64_object_p

#define elf_backend_output_arch_local_syms	\
  elfNN_aarch64_output_arch_local_syms

#define elf_backend_maybe_function_sym		\
  elfNN_aarch64_maybe_function_sym

#define elf_backend_plt_sym_val			\
  elfNN_aarch64_plt_sym_val

#define elf_backend_init_file_header		\
  elfNN_aarch64_init_file_header

#define elf_backend_relocate_section		\
  elfNN_aarch64_relocate_section

#define elf_backend_reloc_type_class		\
  elfNN_aarch64_reloc_type_class

#define elf_backend_section_from_shdr		\
  elfNN_aarch64_section_from_shdr

#define elf_backend_section_from_phdr		\
  elfNN_aarch64_section_from_phdr

#define elf_backend_modify_headers		\
  elfNN_aarch64_modify_headers

#define elf_backend_late_size_sections		\
  elfNN_aarch64_late_size_sections

#define elf_backend_size_info			\
  elfNN_aarch64_size_info

#define elf_backend_write_section		\
  elfNN_aarch64_write_section

#define elf_backend_symbol_processing		\
  elfNN_aarch64_backend_symbol_processing

#define elf_backend_setup_gnu_properties	\
  elfNN_aarch64_link_setup_gnu_properties

#define elf_backend_merge_gnu_properties	\
  elfNN_aarch64_merge_gnu_properties

/* Backend capability / policy flags.  */

#define elf_backend_can_refcount       1
#define elf_backend_can_gc_sections    1
#define elf_backend_plt_readonly       1
#define elf_backend_want_got_plt       1
#define elf_backend_want_plt_sym       0
#define elf_backend_want_dynrelro      1
#define elf_backend_may_use_rel_p      0
#define elf_backend_may_use_rela_p     1
#define elf_backend_default_use_rela_p 1
#define elf_backend_rela_normal        1
#define elf_backend_dtrel_excludes_plt 1
/* Three reserved GOT slots (see the sgotplt initialisation above).  */
#define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
#define elf_backend_default_execstack  0
#define elf_backend_extern_protected_data 0
#define elf_backend_hash_symbol elf_aarch64_hash_symbol

#undef	elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section	".ARM.attributes"

#include "elfNN-target.h"

/* CloudABI support.  Re-include the target template with the vector
   names and OSABI overridden to create the CloudABI variants.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM	aarch64_elfNN_le_cloudabi_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME	"elfNN-littleaarch64-cloudabi"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM		aarch64_elfNN_be_cloudabi_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME		"elfNN-bigaarch64-cloudabi"

#undef	ELF_OSABI
#define	ELF_OSABI		ELFOSABI_CLOUDABI

#undef	elfNN_bed
#define elfNN_bed		elfNN_aarch64_cloudabi_bed

#include "elfNN-target.h"