// aarch64.cc -- aarch64 target support for gold.

// Copyright (C) 2014-2024 Free Software Foundation, Inc.
// Written by Jing Yu <jingyu@google.com> and Han Shen <shenhan@google.com>.

// This file is part of gold.

// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
// MA 02110-1301, USA.

#include "gold.h"

#include <cstring>
#include <map>
#include <set>

#include "elfcpp.h"
#include "dwarf.h"
#include "parameters.h"
#include "reloc.h"
#include "aarch64.h"
#include "object.h"
#include "symtab.h"
#include "layout.h"
#include "output.h"
#include "copy-relocs.h"
#include "target.h"
#include "target-reloc.h"
#include "target-select.h"
#include "tls.h"
#include "freebsd.h"
#include "nacl.h"
#include "gc.h"
#include "icf.h"
#include "aarch64-reloc-property.h"

// The first three .got.plt entries are reserved.
const int32_t AARCH64_GOTPLT_RESERVE_COUNT = 3;


namespace
{

using namespace gold;

template<int size, bool big_endian>
class Output_data_plt_aarch64;

template<int size, bool big_endian>
class Output_data_plt_aarch64_standard;

template<int size, bool big_endian>
class Target_aarch64;

template<int size, bool big_endian>
class AArch64_relocate_functions;

// Utility class dealing with insns. This is ported from macros in
// bfd/elfnn-aarch64.cc, but wrapped inside a class as static members. This
// class is used in erratum sequence scanning.

template<bool big_endian>
class AArch64_insn_utilities
{
 public:
  typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;

  static const int BYTES_PER_INSN;

  // Zero register encoding - 31.
  static const unsigned int AARCH64_ZR;

  static unsigned int
  aarch64_bit(Insntype insn, int pos)
  { return ((1 << pos) & insn) >> pos; }

  static unsigned int
  aarch64_bits(Insntype insn, int pos, int l)
  { return (insn >> pos) & ((1 << l) - 1); }

  // Get the encoding field "op31" of 3-source data processing insns. "op31" is
  // the name defined in armv8 insn manual C3.5.9.
  static unsigned int
  aarch64_op31(Insntype insn)
  { return aarch64_bits(insn, 21, 3); }

  // Get the encoding field "ra" of 3-source data processing insns. "ra" is the
  // third source register. See armv8 insn manual C3.5.9.
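  // For example (illustrative): in "madd x0, x1, x2, x3" the "ra" field
  // occupies bits [14:10] and selects x3; encoding "ra" as 31 (XZR) gives
  // the MUL alias, which aarch64_mlxl() below excludes.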
  static unsigned int
  aarch64_ra(Insntype insn)
  { return aarch64_bits(insn, 10, 5); }

  static bool
  is_adr(const Insntype insn)
  { return (insn & 0x9F000000) == 0x10000000; }

  static bool
  is_adrp(const Insntype insn)
  { return (insn & 0x9F000000) == 0x90000000; }

  static bool
  is_mrs_tpidr_el0(const Insntype insn)
  { return (insn & 0xFFFFFFE0) == 0xd53bd040; }

  static unsigned int
  aarch64_rm(const Insntype insn)
  { return aarch64_bits(insn, 16, 5); }

  static unsigned int
  aarch64_rn(const Insntype insn)
  { return aarch64_bits(insn, 5, 5); }

  static unsigned int
  aarch64_rd(const Insntype insn)
  { return aarch64_bits(insn, 0, 5); }

  static unsigned int
  aarch64_rt(const Insntype insn)
  { return aarch64_bits(insn, 0, 5); }

  static unsigned int
  aarch64_rt2(const Insntype insn)
  { return aarch64_bits(insn, 10, 5); }

  // Encode imm21 into adr. Signed imm21 is in the range of [-1M, 1M).
  static Insntype
  aarch64_adr_encode_imm(Insntype adr, int imm21)
  {
    gold_assert(is_adr(adr));
    gold_assert(-(1 << 20) <= imm21 && imm21 < (1 << 20));
    const int mask19 = (1 << 19) - 1;
    const int mask2 = 3;
    adr &= ~((mask19 << 5) | (mask2 << 29));
    adr |= ((imm21 & mask2) << 29) | (((imm21 >> 2) & mask19) << 5);
    return adr;
  }

  // Retrieve encoded adrp 33-bit signed imm value. This value is obtained by
  // 21-bit signed imm encoded in the insn multiplied by 4k (page size) and
  // 64-bit sign-extended, resulting in [-4G, 4G) with 12-lsb being 0.
  static int64_t
  aarch64_adrp_decode_imm(const Insntype adrp)
  {
    const int mask19 = (1 << 19) - 1;
    const int mask2 = 3;
    gold_assert(is_adrp(adrp));
    // 21-bit imm encoded in adrp.
    uint64_t imm = ((adrp >> 29) & mask2) | (((adrp >> 5) & mask19) << 2);
    // Retrieve msb of 21-bit-signed imm for sign extension.
    uint64_t msbt = (imm >> 20) & 1;
    // Real value is imm multiplied by 4k. Value now has 33-bit information.
    int64_t value = imm << 12;
    // Sign extend to 64-bit by repeating msbt 31 (64-33) times and merge it
    // with value.
    return ((((uint64_t)(1) << 32) - msbt) << 33) | value;
  }

  static bool
  aarch64_b(const Insntype insn)
  { return (insn & 0xFC000000) == 0x14000000; }

  static bool
  aarch64_bl(const Insntype insn)
  { return (insn & 0xFC000000) == 0x94000000; }

  static bool
  aarch64_blr(const Insntype insn)
  { return (insn & 0xFFFFFC1F) == 0xD63F0000; }

  static bool
  aarch64_br(const Insntype insn)
  { return (insn & 0xFFFFFC1F) == 0xD61F0000; }

  // All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
  // LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops.
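  // Illustrative (hypothetical) use of the predicates below together with
  // aarch64_mem_op_p() further down in this class:
  //
  //   unsigned int rt, rt2;
  //   bool pair, load;
  //   if (AArch64_insn_utilities<big_endian>::aarch64_mem_op_p(
  //           insn, &rt, &rt2, &pair, &load))
  //     {
  //       // INSN is a load/store; RT/RT2 are its transfer registers.
  //     }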
  static bool
  aarch64_ld(Insntype insn) { return aarch64_bit(insn, 22) == 1; }

  static bool
  aarch64_ldst(Insntype insn)
  { return (insn & 0x0a000000) == 0x08000000; }

  static bool
  aarch64_ldst_ex(Insntype insn)
  { return (insn & 0x3f000000) == 0x08000000; }

  static bool
  aarch64_ldst_pcrel(Insntype insn)
  { return (insn & 0x3b000000) == 0x18000000; }

  static bool
  aarch64_ldst_nap(Insntype insn)
  { return (insn & 0x3b800000) == 0x28000000; }

  static bool
  aarch64_ldstp_pi(Insntype insn)
  { return (insn & 0x3b800000) == 0x28800000; }

  static bool
  aarch64_ldstp_o(Insntype insn)
  { return (insn & 0x3b800000) == 0x29000000; }

  static bool
  aarch64_ldstp_pre(Insntype insn)
  { return (insn & 0x3b800000) == 0x29800000; }

  static bool
  aarch64_ldst_ui(Insntype insn)
  { return (insn & 0x3b200c00) == 0x38000000; }

  static bool
  aarch64_ldst_piimm(Insntype insn)
  { return (insn & 0x3b200c00) == 0x38000400; }

  static bool
  aarch64_ldst_u(Insntype insn)
  { return (insn & 0x3b200c00) == 0x38000800; }

  static bool
  aarch64_ldst_preimm(Insntype insn)
  { return (insn & 0x3b200c00) == 0x38000c00; }

  static bool
  aarch64_ldst_ro(Insntype insn)
  { return (insn & 0x3b200c00) == 0x38200800; }

  static bool
  aarch64_ldst_uimm(Insntype insn)
  { return (insn & 0x3b000000) == 0x39000000; }

  static bool
  aarch64_ldst_simd_m(Insntype insn)
  { return (insn & 0xbfbf0000) == 0x0c000000; }

  static bool
  aarch64_ldst_simd_m_pi(Insntype insn)
  { return (insn & 0xbfa00000) == 0x0c800000; }

  static bool
  aarch64_ldst_simd_s(Insntype insn)
  { return (insn & 0xbf9f0000) == 0x0d000000; }

  static bool
  aarch64_ldst_simd_s_pi(Insntype insn)
  { return (insn & 0xbf800000) == 0x0d800000; }

  // Classify an INSN if it is indeed a load/store. Return true if INSN is a
  // LD/ST instruction otherwise return false. For scalar LD/ST instructions
  // PAIR is FALSE, RT is returned and RT2 is set equal to RT. For LD/ST pair
  // instructions PAIR is TRUE, RT and RT2 are returned.
  static bool
  aarch64_mem_op_p(Insntype insn, unsigned int *rt, unsigned int *rt2,
                   bool *pair, bool *load)
  {
    uint32_t opcode;
    unsigned int r;
    uint32_t opc = 0;
    uint32_t v = 0;
    uint32_t opc_v = 0;

    /* Bail out quickly if INSN doesn't fall into the load-store
       encoding space.  */
    if (!aarch64_ldst (insn))
      return false;

    *pair = false;
    *load = false;
    if (aarch64_ldst_ex (insn))
      {
        *rt = aarch64_rt (insn);
        *rt2 = *rt;
        if (aarch64_bit (insn, 21) == 1)
          {
            *pair = true;
            *rt2 = aarch64_rt2 (insn);
          }
        *load = aarch64_ld (insn);
        return true;
      }
    else if (aarch64_ldst_nap (insn)
             || aarch64_ldstp_pi (insn)
             || aarch64_ldstp_o (insn)
             || aarch64_ldstp_pre (insn))
      {
        *pair = true;
        *rt = aarch64_rt (insn);
        *rt2 = aarch64_rt2 (insn);
        *load = aarch64_ld (insn);
        return true;
      }
    else if (aarch64_ldst_pcrel (insn)
             || aarch64_ldst_ui (insn)
             || aarch64_ldst_piimm (insn)
             || aarch64_ldst_u (insn)
             || aarch64_ldst_preimm (insn)
             || aarch64_ldst_ro (insn)
             || aarch64_ldst_uimm (insn))
      {
        *rt = aarch64_rt (insn);
        *rt2 = *rt;
        if (aarch64_ldst_pcrel (insn))
          *load = true;
        opc = aarch64_bits (insn, 22, 2);
        v = aarch64_bit (insn, 26);
        opc_v = opc | (v << 2);
        *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
                 || opc_v == 5 || opc_v == 7);
        return true;
      }
    else if (aarch64_ldst_simd_m (insn)
             || aarch64_ldst_simd_m_pi (insn))
      {
        *rt = aarch64_rt (insn);
        *load = aarch64_bit (insn, 22);
        opcode = (insn >> 12) & 0xf;
        switch (opcode)
          {
          case 0:
          case 2:
            *rt2 = *rt + 3;
            break;

          case 4:
          case 6:
            *rt2 = *rt + 2;
            break;

          case 7:
            *rt2 = *rt;
            break;

          case 8:
          case 10:
            *rt2 = *rt + 1;
            break;

          default:
            return false;
          }
        return true;
      }
    else if (aarch64_ldst_simd_s (insn)
             || aarch64_ldst_simd_s_pi (insn))
      {
        *rt = aarch64_rt (insn);
        r = (insn >> 21) & 1;
        *load = aarch64_bit (insn, 22);
        opcode = (insn >> 13) & 0x7;
        switch (opcode)
          {
          case 0:
          case 2:
          case 4:
            *rt2 = *rt + r;
            break;

          case 1:
          case 3:
          case 5:
            *rt2 = *rt + (r == 0 ? 2 : 3);
            break;

          case 6:
            *rt2 = *rt + r;
            break;

          case 7:
            *rt2 = *rt + (r == 0 ? 2 : 3);
            break;

          default:
            return false;
          }
        return true;
      }
    return false;
  } // End of "aarch64_mem_op_p".

  // Return true if INSN is mac insn.
  static bool
  aarch64_mac(Insntype insn)
  { return (insn & 0xff000000) == 0x9b000000; }

  // Return true if INSN is multiply-accumulate.
  // (This is similar to the implementation in elfnn-aarch64.c.)
  static bool
  aarch64_mlxl(Insntype insn)
  {
    uint32_t op31 = aarch64_op31(insn);
    if (aarch64_mac(insn)
        && (op31 == 0 || op31 == 1 || op31 == 5)
        /* Exclude MUL instructions which are encoded as a multiply-accumulate
           with RA = XZR.  */
        && aarch64_ra(insn) != AARCH64_ZR)
      {
        return true;
      }
    return false;
  }
}; // End of "AArch64_insn_utilities".


// Insn length in bytes.

template<bool big_endian>
const int AArch64_insn_utilities<big_endian>::BYTES_PER_INSN = 4;


// Zero register encoding - 31.

template<bool big_endian>
const unsigned int AArch64_insn_utilities<big_endian>::AARCH64_ZR = 0x1f;


// Output_data_got_aarch64 class.
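//
// This subclass of Output_data_got handles the static-link case: TLS
// relocations that would otherwise be left for the dynamic linker
// (R_AARCH64_TLS_DTPREL64 and R_AARCH64_TLS_TPREL64) are recorded via
// add_static_reloc() and applied directly in do_write(). Illustrative
// (hypothetical caller) sketch:
//
//   got->add_static_reloc(got_offset, elfcpp::R_AARCH64_TLS_TPREL64, gsym);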
template<int size, bool big_endian>
class Output_data_got_aarch64 : public Output_data_got<size, big_endian>
{
 public:
  typedef typename elfcpp::Elf_types<size>::Elf_Addr Valtype;

  Output_data_got_aarch64(Symbol_table* symtab, Layout* layout)
    : Output_data_got<size, big_endian>(),
      symbol_table_(symtab), layout_(layout)
  { }

  // Add a static entry for the GOT entry at OFFSET. GSYM is a global
  // symbol and R_TYPE is the code of a dynamic relocation that needs to be
  // applied in a static link.
  void
  add_static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
  { this->static_relocs_.push_back(Static_reloc(got_offset, r_type, gsym)); }

  // Add a static reloc for the GOT entry at OFFSET. RELOBJ is an object
  // defining a local symbol with INDEX. R_TYPE is the code of a dynamic
  // relocation that needs to be applied in a static link.
  void
  add_static_reloc(unsigned int got_offset, unsigned int r_type,
                   Sized_relobj_file<size, big_endian>* relobj,
                   unsigned int index)
  {
    this->static_relocs_.push_back(Static_reloc(got_offset, r_type, relobj,
                                                index));
  }

 protected:
  // Write out the GOT table.
  void
  do_write(Output_file* of)
  {
    // The first entry in the GOT is the address of the .dynamic section.
    gold_assert(this->data_size() >= size / 8);
    Output_section* dynamic = this->layout_->dynamic_section();
    Valtype dynamic_addr = dynamic == NULL ? 0 : dynamic->address();
    this->replace_constant(0, dynamic_addr);
    Output_data_got<size, big_endian>::do_write(of);

    // Handle static relocs.
    if (this->static_relocs_.empty())
      return;

    typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;

    gold_assert(parameters->doing_static_link());
    const off_t offset = this->offset();
    const section_size_type oview_size =
      convert_to_section_size_type(this->data_size());
    unsigned char* const oview = of->get_output_view(offset, oview_size);

    Output_segment* tls_segment = this->layout_->tls_segment();
    gold_assert(tls_segment != NULL);

    AArch64_address aligned_tcb_address =
      align_address(Target_aarch64<size, big_endian>::TCB_SIZE,
                    tls_segment->maximum_alignment());

    for (size_t i = 0; i < this->static_relocs_.size(); ++i)
      {
        Static_reloc& reloc(this->static_relocs_[i]);
        AArch64_address value;

        if (!reloc.symbol_is_global())
          {
            Sized_relobj_file<size, big_endian>* object = reloc.relobj();
            const Symbol_value<size>* psymval =
              reloc.relobj()->local_symbol(reloc.index());

            // We are doing static linking. Issue an error and skip this
            // relocation if the symbol is undefined or in a discarded section.
            bool is_ordinary;
            unsigned int shndx = psymval->input_shndx(&is_ordinary);
            if ((shndx == elfcpp::SHN_UNDEF)
                || (is_ordinary
                    && shndx != elfcpp::SHN_UNDEF
                    && !object->is_section_included(shndx)
                    && !this->symbol_table_->is_section_folded(object, shndx)))
              {
                gold_error(_("undefined or discarded local symbol %u from "
                             " object %s in GOT"),
                           reloc.index(), reloc.relobj()->name().c_str());
                continue;
              }
            value = psymval->value(object, 0);
          }
        else
          {
            const Symbol* gsym = reloc.symbol();
            gold_assert(gsym != NULL);
            if (gsym->is_forwarder())
              gsym = this->symbol_table_->resolve_forwards(gsym);

            // We are doing static linking. Issue an error and skip this
            // relocation if the symbol is undefined or in a discarded section,
            // unless it is a weakly undefined symbol.
            if ((gsym->is_defined_in_discarded_section()
                 || gsym->is_undefined())
                && !gsym->is_weak_undefined())
              {
                gold_error(_("undefined or discarded symbol %s in GOT"),
                           gsym->name());
                continue;
              }

            if (!gsym->is_weak_undefined())
              {
                const Sized_symbol<size>* sym =
                  static_cast<const Sized_symbol<size>*>(gsym);
                value = sym->value();
              }
            else
              value = 0;
          }

        unsigned got_offset = reloc.got_offset();
        gold_assert(got_offset < oview_size);

        typedef typename elfcpp::Swap<size, big_endian>::Valtype Valtype;
        Valtype* wv = reinterpret_cast<Valtype*>(oview + got_offset);
        Valtype x;
        switch (reloc.r_type())
          {
          case elfcpp::R_AARCH64_TLS_DTPREL64:
            x = value;
            break;
          case elfcpp::R_AARCH64_TLS_TPREL64:
            x = value + aligned_tcb_address;
            break;
          default:
            gold_unreachable();
          }
        elfcpp::Swap<size, big_endian>::writeval(wv, x);
      }

    of->write_output_view(offset, oview_size, oview);
  }

 private:
  // Symbol table of the output object.
  Symbol_table* symbol_table_;
  // A pointer to the Layout class, so that we can find the .dynamic
  // section when we write out the GOT section.
  Layout* layout_;

  // This class represents dynamic relocations that need to be applied by
  // gold because we are using TLS relocations in a static link.
  class Static_reloc
  {
   public:
    Static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
      : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(true)
    { this->u_.global.symbol = gsym; }

    Static_reloc(unsigned int got_offset, unsigned int r_type,
                 Sized_relobj_file<size, big_endian>* relobj,
                 unsigned int index)
      : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(false)
    {
      this->u_.local.relobj = relobj;
      this->u_.local.index = index;
    }

    // Return the GOT offset.
    unsigned int
    got_offset() const
    { return this->got_offset_; }

    // Relocation type.
    unsigned int
    r_type() const
    { return this->r_type_; }

    // Whether the symbol is global or not.
    bool
    symbol_is_global() const
    { return this->symbol_is_global_; }

    // For a relocation against a global symbol, the global symbol.
    Symbol*
    symbol() const
    {
      gold_assert(this->symbol_is_global_);
      return this->u_.global.symbol;
    }

    // For a relocation against a local symbol, the defining object.
    Sized_relobj_file<size, big_endian>*
    relobj() const
    {
      gold_assert(!this->symbol_is_global_);
      return this->u_.local.relobj;
    }

    // For a relocation against a local symbol, the local symbol index.
    unsigned int
    index() const
    {
      gold_assert(!this->symbol_is_global_);
      return this->u_.local.index;
    }

   private:
    // GOT offset of the entry to which this relocation is applied.
    unsigned int got_offset_;
    // Type of relocation.
    unsigned int r_type_;
    // Whether this relocation is against a global symbol.
    bool symbol_is_global_;
    // A global or local symbol.
    union
    {
      struct
      {
        // For a global symbol, the symbol itself.
        Symbol* symbol;
      } global;
      struct
      {
        // For a local symbol, the object defining the symbol.
        Sized_relobj_file<size, big_endian>* relobj;
        // For a local symbol, the symbol index.
        unsigned int index;
      } local;
    } u_;
  }; // End of inner class Static_reloc

  std::vector<Static_reloc> static_relocs_;
}; // End of Output_data_got_aarch64


template<int size, bool big_endian>
class AArch64_input_section;


template<int size, bool big_endian>
class AArch64_output_section;


template<int size, bool big_endian>
class AArch64_relobj;


// Stub type enum constants.

enum
{
  ST_NONE = 0,

  // Using adrp/add pair, 4 insns (including alignment) without mem access,
  // the fastest stub. This has a limited jump distance, which is tested by
  // aarch64_valid_for_adrp_p.
  ST_ADRP_BRANCH = 1,

  // Using ldr-absolute-address/br-register, 4 insns with 1 mem access,
  // unlimited in jump distance.
  ST_LONG_BRANCH_ABS = 2,

  // Using ldr/calculate-pcrel/jump, 8 insns (including alignment) with 1
  // mem access, slowest one. Only used in position independent executables.
  ST_LONG_BRANCH_PCREL = 3,

  // Stub for erratum 843419 handling.
  ST_E_843419 = 4,

  // Stub for erratum 835769 handling.
  ST_E_835769 = 5,

  // Number of total stub types.
  ST_NUMBER = 6
};


// Struct that wraps insns for a particular stub. All stub templates are
// created/initialized as constants by Stub_template_repertoire.

template<bool big_endian>
struct Stub_template
{
  const typename AArch64_insn_utilities<big_endian>::Insntype* insns;
  const int insn_num;
};


// Simple singleton class that creates/initializes/stores all types of stub
// templates.

template<bool big_endian>
class Stub_template_repertoire
{
 public:
  typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;

  // Single static method to get stub template for a given stub type.
  static const Stub_template<big_endian>*
  get_stub_template(int type)
  {
    static Stub_template_repertoire<big_endian> singleton;
    return singleton.stub_templates_[type];
  }

 private:
  // Constructor - creates/initializes all stub templates.
  Stub_template_repertoire();
  ~Stub_template_repertoire()
  { }

  // Disallow copy ctor and copy assignment operator.
  Stub_template_repertoire(Stub_template_repertoire&);
  Stub_template_repertoire& operator=(Stub_template_repertoire&);

  // Data that stores all insn templates.
  const Stub_template<big_endian>* stub_templates_[ST_NUMBER];
}; // End of "class Stub_template_repertoire".


// Constructor - creates/initializes all stub templates.

template<bool big_endian>
Stub_template_repertoire<big_endian>::Stub_template_repertoire()
{
  // Insn array definitions.
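  // Each array below holds the raw (unrelocated) insn words for one stub
  // type; the install_insn_template macro at the end of this constructor
  // pairs an array with its element count to form a Stub_template, so for
  // example get_stub_template(ST_ADRP_BRANCH)->insn_num is 4.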
  const static Insntype ST_NONE_INSNS[] = {};

  const static Insntype ST_ADRP_BRANCH_INSNS[] =
    {
      0x90000010,  /* adrp ip0, X             */
                   /* ADR_PREL_PG_HI21(X)     */
      0x91000210,  /* add ip0, ip0, :lo12:X   */
                   /* ADD_ABS_LO12_NC(X)      */
      0xd61f0200,  /* br ip0                  */
      0x00000000,  /* alignment padding       */
    };

  const static Insntype ST_LONG_BRANCH_ABS_INSNS[] =
    {
      0x58000050,  /* ldr ip0, 0x8            */
      0xd61f0200,  /* br ip0                  */
      0x00000000,  /* address field           */
      0x00000000,  /* address field           */
    };

  const static Insntype ST_LONG_BRANCH_PCREL_INSNS[] =
    {
      0x58000090,  /* ldr ip0, 0x10           */
      0x10000011,  /* adr ip1, #0             */
      0x8b110210,  /* add ip0, ip0, ip1       */
      0xd61f0200,  /* br ip0                  */
      0x00000000,  /* address field           */
      0x00000000,  /* address field           */
      0x00000000,  /* alignment padding       */
      0x00000000,  /* alignment padding       */
    };

  const static Insntype ST_E_843419_INSNS[] =
    {
      0x00000000,  /* Placeholder for erratum insn. */
      0x14000000,  /* b <label> */
    };

  // ST_E_835769 has the same stub template as ST_E_843419,
  // but we reproduce the array here so that the sizeof
  // expressions in install_insn_template will work.
  const static Insntype ST_E_835769_INSNS[] =
    {
      0x00000000,  /* Placeholder for erratum insn. */
      0x14000000,  /* b <label> */
    };

#define install_insn_template(T) \
  const static Stub_template<big_endian> template_##T = {  \
    T##_INSNS, sizeof(T##_INSNS) / sizeof(T##_INSNS[0]) }; \
  this->stub_templates_[T] = &template_##T

  install_insn_template(ST_NONE);
  install_insn_template(ST_ADRP_BRANCH);
  install_insn_template(ST_LONG_BRANCH_ABS);
  install_insn_template(ST_LONG_BRANCH_PCREL);
  install_insn_template(ST_E_843419);
  install_insn_template(ST_E_835769);

#undef install_insn_template
}


// Base class for stubs.

template<int size, bool big_endian>
class Stub_base
{
 public:
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
  typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;

  static const AArch64_address invalid_address =
    static_cast<AArch64_address>(-1);

  static const section_offset_type invalid_offset =
    static_cast<section_offset_type>(-1);

  Stub_base(int type)
    : destination_address_(invalid_address),
      offset_(invalid_offset),
      type_(type)
  {}

  ~Stub_base()
  {}

  // Get stub type.
  int
  type() const
  { return this->type_; }

  // Get stub template that provides stub insn information.
  const Stub_template<big_endian>*
  stub_template() const
  {
    return Stub_template_repertoire<big_endian>::
      get_stub_template(this->type());
  }

  // Get destination address.
  AArch64_address
  destination_address() const
  {
    gold_assert(this->destination_address_ != this->invalid_address);
    return this->destination_address_;
  }

  // Set destination address.
  void
  set_destination_address(AArch64_address address)
  {
    gold_assert(address != this->invalid_address);
    this->destination_address_ = address;
  }

  // Reset the destination address.
  void
  reset_destination_address()
  { this->destination_address_ = this->invalid_address; }

  // Get offset of code stub. For Reloc_stub, it is the offset from the
  // beginning of its containing stub table; for Erratum_stub, it is the offset
  // from the end of reloc_stubs.
  section_offset_type
  offset() const
  {
    gold_assert(this->offset_ != this->invalid_offset);
    return this->offset_;
  }

  // Set stub offset.
  void
  set_offset(section_offset_type offset)
  { this->offset_ = offset; }

  // Return the stub insns.
  const Insntype*
  insns() const
  { return this->stub_template()->insns; }

  // Return the number of stub insns.
  unsigned int
  insn_num() const
  { return this->stub_template()->insn_num; }

  // Get size of the stub.
  int
  stub_size() const
  {
    return this->insn_num() *
      AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
  }

  // Write stub to output file.
  void
  write(unsigned char* view, section_size_type view_size)
  { this->do_write(view, view_size); }

 protected:
  // Abstract method to be implemented by sub-classes.
  virtual void
  do_write(unsigned char*, section_size_type) = 0;

 private:
  // The last insn of a stub is a jump to the destination insn. This field
  // records the destination address.
  AArch64_address destination_address_;
  // The stub offset. Note this has different interpretations between a
  // Reloc_stub and an Erratum_stub. For Reloc_stub this is the offset from the
  // beginning of the containing stub_table, whereas for Erratum_stub, this is
  // the offset from the end of reloc_stubs.
  section_offset_type offset_;
  // Stub type.
  const int type_;
}; // End of "Stub_base".


// Erratum stub class. An erratum stub differs from a reloc stub in that for
// each erratum occurrence, we generate an erratum stub. We never share erratum
// stubs, whereas for reloc stubs, different branch insns share a single reloc
// stub as long as the branch targets are the same. (More to the point, reloc
// stubs can be shared because they're used to reach a specific target, whereas
// erratum stubs branch back to the original control flow.)

template<int size, bool big_endian>
class Erratum_stub : public Stub_base<size, big_endian>
{
 public:
  typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
  typedef AArch64_insn_utilities<big_endian> Insn_utilities;
  typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;

  static const int STUB_ADDR_ALIGN;

  static const Insntype invalid_insn = static_cast<Insntype>(-1);

  Erratum_stub(The_aarch64_relobj* relobj, int type,
               unsigned shndx, unsigned int sh_offset)
    : Stub_base<size, big_endian>(type), relobj_(relobj),
      shndx_(shndx), sh_offset_(sh_offset),
      erratum_insn_(invalid_insn),
      erratum_address_(this->invalid_address)
  {}

  ~Erratum_stub() {}

  // Return the object that contains the erratum.
  The_aarch64_relobj*
  relobj()
  { return this->relobj_; }

  // Get section index of the erratum.
  unsigned int
  shndx() const
  { return this->shndx_; }

  // Get section offset of the erratum.
  unsigned int
  sh_offset() const
  { return this->sh_offset_; }

  // Get the erratum insn. This is the insn located at erratum_insn_address.
  Insntype
  erratum_insn() const
  {
    gold_assert(this->erratum_insn_ != this->invalid_insn);
    return this->erratum_insn_;
  }

  // Set the insn that the erratum happens to.
  void
  set_erratum_insn(Insntype insn)
  { this->erratum_insn_ = insn; }

  // For 843419, the erratum insn is ld/st xt, [xn, #uimm], which may be a
  // relocation spot; in that case, the erratum_insn_ recorded at scanning
  // phase is no longer the one we want to write out to the stub, so update
  // erratum_insn_ with the relocated version. Also note that in this case xn
  // must not be "PC", so it is safe to move the erratum insn from its original
  // place to the stub. For 835769, the erratum insn is a multiply-accumulate
  // insn, which cannot be a relocation spot (an assertion guards this).
  void
  update_erratum_insn(Insntype insn)
  {
    gold_assert(this->erratum_insn_ != this->invalid_insn);
    switch (this->type())
      {
      case ST_E_843419:
        gold_assert(Insn_utilities::aarch64_ldst_uimm(insn));
        gold_assert(Insn_utilities::aarch64_ldst_uimm(this->erratum_insn()));
        gold_assert(Insn_utilities::aarch64_rd(insn) ==
                    Insn_utilities::aarch64_rd(this->erratum_insn()));
        gold_assert(Insn_utilities::aarch64_rn(insn) ==
                    Insn_utilities::aarch64_rn(this->erratum_insn()));
        // Update plain ld/st insn with relocated insn.
        this->erratum_insn_ = insn;
        break;
      case ST_E_835769:
        gold_assert(insn == this->erratum_insn());
        break;
      default:
        gold_unreachable();
      }
  }

  // Return the address where an erratum must be done.
  AArch64_address
  erratum_address() const
  {
    gold_assert(this->erratum_address_ != this->invalid_address);
    return this->erratum_address_;
  }

  // Set the address where an erratum must be done.
  void
  set_erratum_address(AArch64_address addr)
  { this->erratum_address_ = addr; }

  // Later relaxation passes may alter the recorded erratum and destination
  // address. Given an up-to-date output section address of shndx_ in
  // relobj_, we can derive the erratum address and destination address.
  void
  update_erratum_address(AArch64_address output_section_addr)
  {
    const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
    AArch64_address updated_addr = output_section_addr + this->sh_offset_;
    this->set_erratum_address(updated_addr);
    this->set_destination_address(updated_addr + BPI);
  }

  // Comparator used to group Erratum_stubs in a set by (obj, shndx,
  // sh_offset). We do not include 'type' in the calculation, because there is
  // at most one stub type at (obj, shndx, sh_offset).
  bool
  operator<(const Erratum_stub<size, big_endian>& k) const
  {
    if (this == &k)
      return false;
    // We group stubs by relobj.
    if (this->relobj_ != k.relobj_)
      return this->relobj_ < k.relobj_;
    // Then by section index.
    if (this->shndx_ != k.shndx_)
      return this->shndx_ < k.shndx_;
    // Lastly by section offset.
    return this->sh_offset_ < k.sh_offset_;
  }

  void
  invalidate_erratum_stub()
  {
    gold_assert(this->erratum_insn_ != invalid_insn);
    this->erratum_insn_ = invalid_insn;
  }

  bool
  is_invalidated_erratum_stub()
  { return this->erratum_insn_ == invalid_insn; }

 protected:
  virtual void
  do_write(unsigned char*, section_size_type);

 private:
  // The object that needs to be fixed.
  The_aarch64_relobj* relobj_;
  // The shndx in the object that needs to be fixed.
  const unsigned int shndx_;
  // The section offset in the object that needs to be fixed.
  const unsigned int sh_offset_;
  // The insn to be fixed.
  Insntype erratum_insn_;
  // The address of the above insn.
  AArch64_address erratum_address_;
}; // End of "Erratum_stub".


// Erratum subclass to wrap additional info needed by 843419. In fixing this
// erratum, we may choose to replace 'adrp' with 'adr', in which case we need
// adrp's code position (two or three insns before the erratum insn itself).

template<int size, bool big_endian>
class E843419_stub : public Erratum_stub<size, big_endian>
{
 public:
  typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;

  E843419_stub(AArch64_relobj<size, big_endian>* relobj,
               unsigned int shndx, unsigned int sh_offset,
               unsigned int adrp_sh_offset)
    : Erratum_stub<size, big_endian>(relobj, ST_E_843419, shndx, sh_offset),
      adrp_sh_offset_(adrp_sh_offset)
  {}

  unsigned int
  adrp_sh_offset() const
  { return this->adrp_sh_offset_; }

 private:
  // Section offset of "adrp". (We do not need an "adrp_shndx_" field, because
  // we can obtain it from its parent.)
  const unsigned int adrp_sh_offset_;
};


template<int size, bool big_endian>
const int Erratum_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;

// Comparator used in set definition.
template<int size, bool big_endian>
struct Erratum_stub_less
{
  bool
  operator()(const Erratum_stub<size, big_endian>* s1,
             const Erratum_stub<size, big_endian>* s2) const
  { return *s1 < *s2; }
};

// Erratum_stub implementation for writing stub to output file.

template<int size, bool big_endian>
void
Erratum_stub<size, big_endian>::do_write(unsigned char* view,
                                         section_size_type)
{
  typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
  const Insntype* insns = this->insns();
  uint32_t num_insns = this->insn_num();
  Insntype* ip = reinterpret_cast<Insntype*>(view);
  // For the currently implemented errata 843419 and 835769, the first insn in
  // the stub is always a copy of the problematic insn (in 843419, the mem
  // access insn; in 835769, the mac insn), followed by a jump-back.
  elfcpp::Swap<32, big_endian>::writeval(ip, this->erratum_insn());
  for (uint32_t i = 1; i < num_insns; ++i)
    elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
}


// Reloc stub class.

template<int size, bool big_endian>
class Reloc_stub : public Stub_base<size, big_endian>
{
 public:
  typedef Reloc_stub<size, big_endian> This;
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;

  // Branch range. This is used to calculate the section group size, as well as
  // to determine whether a stub is needed.
  static const int MAX_BRANCH_OFFSET = ((1 << 25) - 1) << 2;
  static const int MIN_BRANCH_OFFSET = -((1 << 25) << 2);

  // Constants used to determine if an offset fits in the adrp instruction
  // encoding.
  static const int MAX_ADRP_IMM = (1 << 20) - 1;
  static const int MIN_ADRP_IMM = -(1 << 20);

  static const int BYTES_PER_INSN = 4;
  static const int STUB_ADDR_ALIGN;

  // Determine whether the offset fits in the jump/branch instruction.
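  // For example (illustrative): MAX_BRANCH_OFFSET is ((1 << 25) - 1) << 2 =
  // +128MB - 4 and MIN_BRANCH_OFFSET is -(1 << 27) = -128MB, matching the
  // range of the sign-extended 26-bit immediate (imm26 * 4) in b/bl.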
  static bool
  aarch64_valid_branch_offset_p(int64_t offset)
  { return offset >= MIN_BRANCH_OFFSET && offset <= MAX_BRANCH_OFFSET; }

  // Determine whether the offset fits in the adrp immediate field.
  static bool
  aarch64_valid_for_adrp_p(AArch64_address location, AArch64_address dest)
  {
    typedef AArch64_relocate_functions<size, big_endian> Reloc;
    int64_t adrp_imm = Reloc::Page (dest) - Reloc::Page (location);
    adrp_imm = adrp_imm < 0 ? ~(~adrp_imm >> 12) : adrp_imm >> 12;
    return adrp_imm >= MIN_ADRP_IMM && adrp_imm <= MAX_ADRP_IMM;
  }

  // Determine the stub type for a certain relocation, or ST_NONE if no stub is
  // needed.
  static int
  stub_type_for_reloc(unsigned int r_type, AArch64_address address,
                      AArch64_address target);

  Reloc_stub(int type)
    : Stub_base<size, big_endian>(type)
  { }

  ~Reloc_stub()
  { }

  // The key class used to index the stub instance in the stub table's stub map.
  class Key
  {
   public:
    Key(int type, const Symbol* symbol, const Relobj* relobj,
        unsigned int r_sym, int32_t addend)
      : type_(type), addend_(addend)
    {
      if (symbol != NULL)
        {
          this->r_sym_ = Reloc_stub::invalid_index;
          this->u_.symbol = symbol;
        }
      else
        {
          gold_assert(relobj != NULL && r_sym != invalid_index);
          this->r_sym_ = r_sym;
          this->u_.relobj = relobj;
        }
    }

    ~Key()
    { }

    // Return stub type.
    int
    type() const
    { return this->type_; }

    // Return the local symbol index or invalid_index.
    unsigned int
    r_sym() const
    { return this->r_sym_; }

    // Return the symbol if there is one.
    const Symbol*
    symbol() const
    { return this->r_sym_ == invalid_index ? this->u_.symbol : NULL; }

    // Return the relobj if there is one.
    const Relobj*
    relobj() const
    { return this->r_sym_ != invalid_index ? this->u_.relobj : NULL; }

    // Whether this equals another key k.
    bool
    eq(const Key& k) const
    {
      return ((this->type_ == k.type_)
              && (this->r_sym_ == k.r_sym_)
              && ((this->r_sym_ != Reloc_stub::invalid_index)
                  ? (this->u_.relobj == k.u_.relobj)
                  : (this->u_.symbol == k.u_.symbol))
              && (this->addend_ == k.addend_));
    }

    // Return a hash value.
    size_t
    hash_value() const
    {
      size_t name_hash_value = gold::string_hash<char>(
          (this->r_sym_ != Reloc_stub::invalid_index)
          ? this->u_.relobj->name().c_str()
          : this->u_.symbol->name());
      // We only have 4 stub types.
      size_t stub_type_hash_value = 0x03 & this->type_;
      return (name_hash_value
              ^ stub_type_hash_value
              ^ ((this->r_sym_ & 0x3fff) << 2)
              ^ ((this->addend_ & 0xffff) << 16));
    }

    // Functors for STL associative containers.
    struct hash
    {
      size_t
      operator()(const Key& k) const
      { return k.hash_value(); }
    };

    struct equal_to
    {
      bool
      operator()(const Key& k1, const Key& k2) const
      { return k1.eq(k2); }
    };

   private:
    // Stub type.
    const int type_;
    // If this is a local symbol, this is the index in the defining object.
    // Otherwise, it is invalid_index for a global symbol.
    unsigned int r_sym_;
    // If r_sym_ is an invalid index, this points to a global symbol.
    // Otherwise, it points to a relobj.
    // We use the unsized and target-independent Symbol and Relobj classes
    // instead of Sized_symbol<size> and AArch64_relobj, in order to avoid
    // making the stub class a template, as most of the stub machinery is
    // endianness-neutral. However, it may require a bit of casting done by
    // users of this class.
    union
    {
      const Symbol* symbol;
      const Relobj* relobj;
    } u_;
    // Addend associated with a reloc.
    int32_t addend_;
  }; // End of inner class Reloc_stub::Key

 protected:
  // This may be overridden in the child class.
  virtual void
  do_write(unsigned char*, section_size_type);

 private:
  static const unsigned int invalid_index = static_cast<unsigned int>(-1);
}; // End of Reloc_stub

template<int size, bool big_endian>
const int Reloc_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;

// Write data to output file.

template<int size, bool big_endian>
void
Reloc_stub<size, big_endian>::
do_write(unsigned char* view, section_size_type)
{
  typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
  const uint32_t* insns = this->insns();
  uint32_t num_insns = this->insn_num();
  Insntype* ip = reinterpret_cast<Insntype*>(view);
  for (uint32_t i = 0; i < num_insns; ++i)
    elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
}


// Determine the stub type for a certain relocation, or ST_NONE if no stub is
// needed.

template<int size, bool big_endian>
inline int
Reloc_stub<size, big_endian>::stub_type_for_reloc(
    unsigned int r_type, AArch64_address location, AArch64_address dest)
{
  int64_t branch_offset = 0;
  switch(r_type)
    {
    case elfcpp::R_AARCH64_CALL26:
    case elfcpp::R_AARCH64_JUMP26:
      branch_offset = dest - location;
      break;
    default:
      gold_unreachable();
    }

  if (aarch64_valid_branch_offset_p(branch_offset))
    return ST_NONE;

  if (aarch64_valid_for_adrp_p(location, dest))
    return ST_ADRP_BRANCH;

  // Always use PC-relative addressing in case of -shared or -pie.
  if (parameters->options().output_is_position_independent())
    return ST_LONG_BRANCH_PCREL;

  // This saves 2 insns per stub, compared to ST_LONG_BRANCH_PCREL,
  // but is only applicable to non-shared or non-pie output.
  return ST_LONG_BRANCH_ABS;
}

// A class to hold stubs for the AArch64 target. This contains 2 different
// types of stubs - reloc stubs and erratum stubs.
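// Within a stub table, reloc stubs are laid out first, starting at offset 0;
// erratum stubs follow, starting at reloc_stubs_size_ rounded up to
// Erratum_stub::STUB_ADDR_ALIGN (see erratum_stub_address() and do_write()
// below).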
template<int size, bool big_endian>
class Stub_table : public Output_data
{
 public:
  typedef Target_aarch64<size, big_endian> The_target_aarch64;
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
  typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
  typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
  typedef Reloc_stub<size, big_endian> The_reloc_stub;
  typedef typename The_reloc_stub::Key The_reloc_stub_key;
  typedef Erratum_stub<size, big_endian> The_erratum_stub;
  typedef Erratum_stub_less<size, big_endian> The_erratum_stub_less;
  typedef typename The_reloc_stub_key::hash The_reloc_stub_key_hash;
  typedef typename The_reloc_stub_key::equal_to The_reloc_stub_key_equal_to;
  typedef Stub_table<size, big_endian> The_stub_table;
  typedef Unordered_map<The_reloc_stub_key, The_reloc_stub*,
                        The_reloc_stub_key_hash, The_reloc_stub_key_equal_to>
      Reloc_stub_map;
  typedef typename Reloc_stub_map::const_iterator Reloc_stub_map_const_iter;
  typedef Relocate_info<size, big_endian> The_relocate_info;

  typedef std::set<The_erratum_stub*, The_erratum_stub_less> Erratum_stub_set;
  typedef typename Erratum_stub_set::iterator Erratum_stub_set_iter;

  Stub_table(The_aarch64_input_section* owner)
    : Output_data(), owner_(owner), reloc_stubs_size_(0),
      erratum_stubs_size_(0), prev_data_size_(0)
  { }

  ~Stub_table()
  { }

  The_aarch64_input_section*
  owner() const
  { return owner_; }

  // Whether this stub table is empty.
  bool
  empty() const
  { return reloc_stubs_.empty() && erratum_stubs_.empty(); }

  // Return the current data size.
  off_t
  current_data_size() const
  { return this->current_data_size_for_child(); }

  // Add a STUB using KEY. The caller is responsible for avoiding addition
  // if a STUB with the same key has already been added.
  void
  add_reloc_stub(The_reloc_stub* stub, const The_reloc_stub_key& key);

  // Add an erratum stub into the erratum stub set. The set is ordered by
  // (relobj, shndx, sh_offset).
  void
  add_erratum_stub(The_erratum_stub* stub);

  // Find if such erratum exists for any given (obj, shndx, sh_offset).
  The_erratum_stub*
  find_erratum_stub(The_aarch64_relobj* a64relobj,
                    unsigned int shndx, unsigned int sh_offset);

  // Find all the errata for a given input section. The return value is a pair
  // of iterators [begin, end).
  std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
  find_erratum_stubs_for_input_section(The_aarch64_relobj* a64relobj,
                                       unsigned int shndx);

  // Compute the erratum stub address.
  AArch64_address
  erratum_stub_address(The_erratum_stub* stub) const
  {
    AArch64_address r = align_address(this->address() + this->reloc_stubs_size_,
                                      The_erratum_stub::STUB_ADDR_ALIGN);
    r += stub->offset();
    return r;
  }

  // Finalize stubs. No-op here, just for completeness.
  void
  finalize_stubs()
  { }

  // Look up a relocation stub using KEY. Return NULL if there is none.
  The_reloc_stub*
  find_reloc_stub(The_reloc_stub_key& key)
  {
    Reloc_stub_map_const_iter p = this->reloc_stubs_.find(key);
    return (p != this->reloc_stubs_.end()) ? p->second : NULL;
  }

  // Relocate reloc stubs in this stub table. This does not relocate erratum
  // stubs.
  void
  relocate_reloc_stubs(const The_relocate_info*,
                       The_target_aarch64*,
                       Output_section*,
                       unsigned char*,
                       AArch64_address,
                       section_size_type);

  // Relocate an erratum stub.
  void
  relocate_erratum_stub(The_erratum_stub*, unsigned char*);

  // Update data size at the end of a relaxation pass. Return true if data size
  // is different from that of the previous relaxation pass.
  bool
  update_data_size_changed_p()
  {
    // No addralign changed here.
    off_t s = align_address(this->reloc_stubs_size_,
                            The_erratum_stub::STUB_ADDR_ALIGN)
              + this->erratum_stubs_size_;
    bool changed = (s != this->prev_data_size_);
    this->prev_data_size_ = s;
    return changed;
  }

 protected:
  // Write out section contents.
  void
  do_write(Output_file*);

  // Return the required alignment.
  uint64_t
  do_addralign() const
  {
    return std::max(The_reloc_stub::STUB_ADDR_ALIGN,
                    The_erratum_stub::STUB_ADDR_ALIGN);
  }

  // Reset address and file offset.
  void
  do_reset_address_and_file_offset()
  { this->set_current_data_size_for_child(this->prev_data_size_); }

  // Set final data size.
  void
  set_final_data_size()
  { this->set_data_size(this->current_data_size()); }

 private:
  // Relocate one reloc stub.
  void
  relocate_reloc_stub(The_reloc_stub*,
                      const The_relocate_info*,
                      The_target_aarch64*,
                      Output_section*,
                      unsigned char*,
                      AArch64_address,
                      section_size_type);

 private:
  // Owner of this stub table.
  The_aarch64_input_section* owner_;
  // The relocation stubs.
  Reloc_stub_map reloc_stubs_;
  // The erratum stubs.
  Erratum_stub_set erratum_stubs_;
  // Size of reloc stubs.
  off_t reloc_stubs_size_;
  // Size of erratum stubs.
  off_t erratum_stubs_size_;
  // Data size of this table in the previous relaxation pass.
  off_t prev_data_size_;
}; // End of Stub_table


// Add an erratum stub into the erratum stub set. The set is ordered by
// (relobj, shndx, sh_offset).

template<int size, bool big_endian>
void
Stub_table<size, big_endian>::add_erratum_stub(The_erratum_stub* stub)
{
  std::pair<Erratum_stub_set_iter, bool> ret =
    this->erratum_stubs_.insert(stub);
  gold_assert(ret.second);
  this->erratum_stubs_size_ = align_address(
      this->erratum_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
  stub->set_offset(this->erratum_stubs_size_);
  this->erratum_stubs_size_ += stub->stub_size();
}


// Find if such erratum exists for given (obj, shndx, sh_offset).

template<int size, bool big_endian>
Erratum_stub<size, big_endian>*
Stub_table<size, big_endian>::find_erratum_stub(
    The_aarch64_relobj* a64relobj, unsigned int shndx, unsigned int sh_offset)
{
  // A dummy object used as a key to search in the set.
  The_erratum_stub key(a64relobj, ST_NONE, shndx, sh_offset);
  Erratum_stub_set_iter i = this->erratum_stubs_.find(&key);
  if (i != this->erratum_stubs_.end())
    {
      The_erratum_stub* stub(*i);
      gold_assert(stub->erratum_insn() != 0);
      return stub;
    }
  return NULL;
}


// Find all the errata for a given input section. The return value is a pair of
// iterators [begin, end).
template<int size, bool big_endian>
std::pair<typename Stub_table<size, big_endian>::Erratum_stub_set_iter,
          typename Stub_table<size, big_endian>::Erratum_stub_set_iter>
Stub_table<size, big_endian>::find_erratum_stubs_for_input_section(
    The_aarch64_relobj* a64relobj, unsigned int shndx)
{
  typedef std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter> Result_pair;
  Erratum_stub_set_iter start, end;
  The_erratum_stub low_key(a64relobj, ST_NONE, shndx, 0);
  start = this->erratum_stubs_.lower_bound(&low_key);
  if (start == this->erratum_stubs_.end())
    return Result_pair(this->erratum_stubs_.end(),
                       this->erratum_stubs_.end());
  end = start;
  while (end != this->erratum_stubs_.end() &&
         (*end)->relobj() == a64relobj && (*end)->shndx() == shndx)
    ++end;
  return Result_pair(start, end);
}


// Add a STUB using KEY. The caller is responsible for avoiding addition
// if a STUB with the same key has already been added.

template<int size, bool big_endian>
void
Stub_table<size, big_endian>::add_reloc_stub(
    The_reloc_stub* stub, const The_reloc_stub_key& key)
{
  gold_assert(stub->type() == key.type());
  this->reloc_stubs_[key] = stub;

  // Assign stub offset early. We can do this because we never remove
  // reloc stubs and they are at the beginning of the stub table.
  this->reloc_stubs_size_ = align_address(this->reloc_stubs_size_,
                                          The_reloc_stub::STUB_ADDR_ALIGN);
  stub->set_offset(this->reloc_stubs_size_);
  this->reloc_stubs_size_ += stub->stub_size();
}


// Relocate an erratum stub.

template<int size, bool big_endian>
void
Stub_table<size, big_endian>::
relocate_erratum_stub(The_erratum_stub* estub,
                      unsigned char* view)
{
  // Just for convenience.
  const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;

  gold_assert(!estub->is_invalidated_erratum_stub());
  AArch64_address stub_address = this->erratum_stub_address(estub);
  // The address of "b" in the stub that is to be "relocated".
  AArch64_address stub_b_insn_address;
  // Branch offset that is to be filled in the "b" insn.
  int b_offset = 0;
  switch (estub->type())
    {
    case ST_E_843419:
    case ST_E_835769:
      // The 1st insn of the erratum could be a relocation spot,
      // in this case we need to fix it with
      // "(*i)->erratum_insn()".
      elfcpp::Swap<32, big_endian>::writeval(
          view + (stub_address - this->address()),
          estub->erratum_insn());
      // For the erratum, the 2nd insn is a b-insn to be patched
      // (relocated).
      stub_b_insn_address = stub_address + 1 * BPI;
      b_offset = estub->destination_address() - stub_b_insn_address;
      AArch64_relocate_functions<size, big_endian>::construct_b(
          view + (stub_b_insn_address - this->address()),
          ((unsigned int)(b_offset)) & 0xfffffff);
      break;
    default:
      gold_unreachable();
      break;
    }
  estub->invalidate_erratum_stub();
}


// Relocate only reloc stubs in this stub table. This does not relocate erratum
// stubs.
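// (Erratum stubs are instead handled from
// AArch64_relobj::fix_errata_and_relocate_erratum_stubs(), which calls
// relocate_erratum_stub() above once the erratum insn has been updated.)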
template<int size, bool big_endian>
void
Stub_table<size, big_endian>::
relocate_reloc_stubs(const The_relocate_info* relinfo,
                     The_target_aarch64* target_aarch64,
                     Output_section* output_section,
                     unsigned char* view,
                     AArch64_address address,
                     section_size_type view_size)
{
  // "view_size" is the total size of the stub_table.
  gold_assert(address == this->address() &&
              view_size == static_cast<section_size_type>(this->data_size()));
  for(Reloc_stub_map_const_iter p = this->reloc_stubs_.begin();
      p != this->reloc_stubs_.end(); ++p)
    relocate_reloc_stub(p->second, relinfo, target_aarch64, output_section,
                        view, address, view_size);
}


// Relocate one reloc stub. This is a helper for
// Stub_table::relocate_reloc_stubs().

template<int size, bool big_endian>
void
Stub_table<size, big_endian>::
relocate_reloc_stub(The_reloc_stub* stub,
                    const The_relocate_info* relinfo,
                    The_target_aarch64* target_aarch64,
                    Output_section* output_section,
                    unsigned char* view,
                    AArch64_address address,
                    section_size_type view_size)
{
  // "offset" is the offset from the beginning of the stub_table.
  section_size_type offset = stub->offset();
  section_size_type stub_size = stub->stub_size();
  // "view_size" is the total size of the stub_table.
  gold_assert(offset + stub_size <= view_size);

  target_aarch64->relocate_reloc_stub(stub, relinfo, output_section,
                                      view + offset, address + offset,
                                      view_size);
}


// Write out the stubs to file.

template<int size, bool big_endian>
void
Stub_table<size, big_endian>::do_write(Output_file* of)
{
  off_t offset = this->offset();
  const section_size_type oview_size =
    convert_to_section_size_type(this->data_size());
  unsigned char* const oview = of->get_output_view(offset, oview_size);

  // Write relocation stubs.
  for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
       p != this->reloc_stubs_.end(); ++p)
    {
      The_reloc_stub* stub = p->second;
      AArch64_address address = this->address() + stub->offset();
      gold_assert(address ==
                  align_address(address, The_reloc_stub::STUB_ADDR_ALIGN));
      stub->write(oview + stub->offset(), stub->stub_size());
    }

  // Write erratum stubs.
  unsigned int erratum_stub_start_offset =
    align_address(this->reloc_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
  for (typename Erratum_stub_set::iterator p = this->erratum_stubs_.begin();
       p != this->erratum_stubs_.end(); ++p)
    {
      The_erratum_stub* stub(*p);
      stub->write(oview + erratum_stub_start_offset + stub->offset(),
                  stub->stub_size());
    }

  of->write_output_view(this->offset(), oview_size, oview);
}


// AArch64_relobj class.
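//
// Per-object state used for stub generation and erratum scanning: each input
// section may own a Stub_table (see stub_tables_ below), and mapping symbols
// ($x / $d) recorded in do_count_local_symbols() drive the erratum scan.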
template<int size, bool big_endian>
class AArch64_relobj : public Sized_relobj_file<size, big_endian>
{
 public:
  typedef AArch64_relobj<size, big_endian> This;
  typedef Target_aarch64<size, big_endian> The_target_aarch64;
  typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
  typedef Stub_table<size, big_endian> The_stub_table;
  typedef Erratum_stub<size, big_endian> The_erratum_stub;
  typedef typename The_stub_table::Erratum_stub_set_iter Erratum_stub_set_iter;
  typedef std::vector<The_stub_table*> Stub_table_list;
  static const AArch64_address invalid_address =
    static_cast<AArch64_address>(-1);

  AArch64_relobj(const std::string& name, Input_file* input_file, off_t offset,
                 const typename elfcpp::Ehdr<size, big_endian>& ehdr)
    : Sized_relobj_file<size, big_endian>(name, input_file, offset, ehdr),
      stub_tables_()
  { }

  ~AArch64_relobj()
  { }

  // Return the stub table of the SHNDX-th section if there is one.
  The_stub_table*
  stub_table(unsigned int shndx) const
  {
    gold_assert(shndx < this->stub_tables_.size());
    return this->stub_tables_[shndx];
  }

  // Set STUB_TABLE to be the stub_table of the SHNDX-th section.
  void
  set_stub_table(unsigned int shndx, The_stub_table* stub_table)
  {
    gold_assert(shndx < this->stub_tables_.size());
    this->stub_tables_[shndx] = stub_table;
  }

  // Entrance to errata scanning.
  void
  scan_errata(unsigned int shndx,
              const elfcpp::Shdr<size, big_endian>&,
              Output_section*, const Symbol_table*,
              The_target_aarch64*);

  // Scan all relocation sections for stub generation.
  void
  scan_sections_for_stubs(The_target_aarch64*, const Symbol_table*,
                          const Layout*);

  // Whether a section is a scannable text section.
  bool
  text_section_is_scannable(const elfcpp::Shdr<size, big_endian>&, unsigned int,
                            const Output_section*, const Symbol_table*);

  // Convert regular input section with index SHNDX to a relaxed section.
  void
  convert_input_section_to_relaxed_section(unsigned shndx)
  {
    // The stubs have relocations and we need to process them after writing
    // out the stubs. So relocation now must follow section write.
    this->set_section_offset(shndx, -1ULL);
    this->set_relocs_must_follow_section_writes();
  }

  // Structure for mapping symbol position.
  struct Mapping_symbol_position
  {
    Mapping_symbol_position(unsigned int shndx, AArch64_address offset):
      shndx_(shndx), offset_(offset)
    {}

    // "<" comparator used in ordered_map container.
    bool
    operator<(const Mapping_symbol_position& p) const
    {
      return (this->shndx_ < p.shndx_
              || (this->shndx_ == p.shndx_ && this->offset_ < p.offset_));
    }

    // Section index.
    unsigned int shndx_;

    // Section offset.
    AArch64_address offset_;
  };

  typedef std::map<Mapping_symbol_position, char> Mapping_symbol_info;

 protected:
  // Post constructor setup.
  void
  do_setup()
  {
    // Call parent's setup method.
    Sized_relobj_file<size, big_endian>::do_setup();

    // Initialize look-up tables.
1855 this->stub_tables_.resize(this->shnum()); 1856 } 1857 1858 virtual void 1859 do_relocate_sections( 1860 const Symbol_table* symtab, const Layout* layout, 1861 const unsigned char* pshdrs, Output_file* of, 1862 typename Sized_relobj_file<size, big_endian>::Views* pviews); 1863 1864 // Count local symbols and (optionally) record mapping info. 1865 virtual void 1866 do_count_local_symbols(Stringpool_template<char>*, 1867 Stringpool_template<char>*); 1868 1869 private: 1870 // Fix all errata in the object, and for each erratum, relocate corresponding 1871 // erratum stub. 1872 void 1873 fix_errata_and_relocate_erratum_stubs( 1874 typename Sized_relobj_file<size, big_endian>::Views* pviews); 1875 1876 // Try to fix erratum 843419 in an optimized way. Return true if patch is 1877 // applied. 1878 bool 1879 try_fix_erratum_843419_optimized( 1880 The_erratum_stub*, AArch64_address, 1881 typename Sized_relobj_file<size, big_endian>::View_size&); 1882 1883 // Whether a section needs to be scanned for relocation stubs. 1884 bool 1885 section_needs_reloc_stub_scanning(const elfcpp::Shdr<size, big_endian>&, 1886 const Relobj::Output_sections&, 1887 const Symbol_table*, const unsigned char*); 1888 1889 // List of stub tables. 1890 Stub_table_list stub_tables_; 1891 1892 // Mapping symbol information sorted by (section index, section_offset). 1893 Mapping_symbol_info mapping_symbol_info_; 1894 }; // End of AArch64_relobj 1895 1896 1897 // Override to record mapping symbol information. 1898 template<int size, bool big_endian> 1899 void 1900 AArch64_relobj<size, big_endian>::do_count_local_symbols( 1901 Stringpool_template<char>* pool, Stringpool_template<char>* dynpool) 1902 { 1903 Sized_relobj_file<size, big_endian>::do_count_local_symbols(pool, dynpool); 1904 1905 // Only erratum-fixing work needs mapping symbols, so skip this time consuming 1906 // processing if not fixing erratum. 1907 if (!parameters->options().fix_cortex_a53_843419() 1908 && !parameters->options().fix_cortex_a53_835769()) 1909 return; 1910 1911 const unsigned int loccount = this->local_symbol_count(); 1912 if (loccount == 0) 1913 return; 1914 1915 // Read the symbol table section header. 1916 const unsigned int symtab_shndx = this->symtab_shndx(); 1917 elfcpp::Shdr<size, big_endian> 1918 symtabshdr(this, this->elf_file()->section_header(symtab_shndx)); 1919 gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB); 1920 1921 // Read the local symbols. 1922 const int sym_size =elfcpp::Elf_sizes<size>::sym_size; 1923 gold_assert(loccount == symtabshdr.get_sh_info()); 1924 off_t locsize = loccount * sym_size; 1925 const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(), 1926 locsize, true, true); 1927 1928 // For mapping symbol processing, we need to read the symbol names. 1929 unsigned int strtab_shndx = this->adjust_shndx(symtabshdr.get_sh_link()); 1930 if (strtab_shndx >= this->shnum()) 1931 { 1932 this->error(_("invalid symbol table name index: %u"), strtab_shndx); 1933 return; 1934 } 1935 1936 elfcpp::Shdr<size, big_endian> 1937 strtabshdr(this, this->elf_file()->section_header(strtab_shndx)); 1938 if (strtabshdr.get_sh_type() != elfcpp::SHT_STRTAB) 1939 { 1940 this->error(_("symbol table name section has wrong type: %u"), 1941 static_cast<unsigned int>(strtabshdr.get_sh_type())); 1942 return; 1943 } 1944 1945 const char* pnames = 1946 reinterpret_cast<const char*>(this->get_view(strtabshdr.get_sh_offset(), 1947 strtabshdr.get_sh_size(), 1948 false, false)); 1949 1950 // Skip the first dummy symbol. 
1951 psyms += sym_size; 1952 typename Sized_relobj_file<size, big_endian>::Local_values* 1953 plocal_values = this->local_values(); 1954 for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size) 1955 { 1956 elfcpp::Sym<size, big_endian> sym(psyms); 1957 Symbol_value<size>& lv((*plocal_values)[i]); 1958 AArch64_address input_value = lv.input_value(); 1959 1960 // Check to see if this is a mapping symbol. AArch64 mapping symbols are 1961 // defined in "ELF for the ARM 64-bit Architecture", Table 4-4, Mapping 1962 // symbols. 1963 // Mapping symbols could be one of the following 4 forms - 1964 // a) $x 1965 // b) $x.<any...> 1966 // c) $d 1967 // d) $d.<any...> 1968 const char* sym_name = pnames + sym.get_st_name(); 1969 if (sym_name[0] == '$' && (sym_name[1] == 'x' || sym_name[1] == 'd') 1970 && (sym_name[2] == '\0' || sym_name[2] == '.')) 1971 { 1972 bool is_ordinary; 1973 unsigned int input_shndx = 1974 this->adjust_sym_shndx(i, sym.get_st_shndx(), &is_ordinary); 1975 gold_assert(is_ordinary); 1976 1977 Mapping_symbol_position msp(input_shndx, input_value); 1978 // Insert mapping_symbol_info into map whose ordering is defined by 1979 // (shndx, offset_within_section). 1980 this->mapping_symbol_info_[msp] = sym_name[1]; 1981 } 1982 } 1983 } 1984 1985 1986 // Fix all errata in the object and for each erratum, we relocate the 1987 // corresponding erratum stub (by calling Stub_table::relocate_erratum_stub). 1988 1989 template<int size, bool big_endian> 1990 void 1991 AArch64_relobj<size, big_endian>::fix_errata_and_relocate_erratum_stubs( 1992 typename Sized_relobj_file<size, big_endian>::Views* pviews) 1993 { 1994 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype; 1995 unsigned int shnum = this->shnum(); 1996 const Relobj::Output_sections& out_sections(this->output_sections()); 1997 for (unsigned int i = 1; i < shnum; ++i) 1998 { 1999 The_stub_table* stub_table = this->stub_table(i); 2000 if (!stub_table) 2001 continue; 2002 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter> 2003 ipair(stub_table->find_erratum_stubs_for_input_section(this, i)); 2004 Erratum_stub_set_iter p = ipair.first, end = ipair.second; 2005 typename Sized_relobj_file<size, big_endian>::View_size& 2006 pview((*pviews)[i]); 2007 AArch64_address view_offset = 0; 2008 if (pview.is_input_output_view) 2009 { 2010 // In this case, write_sections has not added the output offset to 2011 // the view's address, so we must do so. Currently this only happens 2012 // for a relaxed section. 2013 unsigned int index = this->adjust_shndx(i); 2014 const Output_relaxed_input_section* poris = 2015 out_sections[index]->find_relaxed_input_section(this, index); 2016 gold_assert(poris != NULL); 2017 view_offset = poris->address() - pview.address; 2018 } 2019 2020 while (p != end) 2021 { 2022 The_erratum_stub* stub = *p; 2023 2024 // Double check data before fix. 2025 gold_assert(pview.address + view_offset + stub->sh_offset() 2026 == stub->erratum_address()); 2027 2028 // Update previously recorded erratum insn with relocated 2029 // version. 2030 Insntype* ip = 2031 reinterpret_cast<Insntype*>( 2032 pview.view + view_offset + stub->sh_offset()); 2033 Insntype insn_to_fix = ip[0]; 2034 stub->update_erratum_insn(insn_to_fix); 2035 2036 // First try to see if erratum is 843419 and if it can be fixed 2037 // without using branch-to-stub. 2038 if (!try_fix_erratum_843419_optimized(stub, view_offset, pview)) 2039 { 2040 // Replace the erratum insn with a branch-to-stub. 
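// (Illustrative numbers only: if the faulty insn sits at address 0x400000
// and its erratum stub was laid out at 0x410000, then b_offset below is
// 0x10000 and construct_b() overwrites the insn with a branch that
// transfers control to the stub.)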
2041 AArch64_address stub_address = 2042 stub_table->erratum_stub_address(stub); 2043 unsigned int b_offset = stub_address - stub->erratum_address(); 2044 AArch64_relocate_functions<size, big_endian>::construct_b( 2045 pview.view + view_offset + stub->sh_offset(), 2046 b_offset & 0xfffffff); 2047 } 2048 2049 // Erratum fix is done (or skipped), continue to relocate erratum 2050 // stub. Note, when erratum fix is skipped (either because we 2051 // proactively change the code sequence or the code sequence is 2052 // changed by relaxation, etc), we can still safely relocate the 2053 // erratum stub, ignoring the fact the erratum could never be 2054 // executed. 2055 stub_table->relocate_erratum_stub( 2056 stub, 2057 pview.view + (stub_table->address() - pview.address)); 2058 2059 // Next erratum stub. 2060 ++p; 2061 } 2062 } 2063 } 2064 2065 2066 // This is an optimization for 843419. This erratum requires the sequence begin 2067 // with 'adrp', when final value calculated by adrp fits in adr, we can just 2068 // replace 'adrp' with 'adr', so we save 2 jumps per occurrence. (Note, however, 2069 // in this case, we do not delete the erratum stub (too late to do so), it is 2070 // merely generated without ever being called.) 2071 2072 template<int size, bool big_endian> 2073 bool 2074 AArch64_relobj<size, big_endian>::try_fix_erratum_843419_optimized( 2075 The_erratum_stub* stub, AArch64_address view_offset, 2076 typename Sized_relobj_file<size, big_endian>::View_size& pview) 2077 { 2078 if (stub->type() != ST_E_843419) 2079 return false; 2080 2081 typedef AArch64_insn_utilities<big_endian> Insn_utilities; 2082 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype; 2083 E843419_stub<size, big_endian>* e843419_stub = 2084 reinterpret_cast<E843419_stub<size, big_endian>*>(stub); 2085 AArch64_address pc = 2086 pview.address + view_offset + e843419_stub->adrp_sh_offset(); 2087 unsigned int adrp_offset = e843419_stub->adrp_sh_offset (); 2088 Insntype* adrp_view = 2089 reinterpret_cast<Insntype*>(pview.view + view_offset + adrp_offset); 2090 Insntype adrp_insn = adrp_view[0]; 2091 2092 // If the instruction at adrp_sh_offset is "mrs R, tpidr_el0", it may come 2093 // from IE -> LE relaxation etc. This is a side-effect of TLS relaxation that 2094 // ADRP has been turned into MRS, there is no erratum risk anymore. 2095 // Therefore, we return true to avoid doing unnecessary branch-to-stub. 2096 if (Insn_utilities::is_mrs_tpidr_el0(adrp_insn)) 2097 return true; 2098 2099 // If the instruction at adrp_sh_offset is not ADRP and the instruction before 2100 // it is "mrs R, tpidr_el0", it may come from LD -> LE relaxation etc. 2101 // Like the above case, there is no erratum risk any more, we can safely 2102 // return true. 2103 if (!Insn_utilities::is_adrp(adrp_insn) && adrp_offset) 2104 { 2105 Insntype* prev_view = 2106 reinterpret_cast<Insntype*>( 2107 pview.view + view_offset + adrp_offset - 4); 2108 Insntype prev_insn = prev_view[0]; 2109 2110 if (Insn_utilities::is_mrs_tpidr_el0(prev_insn)) 2111 return true; 2112 } 2113 2114 /* If we reach here, the first instruction must be ADRP. */ 2115 gold_assert(Insn_utilities::is_adrp(adrp_insn)); 2116 // Get adrp 33-bit signed imm value. 
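// (Worked example with illustrative numbers: if PC is 0x400123 and the
// decoded adrp immediate is 0x1000, then adrp_dest_value below is
// (0x400123 & ~0xfff) + 0x1000 = 0x401000, and adr_imm is
// 0x401000 - 0x400123 = 0xedd, which fits in the 21-bit signed adr
// immediate, so the adrp can safely be rewritten as an adr.)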
2117 int64_t adrp_imm = Insn_utilities::
2118 aarch64_adrp_decode_imm(adrp_insn);
2119 // adrp - final value transferred to target register is calculated as:
2120 // PC[11:0] = Zeros(12)
2121 // adrp_dest_value = PC + adrp_imm;
2122 int64_t adrp_dest_value = (pc & ~((1 << 12) - 1)) + adrp_imm;
2123 // adr - final value transferred to target register is calculated as:
2124 // PC + adr_imm
2125 // So we have:
2126 // PC + adr_imm = adrp_dest_value
2127 // ==>
2128 // adr_imm = adrp_dest_value - PC
2129 int64_t adr_imm = adrp_dest_value - pc;
2130 // Check if imm fits in adr (21-bit signed).
2131 if (-(1 << 20) <= adr_imm && adr_imm < (1 << 20))
2132 {
2133 // Convert 'adrp' into 'adr'.
2134 Insntype adr_insn = adrp_insn & ((1u << 31) - 1);
2135 adr_insn = Insn_utilities::
2136 aarch64_adr_encode_imm(adr_insn, adr_imm);
2137 elfcpp::Swap<32, big_endian>::writeval(adrp_view, adr_insn);
2138 return true;
2139 }
2140 return false;
2141 }
2142
2143
2144 // Relocate sections.
2145
2146 template<int size, bool big_endian>
2147 void
2148 AArch64_relobj<size, big_endian>::do_relocate_sections(
2149 const Symbol_table* symtab, const Layout* layout,
2150 const unsigned char* pshdrs, Output_file* of,
2151 typename Sized_relobj_file<size, big_endian>::Views* pviews)
2152 {
2153 // Relocate the section data.
2154 this->relocate_section_range(symtab, layout, pshdrs, of, pviews,
2155 1, this->shnum() - 1);
2156
2157 // We do not generate stubs if doing a relocatable link.
2158 if (parameters->options().relocatable())
2159 return;
2160
2161 // This part only relocates erratum stubs that belong to input sections of this
2162 // object file.
2163 if (parameters->options().fix_cortex_a53_843419()
2164 || parameters->options().fix_cortex_a53_835769())
2165 this->fix_errata_and_relocate_erratum_stubs(pviews);
2166
2167 Relocate_info<size, big_endian> relinfo;
2168 relinfo.symtab = symtab;
2169 relinfo.layout = layout;
2170 relinfo.object = this;
2171
2172 // This part relocates all reloc stubs that are contained in stub_tables of
2173 // this object file.
2174 unsigned int shnum = this->shnum();
2175 The_target_aarch64* target = The_target_aarch64::current_target();
2176
2177 for (unsigned int i = 1; i < shnum; ++i)
2178 {
2179 The_aarch64_input_section* aarch64_input_section =
2180 target->find_aarch64_input_section(this, i);
2181 if (aarch64_input_section != NULL
2182 && aarch64_input_section->is_stub_table_owner()
2183 && !aarch64_input_section->stub_table()->empty())
2184 {
2185 Output_section* os = this->output_section(i);
2186 gold_assert(os != NULL);
2187
2188 relinfo.reloc_shndx = elfcpp::SHN_UNDEF;
2189 relinfo.reloc_shdr = NULL;
2190 relinfo.data_shndx = i;
2191 relinfo.data_shdr = pshdrs + i * elfcpp::Elf_sizes<size>::shdr_size;
2192
2193 typename Sized_relobj_file<size, big_endian>::View_size&
2194 view_struct = (*pviews)[i];
2195 gold_assert(view_struct.view != NULL);
2196
2197 The_stub_table* stub_table = aarch64_input_section->stub_table();
2198 off_t offset = stub_table->address() - view_struct.address;
2199 unsigned char* view = view_struct.view + offset;
2200 AArch64_address address = stub_table->address();
2201 section_size_type view_size = stub_table->data_size();
2202 stub_table->relocate_reloc_stubs(&relinfo, target, os, view, address,
2203 view_size);
2204 }
2205 }
2206 }
2207
2208
2209 // Determine if an input section is scannable for stub processing. SHDR is
2210 // the header of the section and SHNDX is the section index.
OS is the output 2211 // section for the input section and SYMTAB is the global symbol table used to 2212 // look up ICF information. 2213 2214 template<int size, bool big_endian> 2215 bool 2216 AArch64_relobj<size, big_endian>::text_section_is_scannable( 2217 const elfcpp::Shdr<size, big_endian>& text_shdr, 2218 unsigned int text_shndx, 2219 const Output_section* os, 2220 const Symbol_table* symtab) 2221 { 2222 // Skip any empty sections, unallocated sections or sections whose 2223 // type are not SHT_PROGBITS. 2224 if (text_shdr.get_sh_size() == 0 2225 || (text_shdr.get_sh_flags() & elfcpp::SHF_ALLOC) == 0 2226 || text_shdr.get_sh_type() != elfcpp::SHT_PROGBITS) 2227 return false; 2228 2229 // Skip any discarded or ICF'ed sections. 2230 if (os == NULL || symtab->is_section_folded(this, text_shndx)) 2231 return false; 2232 2233 // Skip exception frame. 2234 if (strcmp(os->name(), ".eh_frame") == 0) 2235 return false ; 2236 2237 gold_assert(!this->is_output_section_offset_invalid(text_shndx) || 2238 os->find_relaxed_input_section(this, text_shndx) != NULL); 2239 2240 return true; 2241 } 2242 2243 2244 // Determine if we want to scan the SHNDX-th section for relocation stubs. 2245 // This is a helper for AArch64_relobj::scan_sections_for_stubs(). 2246 2247 template<int size, bool big_endian> 2248 bool 2249 AArch64_relobj<size, big_endian>::section_needs_reloc_stub_scanning( 2250 const elfcpp::Shdr<size, big_endian>& shdr, 2251 const Relobj::Output_sections& out_sections, 2252 const Symbol_table* symtab, 2253 const unsigned char* pshdrs) 2254 { 2255 unsigned int sh_type = shdr.get_sh_type(); 2256 if (sh_type != elfcpp::SHT_RELA) 2257 return false; 2258 2259 // Ignore empty section. 2260 off_t sh_size = shdr.get_sh_size(); 2261 if (sh_size == 0) 2262 return false; 2263 2264 // Ignore reloc section with unexpected symbol table. The 2265 // error will be reported in the final link. 2266 if (this->adjust_shndx(shdr.get_sh_link()) != this->symtab_shndx()) 2267 return false; 2268 2269 gold_assert(sh_type == elfcpp::SHT_RELA); 2270 unsigned int reloc_size = elfcpp::Elf_sizes<size>::rela_size; 2271 2272 // Ignore reloc section with unexpected entsize or uneven size. 2273 // The error will be reported in the final link. 2274 if (reloc_size != shdr.get_sh_entsize() || sh_size % reloc_size != 0) 2275 return false; 2276 2277 // Ignore reloc section with bad info. This error will be 2278 // reported in the final link. 2279 unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_info()); 2280 if (text_shndx >= this->shnum()) 2281 return false; 2282 2283 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size; 2284 const elfcpp::Shdr<size, big_endian> text_shdr(pshdrs + 2285 text_shndx * shdr_size); 2286 return this->text_section_is_scannable(text_shdr, text_shndx, 2287 out_sections[text_shndx], symtab); 2288 } 2289 2290 2291 // Scan section SHNDX for erratum 843419 and 835769. 
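// Scanning is driven by the mapping symbols recorded in
// do_count_local_symbols(): only spans that start at a "$x" (code) mapping
// symbol are examined; data spans marked by "$d" are skipped. As an
// illustration with made-up offsets, a section of size 0xc0 whose mapping
// symbols are
//
//   $x at 0x00, $d at 0x40, $x at 0x80
//
// yields the code spans [0x00, 0x40) and [0x80, 0xc0), and only those two
// spans are passed to scan_erratum_843419_span() and
// scan_erratum_835769_span().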
2292 2293 template<int size, bool big_endian> 2294 void 2295 AArch64_relobj<size, big_endian>::scan_errata( 2296 unsigned int shndx, const elfcpp::Shdr<size, big_endian>& shdr, 2297 Output_section* os, const Symbol_table* symtab, 2298 The_target_aarch64* target) 2299 { 2300 if (shdr.get_sh_size() == 0 2301 || (shdr.get_sh_flags() & 2302 (elfcpp::SHF_ALLOC | elfcpp::SHF_EXECINSTR)) == 0 2303 || shdr.get_sh_type() != elfcpp::SHT_PROGBITS) 2304 return; 2305 2306 if (!os || symtab->is_section_folded(this, shndx)) return; 2307 2308 AArch64_address output_offset = this->get_output_section_offset(shndx); 2309 AArch64_address output_address; 2310 if (output_offset != invalid_address) 2311 output_address = os->address() + output_offset; 2312 else 2313 { 2314 const Output_relaxed_input_section* poris = 2315 os->find_relaxed_input_section(this, shndx); 2316 if (!poris) return; 2317 output_address = poris->address(); 2318 } 2319 2320 // Update the addresses in previously generated erratum stubs. Unlike when 2321 // we scan relocations for stubs, if section addresses have changed due to 2322 // other relaxations we are unlikely to scan the same erratum instances 2323 // again. 2324 The_stub_table* stub_table = this->stub_table(shndx); 2325 if (stub_table) 2326 { 2327 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter> 2328 ipair(stub_table->find_erratum_stubs_for_input_section(this, shndx)); 2329 for (Erratum_stub_set_iter p = ipair.first; p != ipair.second; ++p) 2330 (*p)->update_erratum_address(output_address); 2331 } 2332 2333 section_size_type input_view_size = 0; 2334 const unsigned char* input_view = 2335 this->section_contents(shndx, &input_view_size, false); 2336 2337 Mapping_symbol_position section_start(shndx, 0); 2338 // Find the first mapping symbol record within section shndx. 2339 typename Mapping_symbol_info::const_iterator p = 2340 this->mapping_symbol_info_.lower_bound(section_start); 2341 while (p != this->mapping_symbol_info_.end() && 2342 p->first.shndx_ == shndx) 2343 { 2344 typename Mapping_symbol_info::const_iterator prev = p; 2345 ++p; 2346 if (prev->second == 'x') 2347 { 2348 section_size_type span_start = 2349 convert_to_section_size_type(prev->first.offset_); 2350 section_size_type span_end; 2351 if (p != this->mapping_symbol_info_.end() 2352 && p->first.shndx_ == shndx) 2353 span_end = convert_to_section_size_type(p->first.offset_); 2354 else 2355 span_end = convert_to_section_size_type(shdr.get_sh_size()); 2356 2357 // Here we do not share the scanning code of both errata. For 843419, 2358 // only the last few insns of each page are examined, which is fast, 2359 // whereas, for 835769, every insn pair needs to be checked. 2360 2361 if (parameters->options().fix_cortex_a53_843419()) 2362 target->scan_erratum_843419_span( 2363 this, shndx, span_start, span_end, 2364 const_cast<unsigned char*>(input_view), output_address); 2365 2366 if (parameters->options().fix_cortex_a53_835769()) 2367 target->scan_erratum_835769_span( 2368 this, shndx, span_start, span_end, 2369 const_cast<unsigned char*>(input_view), output_address); 2370 } 2371 } 2372 } 2373 2374 2375 // Scan relocations for stub generation. 2376 2377 template<int size, bool big_endian> 2378 void 2379 AArch64_relobj<size, big_endian>::scan_sections_for_stubs( 2380 The_target_aarch64* target, 2381 const Symbol_table* symtab, 2382 const Layout* layout) 2383 { 2384 unsigned int shnum = this->shnum(); 2385 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size; 2386 2387 // Read the section headers. 
2388 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(), 2389 shnum * shdr_size, 2390 true, true); 2391 2392 // To speed up processing, we set up hash tables for fast lookup of 2393 // input offsets to output addresses. 2394 this->initialize_input_to_output_maps(); 2395 2396 const Relobj::Output_sections& out_sections(this->output_sections()); 2397 2398 Relocate_info<size, big_endian> relinfo; 2399 relinfo.symtab = symtab; 2400 relinfo.layout = layout; 2401 relinfo.object = this; 2402 2403 // Do relocation stubs scanning. 2404 const unsigned char* p = pshdrs + shdr_size; 2405 for (unsigned int i = 1; i < shnum; ++i, p += shdr_size) 2406 { 2407 const elfcpp::Shdr<size, big_endian> shdr(p); 2408 if (parameters->options().fix_cortex_a53_843419() 2409 || parameters->options().fix_cortex_a53_835769()) 2410 scan_errata(i, shdr, out_sections[i], symtab, target); 2411 if (this->section_needs_reloc_stub_scanning(shdr, out_sections, symtab, 2412 pshdrs)) 2413 { 2414 unsigned int index = this->adjust_shndx(shdr.get_sh_info()); 2415 AArch64_address output_offset = 2416 this->get_output_section_offset(index); 2417 AArch64_address output_address; 2418 if (output_offset != invalid_address) 2419 { 2420 output_address = out_sections[index]->address() + output_offset; 2421 } 2422 else 2423 { 2424 // Currently this only happens for a relaxed section. 2425 const Output_relaxed_input_section* poris = 2426 out_sections[index]->find_relaxed_input_section(this, index); 2427 gold_assert(poris != NULL); 2428 output_address = poris->address(); 2429 } 2430 2431 // Get the relocations. 2432 const unsigned char* prelocs = this->get_view(shdr.get_sh_offset(), 2433 shdr.get_sh_size(), 2434 true, false); 2435 2436 // Get the section contents. 2437 section_size_type input_view_size = 0; 2438 const unsigned char* input_view = 2439 this->section_contents(index, &input_view_size, false); 2440 2441 relinfo.reloc_shndx = i; 2442 relinfo.data_shndx = index; 2443 unsigned int sh_type = shdr.get_sh_type(); 2444 unsigned int reloc_size; 2445 gold_assert (sh_type == elfcpp::SHT_RELA); 2446 reloc_size = elfcpp::Elf_sizes<size>::rela_size; 2447 2448 Output_section* os = out_sections[index]; 2449 target->scan_section_for_stubs(&relinfo, sh_type, prelocs, 2450 shdr.get_sh_size() / reloc_size, 2451 os, 2452 output_offset == invalid_address, 2453 input_view, output_address, 2454 input_view_size); 2455 } 2456 } 2457 } 2458 2459 2460 // A class to wrap an ordinary input section containing executable code. 2461 2462 template<int size, bool big_endian> 2463 class AArch64_input_section : public Output_relaxed_input_section 2464 { 2465 public: 2466 typedef Stub_table<size, big_endian> The_stub_table; 2467 2468 AArch64_input_section(Relobj* relobj, unsigned int shndx) 2469 : Output_relaxed_input_section(relobj, shndx, 1), 2470 stub_table_(NULL), 2471 original_contents_(NULL), original_size_(0), 2472 original_addralign_(1) 2473 { } 2474 2475 ~AArch64_input_section() 2476 { delete[] this->original_contents_; } 2477 2478 // Initialize. 2479 void 2480 init(); 2481 2482 // Set the stub_table. 2483 void 2484 set_stub_table(The_stub_table* st) 2485 { this->stub_table_ = st; } 2486 2487 // Whether this is a stub table owner. 2488 bool 2489 is_stub_table_owner() const 2490 { return this->stub_table_ != NULL && this->stub_table_->owner() == this; } 2491 2492 // Return the original size of the section. 2493 uint32_t 2494 original_size() const 2495 { return this->original_size_; } 2496 2497 // Return the stub table. 
2498 The_stub_table* 2499 stub_table() 2500 { return stub_table_; } 2501 2502 protected: 2503 // Write out this input section. 2504 void 2505 do_write(Output_file*); 2506 2507 // Return required alignment of this. 2508 uint64_t 2509 do_addralign() const 2510 { 2511 if (this->is_stub_table_owner()) 2512 return std::max(this->stub_table_->addralign(), 2513 static_cast<uint64_t>(this->original_addralign_)); 2514 else 2515 return this->original_addralign_; 2516 } 2517 2518 // Finalize data size. 2519 void 2520 set_final_data_size(); 2521 2522 // Reset address and file offset. 2523 void 2524 do_reset_address_and_file_offset(); 2525 2526 // Output offset. 2527 bool 2528 do_output_offset(const Relobj* object, unsigned int shndx, 2529 section_offset_type offset, 2530 section_offset_type* poutput) const 2531 { 2532 if ((object == this->relobj()) 2533 && (shndx == this->shndx()) 2534 && (offset >= 0) 2535 && (offset <= 2536 convert_types<section_offset_type, uint32_t>(this->original_size_))) 2537 { 2538 *poutput = offset; 2539 return true; 2540 } 2541 else 2542 return false; 2543 } 2544 2545 private: 2546 // Copying is not allowed. 2547 AArch64_input_section(const AArch64_input_section&); 2548 AArch64_input_section& operator=(const AArch64_input_section&); 2549 2550 // The relocation stubs. 2551 The_stub_table* stub_table_; 2552 // Original section contents. We have to make a copy here since the file 2553 // containing the original section may not be locked when we need to access 2554 // the contents. 2555 unsigned char* original_contents_; 2556 // Section size of the original input section. 2557 uint32_t original_size_; 2558 // Address alignment of the original input section. 2559 uint32_t original_addralign_; 2560 }; // End of AArch64_input_section 2561 2562 2563 // Finalize data size. 2564 2565 template<int size, bool big_endian> 2566 void 2567 AArch64_input_section<size, big_endian>::set_final_data_size() 2568 { 2569 off_t off = convert_types<off_t, uint64_t>(this->original_size_); 2570 2571 if (this->is_stub_table_owner()) 2572 { 2573 this->stub_table_->finalize_data_size(); 2574 off = align_address(off, this->stub_table_->addralign()); 2575 off += this->stub_table_->data_size(); 2576 } 2577 this->set_data_size(off); 2578 } 2579 2580 2581 // Reset address and file offset. 2582 2583 template<int size, bool big_endian> 2584 void 2585 AArch64_input_section<size, big_endian>::do_reset_address_and_file_offset() 2586 { 2587 // Size of the original input section contents. 2588 off_t off = convert_types<off_t, uint64_t>(this->original_size_); 2589 2590 // If this is a stub table owner, account for the stub table size. 2591 if (this->is_stub_table_owner()) 2592 { 2593 The_stub_table* stub_table = this->stub_table_; 2594 2595 // Reset the stub table's address and file offset. The 2596 // current data size for child will be updated after that. 2597 stub_table_->reset_address_and_file_offset(); 2598 off = align_address(off, stub_table_->addralign()); 2599 off += stub_table->current_data_size(); 2600 } 2601 2602 this->set_current_data_size(off); 2603 } 2604 2605 2606 // Initialize an Arm_input_section. 2607 2608 template<int size, bool big_endian> 2609 void 2610 AArch64_input_section<size, big_endian>::init() 2611 { 2612 Relobj* relobj = this->relobj(); 2613 unsigned int shndx = this->shndx(); 2614 2615 // We have to cache original size, alignment and contents to avoid locking 2616 // the original file. 
2617 this->original_addralign_ =
2618 convert_types<uint32_t, uint64_t>(relobj->section_addralign(shndx));
2619
2620 // This is not efficient but we expect only a small number of relaxed
2621 // input sections for stubs.
2622 section_size_type section_size;
2623 const unsigned char* section_contents =
2624 relobj->section_contents(shndx, &section_size, false);
2625 this->original_size_ =
2626 convert_types<uint32_t, uint64_t>(relobj->section_size(shndx));
2627
2628 gold_assert(this->original_contents_ == NULL);
2629 this->original_contents_ = new unsigned char[section_size];
2630 memcpy(this->original_contents_, section_contents, section_size);
2631
2632 // We want to make this look like the original input section after
2633 // output sections are finalized.
2634 Output_section* os = relobj->output_section(shndx);
2635 off_t offset = relobj->output_section_offset(shndx);
2636 gold_assert(os != NULL && !relobj->is_output_section_offset_invalid(shndx));
2637 this->set_address(os->address() + offset);
2638 this->set_file_offset(os->offset() + offset);
2639 this->set_current_data_size(this->original_size_);
2640 this->finalize_data_size();
2641 }
2642
2643
2644 // Write data to output file.
2645
2646 template<int size, bool big_endian>
2647 void
2648 AArch64_input_section<size, big_endian>::do_write(Output_file* of)
2649 {
2650 // We have to write out the original section content.
2651 gold_assert(this->original_contents_ != NULL);
2652 of->write(this->offset(), this->original_contents_,
2653 this->original_size_);
2654
2655 // If this owns a stub table and it is not empty, write it.
2656 if (this->is_stub_table_owner() && !this->stub_table_->empty())
2657 this->stub_table_->write(of);
2658 }
2659
2660
2661 // AArch64 output section class. This is defined mainly to add a number of stub
2662 // generation methods.
2663
2664 template<int size, bool big_endian>
2665 class AArch64_output_section : public Output_section
2666 {
2667 public:
2668 typedef Target_aarch64<size, big_endian> The_target_aarch64;
2669 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2670 typedef Stub_table<size, big_endian> The_stub_table;
2671 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2672
2673 public:
2674 AArch64_output_section(const char* name, elfcpp::Elf_Word type,
2675 elfcpp::Elf_Xword flags)
2676 : Output_section(name, type, flags)
2677 { }
2678
2679 ~AArch64_output_section() {}
2680
2681 // Group input sections for stub generation.
2682 void
2683 group_sections(section_size_type, bool, Target_aarch64<size, big_endian>*,
2684 const Task*);
2685
2686 private:
2687 typedef Output_section::Input_section Input_section;
2688 typedef Output_section::Input_section_list Input_section_list;
2689
2690 // Create a stub group.
2691 void
2692 create_stub_group(Input_section_list::const_iterator,
2693 Input_section_list::const_iterator,
2694 Input_section_list::const_iterator,
2695 The_target_aarch64*,
2696 std::vector<Output_relaxed_input_section*>&,
2697 const Task*);
2698 }; // End of AArch64_output_section
2699
2700
2701 // Create a stub group for input sections from FIRST to LAST. OWNER points to
2702 // the input section that will be the owner of the stub table.
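// As an illustrative example (section names made up): for a group of input
// sections {A, B, C} with C chosen as OWNER, and assuming C is an ordinary
// (not yet relaxed) input section, C is converted into a relaxed
// AArch64_input_section, a fresh Stub_table is created and attached to it,
// and A, B and C each record that table via
// AArch64_relobj::set_stub_table(shndx, stub_table), so that later lookups
// by (object, section index) all find the same stub table.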
2703
2704 template<int size, bool big_endian> void
2705 AArch64_output_section<size, big_endian>::create_stub_group(
2706 Input_section_list::const_iterator first,
2707 Input_section_list::const_iterator last,
2708 Input_section_list::const_iterator owner,
2709 The_target_aarch64* target,
2710 std::vector<Output_relaxed_input_section*>& new_relaxed_sections,
2711 const Task* task)
2712 {
2713 // Currently we convert ordinary input sections into relaxed sections only
2714 // at this point.
2715 The_aarch64_input_section* input_section;
2716 if (owner->is_relaxed_input_section())
2717 gold_unreachable();
2718 else
2719 {
2720 gold_assert(owner->is_input_section());
2721 // Create a new relaxed input section. We need to lock the original
2722 // file.
2723 Task_lock_obj<Object> tl(task, owner->relobj());
2724 input_section =
2725 target->new_aarch64_input_section(owner->relobj(), owner->shndx());
2726 new_relaxed_sections.push_back(input_section);
2727 }
2728
2729 // Create a stub table.
2730 The_stub_table* stub_table =
2731 target->new_stub_table(input_section);
2732
2733 input_section->set_stub_table(stub_table);
2734
2735 Input_section_list::const_iterator p = first;
2736 // Look for input sections or relaxed input sections in [first ... last].
2737 do
2738 {
2739 if (p->is_input_section() || p->is_relaxed_input_section())
2740 {
2741 // The stub table information for input sections lives
2742 // in their objects.
2743 The_aarch64_relobj* aarch64_relobj =
2744 static_cast<The_aarch64_relobj*>(p->relobj());
2745 aarch64_relobj->set_stub_table(p->shndx(), stub_table);
2746 }
2747 }
2748 while (p++ != last);
2749 }
2750
2751
2752 // Group input sections for stub generation. GROUP_SIZE is roughly the size
2753 // limit of a stub group. We grow a stub group by adding input sections until
2754 // the size is just below GROUP_SIZE. The last input section will be converted
2755 // into a stub table owner. If STUBS_ALWAYS_AFTER_BRANCH is false, we also add
2756 // input sections after the stub table, effectively doubling the group size.
2757 //
2758 // This is similar to the group_sections() function in elf32-arm.c but is
2759 // implemented differently.
2760
2761 template<int size, bool big_endian>
2762 void AArch64_output_section<size, big_endian>::group_sections(
2763 section_size_type group_size,
2764 bool stubs_always_after_branch,
2765 Target_aarch64<size, big_endian>* target,
2766 const Task* task)
2767 {
2768 typedef enum
2769 {
2770 NO_GROUP,
2771 FINDING_STUB_SECTION,
2772 HAS_STUB_SECTION
2773 } State;
2774
2775 std::vector<Output_relaxed_input_section*> new_relaxed_sections;
2776
2777 State state = NO_GROUP;
2778 section_size_type off = 0;
2779 section_size_type group_begin_offset = 0;
2780 section_size_type group_end_offset = 0;
2781 section_size_type stub_table_end_offset = 0;
2782 Input_section_list::const_iterator group_begin =
2783 this->input_sections().end();
2784 Input_section_list::const_iterator stub_table =
2785 this->input_sections().end();
2786 Input_section_list::const_iterator group_end = this->input_sections().end();
2787 for (Input_section_list::const_iterator p = this->input_sections().begin();
2788 p != this->input_sections().end();
2789 ++p)
2790 {
2791 section_size_type section_begin_offset =
2792 align_address(off, p->addralign());
2793 section_size_type section_end_offset =
2794 section_begin_offset + p->data_size();
2795
2796 // Check to see if we should group the previously seen sections.
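// (Illustrative walk-through, assuming STUBS_ALWAYS_AFTER_BRANCH is true
// and GROUP_SIZE is 1MB: sections accumulate while the state is
// FINDING_STUB_SECTION; once adding the current section would push the
// group to 1MB or more, the last section previously seen becomes the stub
// table owner, the group is closed, the state returns to NO_GROUP and the
// current section starts a new group. When STUBS_ALWAYS_AFTER_BRANCH is
// false we would instead enter HAS_STUB_SECTION and keep adding sections
// after the stub table, but that path is not supported yet; see the
// gold_unreachable() below.)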
2797 switch (state) 2798 { 2799 case NO_GROUP: 2800 break; 2801 2802 case FINDING_STUB_SECTION: 2803 // Adding this section makes the group larger than GROUP_SIZE. 2804 if (section_end_offset - group_begin_offset >= group_size) 2805 { 2806 if (stubs_always_after_branch) 2807 { 2808 gold_assert(group_end != this->input_sections().end()); 2809 this->create_stub_group(group_begin, group_end, group_end, 2810 target, new_relaxed_sections, 2811 task); 2812 state = NO_GROUP; 2813 } 2814 else 2815 { 2816 // Input sections up to stub_group_size bytes after the stub 2817 // table can be handled by it too. 2818 state = HAS_STUB_SECTION; 2819 stub_table = group_end; 2820 stub_table_end_offset = group_end_offset; 2821 } 2822 } 2823 break; 2824 2825 case HAS_STUB_SECTION: 2826 // Adding this section makes the post stub-section group larger 2827 // than GROUP_SIZE. 2828 gold_unreachable(); 2829 // NOT SUPPORTED YET. For completeness only. 2830 if (section_end_offset - stub_table_end_offset >= group_size) 2831 { 2832 gold_assert(group_end != this->input_sections().end()); 2833 this->create_stub_group(group_begin, group_end, stub_table, 2834 target, new_relaxed_sections, task); 2835 state = NO_GROUP; 2836 } 2837 break; 2838 2839 default: 2840 gold_unreachable(); 2841 } 2842 2843 // If we see an input section and currently there is no group, start 2844 // a new one. Skip any empty sections. We look at the data size 2845 // instead of calling p->relobj()->section_size() to avoid locking. 2846 if ((p->is_input_section() || p->is_relaxed_input_section()) 2847 && (p->data_size() != 0)) 2848 { 2849 if (state == NO_GROUP) 2850 { 2851 state = FINDING_STUB_SECTION; 2852 group_begin = p; 2853 group_begin_offset = section_begin_offset; 2854 } 2855 2856 // Keep track of the last input section seen. 2857 group_end = p; 2858 group_end_offset = section_end_offset; 2859 } 2860 2861 off = section_end_offset; 2862 } 2863 2864 // Create a stub group for any ungrouped sections. 2865 if (state == FINDING_STUB_SECTION || state == HAS_STUB_SECTION) 2866 { 2867 gold_assert(group_end != this->input_sections().end()); 2868 this->create_stub_group(group_begin, group_end, 2869 (state == FINDING_STUB_SECTION 2870 ? group_end 2871 : stub_table), 2872 target, new_relaxed_sections, task); 2873 } 2874 2875 if (!new_relaxed_sections.empty()) 2876 this->convert_input_sections_to_relaxed_sections(new_relaxed_sections); 2877 2878 // Update the section offsets 2879 for (size_t i = 0; i < new_relaxed_sections.size(); ++i) 2880 { 2881 The_aarch64_relobj* relobj = static_cast<The_aarch64_relobj*>( 2882 new_relaxed_sections[i]->relobj()); 2883 unsigned int shndx = new_relaxed_sections[i]->shndx(); 2884 // Tell AArch64_relobj that this input section is converted. 2885 relobj->convert_input_section_to_relaxed_section(shndx); 2886 } 2887 } // End of AArch64_output_section::group_sections 2888 2889 2890 AArch64_reloc_property_table* aarch64_reloc_property_table = NULL; 2891 2892 2893 // The aarch64 target class. 
2894 // See the ABI at 2895 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0056b/IHI0056B_aaelf64.pdf 2896 template<int size, bool big_endian> 2897 class Target_aarch64 : public Sized_target<size, big_endian> 2898 { 2899 public: 2900 typedef Target_aarch64<size, big_endian> This; 2901 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian> 2902 Reloc_section; 2903 typedef Relocate_info<size, big_endian> The_relocate_info; 2904 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address; 2905 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj; 2906 typedef Reloc_stub<size, big_endian> The_reloc_stub; 2907 typedef Erratum_stub<size, big_endian> The_erratum_stub; 2908 typedef typename Reloc_stub<size, big_endian>::Key The_reloc_stub_key; 2909 typedef Stub_table<size, big_endian> The_stub_table; 2910 typedef std::vector<The_stub_table*> Stub_table_list; 2911 typedef typename Stub_table_list::iterator Stub_table_iterator; 2912 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section; 2913 typedef AArch64_output_section<size, big_endian> The_aarch64_output_section; 2914 typedef Unordered_map<Section_id, 2915 AArch64_input_section<size, big_endian>*, 2916 Section_id_hash> AArch64_input_section_map; 2917 typedef AArch64_insn_utilities<big_endian> Insn_utilities; 2918 const static int TCB_SIZE = size / 8 * 2; 2919 static const Address invalid_address = static_cast<Address>(-1); 2920 2921 Target_aarch64(const Target::Target_info* info = &aarch64_info) 2922 : Sized_target<size, big_endian>(info), 2923 got_(NULL), plt_(NULL), got_plt_(NULL), got_irelative_(NULL), 2924 got_tlsdesc_(NULL), global_offset_table_(NULL), rela_dyn_(NULL), 2925 rela_irelative_(NULL), copy_relocs_(elfcpp::R_AARCH64_COPY), 2926 got_mod_index_offset_(-1U), 2927 tlsdesc_reloc_info_(), tls_base_symbol_defined_(false), 2928 stub_tables_(), stub_group_size_(0), aarch64_input_section_map_() 2929 { } 2930 2931 // Scan the relocations to determine unreferenced sections for 2932 // garbage collection. 2933 void 2934 gc_process_relocs(Symbol_table* symtab, 2935 Layout* layout, 2936 Sized_relobj_file<size, big_endian>* object, 2937 unsigned int data_shndx, 2938 unsigned int sh_type, 2939 const unsigned char* prelocs, 2940 size_t reloc_count, 2941 Output_section* output_section, 2942 bool needs_special_offset_handling, 2943 size_t local_symbol_count, 2944 const unsigned char* plocal_symbols); 2945 2946 // Scan the relocations to look for symbol adjustments. 2947 void 2948 scan_relocs(Symbol_table* symtab, 2949 Layout* layout, 2950 Sized_relobj_file<size, big_endian>* object, 2951 unsigned int data_shndx, 2952 unsigned int sh_type, 2953 const unsigned char* prelocs, 2954 size_t reloc_count, 2955 Output_section* output_section, 2956 bool needs_special_offset_handling, 2957 size_t local_symbol_count, 2958 const unsigned char* plocal_symbols); 2959 2960 // Finalize the sections. 2961 void 2962 do_finalize_sections(Layout*, const Input_objects*, Symbol_table*); 2963 2964 // Return the value to use for a dynamic which requires special 2965 // treatment. 2966 uint64_t 2967 do_dynsym_value(const Symbol*) const; 2968 2969 // Relocate a section. 
2970 void
2971 relocate_section(const Relocate_info<size, big_endian>*,
2972 unsigned int sh_type,
2973 const unsigned char* prelocs,
2974 size_t reloc_count,
2975 Output_section* output_section,
2976 bool needs_special_offset_handling,
2977 unsigned char* view,
2978 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2979 section_size_type view_size,
2980 const Reloc_symbol_changes*);
2981
2982 // Scan the relocs during a relocatable link.
2983 void
2984 scan_relocatable_relocs(Symbol_table* symtab,
2985 Layout* layout,
2986 Sized_relobj_file<size, big_endian>* object,
2987 unsigned int data_shndx,
2988 unsigned int sh_type,
2989 const unsigned char* prelocs,
2990 size_t reloc_count,
2991 Output_section* output_section,
2992 bool needs_special_offset_handling,
2993 size_t local_symbol_count,
2994 const unsigned char* plocal_symbols,
2995 Relocatable_relocs*);
2996
2997 // Scan the relocs for --emit-relocs.
2998 void
2999 emit_relocs_scan(Symbol_table* symtab,
3000 Layout* layout,
3001 Sized_relobj_file<size, big_endian>* object,
3002 unsigned int data_shndx,
3003 unsigned int sh_type,
3004 const unsigned char* prelocs,
3005 size_t reloc_count,
3006 Output_section* output_section,
3007 bool needs_special_offset_handling,
3008 size_t local_symbol_count,
3009 const unsigned char* plocal_syms,
3010 Relocatable_relocs* rr);
3011
3012 // Relocate a section during a relocatable link.
3013 void
3014 relocate_relocs(
3015 const Relocate_info<size, big_endian>*,
3016 unsigned int sh_type,
3017 const unsigned char* prelocs,
3018 size_t reloc_count,
3019 Output_section* output_section,
3020 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
3021 unsigned char* view,
3022 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
3023 section_size_type view_size,
3024 unsigned char* reloc_view,
3025 section_size_type reloc_view_size);
3026
3027 // Return the symbol index to use for a target specific relocation.
3028 // The only target specific relocation is R_AARCH64_TLSDESC for a
3029 // local symbol, which is an absolute reloc.
3030 unsigned int
3031 do_reloc_symbol_index(void*, unsigned int r_type) const
3032 {
3033 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
3034 return 0;
3035 }
3036
3037 // Return the addend to use for a target specific relocation.
3038 uint64_t
3039 do_reloc_addend(void* arg, unsigned int r_type, uint64_t addend) const;
3040
3041 // Return the PLT section.
3042 uint64_t
3043 do_plt_address_for_global(const Symbol* gsym) const
3044 { return this->plt_section()->address_for_global(gsym); }
3045
3046 uint64_t
3047 do_plt_address_for_local(const Relobj* relobj, unsigned int symndx) const
3048 { return this->plt_section()->address_for_local(relobj, symndx); }
3049
3050 // This function should be defined in targets that can use relocation
3051 // types to determine (implemented in local_reloc_may_be_function_pointer
3052 // and global_reloc_may_be_function_pointer)
3053 // if a function's pointer is taken. ICF uses this in safe mode to only
3054 // fold those functions whose pointer is definitely not taken.
3055 bool
3056 do_can_check_for_function_pointers() const
3057 { return true; }
3058
3059 // Return the number of entries in the PLT.
3060 unsigned int
3061 plt_entry_count() const;
3062
3063 // Return the offset of the first non-reserved PLT entry.
3064 unsigned int
3065 first_plt_entry_offset() const;
3066
3067 // Return the size of each PLT entry.
3068 unsigned int
3069 plt_entry_size() const;
3070
3071 // Create a stub table.
3072 The_stub_table* 3073 new_stub_table(The_aarch64_input_section*); 3074 3075 // Create an aarch64 input section. 3076 The_aarch64_input_section* 3077 new_aarch64_input_section(Relobj*, unsigned int); 3078 3079 // Find an aarch64 input section instance for a given OBJ and SHNDX. 3080 The_aarch64_input_section* 3081 find_aarch64_input_section(Relobj*, unsigned int) const; 3082 3083 // Return the thread control block size. 3084 unsigned int 3085 tcb_size() const { return This::TCB_SIZE; } 3086 3087 // Scan a section for stub generation. 3088 void 3089 scan_section_for_stubs(const Relocate_info<size, big_endian>*, unsigned int, 3090 const unsigned char*, size_t, Output_section*, 3091 bool, const unsigned char*, 3092 Address, 3093 section_size_type); 3094 3095 // Scan a relocation section for stub. 3096 template<int sh_type> 3097 void 3098 scan_reloc_section_for_stubs( 3099 const The_relocate_info* relinfo, 3100 const unsigned char* prelocs, 3101 size_t reloc_count, 3102 Output_section* output_section, 3103 bool needs_special_offset_handling, 3104 const unsigned char* view, 3105 Address view_address, 3106 section_size_type); 3107 3108 // Relocate a single reloc stub. 3109 void 3110 relocate_reloc_stub(The_reloc_stub*, const Relocate_info<size, big_endian>*, 3111 Output_section*, unsigned char*, Address, 3112 section_size_type); 3113 3114 // Get the default AArch64 target. 3115 static This* 3116 current_target() 3117 { 3118 gold_assert(parameters->target().machine_code() == elfcpp::EM_AARCH64 3119 && parameters->target().get_size() == size 3120 && parameters->target().is_big_endian() == big_endian); 3121 return static_cast<This*>(parameters->sized_target<size, big_endian>()); 3122 } 3123 3124 3125 // Scan erratum 843419 for a part of a section. 3126 void 3127 scan_erratum_843419_span( 3128 AArch64_relobj<size, big_endian>*, 3129 unsigned int, 3130 const section_size_type, 3131 const section_size_type, 3132 unsigned char*, 3133 Address); 3134 3135 // Scan erratum 835769 for a part of a section. 3136 void 3137 scan_erratum_835769_span( 3138 AArch64_relobj<size, big_endian>*, 3139 unsigned int, 3140 const section_size_type, 3141 const section_size_type, 3142 unsigned char*, 3143 Address); 3144 3145 protected: 3146 void 3147 do_select_as_default_target() 3148 { 3149 gold_assert(aarch64_reloc_property_table == NULL); 3150 aarch64_reloc_property_table = new AArch64_reloc_property_table(); 3151 } 3152 3153 // Add a new reloc argument, returning the index in the vector. 3154 size_t 3155 add_tlsdesc_info(Sized_relobj_file<size, big_endian>* object, 3156 unsigned int r_sym) 3157 { 3158 this->tlsdesc_reloc_info_.push_back(Tlsdesc_info(object, r_sym)); 3159 return this->tlsdesc_reloc_info_.size() - 1; 3160 } 3161 3162 virtual Output_data_plt_aarch64<size, big_endian>* 3163 do_make_data_plt(Layout* layout, 3164 Output_data_got_aarch64<size, big_endian>* got, 3165 Output_data_space* got_plt, 3166 Output_data_space* got_irelative) 3167 { 3168 return new Output_data_plt_aarch64_standard<size, big_endian>( 3169 layout, got, got_plt, got_irelative); 3170 } 3171 3172 3173 // do_make_elf_object to override the same function in the base class. 
3174 Object* 3175 do_make_elf_object(const std::string&, Input_file*, off_t, 3176 const elfcpp::Ehdr<size, big_endian>&); 3177 3178 Output_data_plt_aarch64<size, big_endian>* 3179 make_data_plt(Layout* layout, 3180 Output_data_got_aarch64<size, big_endian>* got, 3181 Output_data_space* got_plt, 3182 Output_data_space* got_irelative) 3183 { 3184 return this->do_make_data_plt(layout, got, got_plt, got_irelative); 3185 } 3186 3187 // We only need to generate stubs, and hence perform relaxation if we are 3188 // not doing relocatable linking. 3189 virtual bool 3190 do_may_relax() const 3191 { return !parameters->options().relocatable(); } 3192 3193 // Relaxation hook. This is where we do stub generation. 3194 virtual bool 3195 do_relax(int, const Input_objects*, Symbol_table*, Layout*, const Task*); 3196 3197 void 3198 group_sections(Layout* layout, 3199 section_size_type group_size, 3200 bool stubs_always_after_branch, 3201 const Task* task); 3202 3203 void 3204 scan_reloc_for_stub(const The_relocate_info*, unsigned int, 3205 const Sized_symbol<size>*, unsigned int, 3206 const Symbol_value<size>*, 3207 typename elfcpp::Elf_types<size>::Elf_Swxword, 3208 Address Elf_Addr); 3209 3210 // Make an output section. 3211 Output_section* 3212 do_make_output_section(const char* name, elfcpp::Elf_Word type, 3213 elfcpp::Elf_Xword flags) 3214 { return new The_aarch64_output_section(name, type, flags); } 3215 3216 private: 3217 // The class which scans relocations. 3218 class Scan 3219 { 3220 public: 3221 Scan() 3222 : issued_non_pic_error_(false) 3223 { } 3224 3225 inline void 3226 local(Symbol_table* symtab, Layout* layout, Target_aarch64* target, 3227 Sized_relobj_file<size, big_endian>* object, 3228 unsigned int data_shndx, 3229 Output_section* output_section, 3230 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type, 3231 const elfcpp::Sym<size, big_endian>& lsym, 3232 bool is_discarded); 3233 3234 inline void 3235 global(Symbol_table* symtab, Layout* layout, Target_aarch64* target, 3236 Sized_relobj_file<size, big_endian>* object, 3237 unsigned int data_shndx, 3238 Output_section* output_section, 3239 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type, 3240 Symbol* gsym); 3241 3242 inline bool 3243 local_reloc_may_be_function_pointer(Symbol_table* , Layout* , 3244 Target_aarch64<size, big_endian>* , 3245 Sized_relobj_file<size, big_endian>* , 3246 unsigned int , 3247 Output_section* , 3248 const elfcpp::Rela<size, big_endian>& , 3249 unsigned int r_type, 3250 const elfcpp::Sym<size, big_endian>&); 3251 3252 inline bool 3253 global_reloc_may_be_function_pointer(Symbol_table* , Layout* , 3254 Target_aarch64<size, big_endian>* , 3255 Sized_relobj_file<size, big_endian>* , 3256 unsigned int , 3257 Output_section* , 3258 const elfcpp::Rela<size, big_endian>& , 3259 unsigned int r_type, 3260 Symbol* gsym); 3261 3262 private: 3263 static void 3264 unsupported_reloc_local(Sized_relobj_file<size, big_endian>*, 3265 unsigned int r_type); 3266 3267 static void 3268 unsupported_reloc_global(Sized_relobj_file<size, big_endian>*, 3269 unsigned int r_type, Symbol*); 3270 3271 inline bool 3272 possible_function_pointer_reloc(unsigned int r_type); 3273 3274 void 3275 check_non_pic(Relobj*, unsigned int r_type); 3276 3277 bool 3278 reloc_needs_plt_for_ifunc(Sized_relobj_file<size, big_endian>*, 3279 unsigned int r_type); 3280 3281 // Whether we have issued an error about a non-PIC compilation. 3282 bool issued_non_pic_error_; 3283 }; 3284 3285 // The class which implements relocation. 
3286 class Relocate 3287 { 3288 public: 3289 Relocate() 3290 : skip_call_tls_get_addr_(false) 3291 { } 3292 3293 ~Relocate() 3294 { } 3295 3296 // Do a relocation. Return false if the caller should not issue 3297 // any warnings about this relocation. 3298 inline bool 3299 relocate(const Relocate_info<size, big_endian>*, unsigned int, 3300 Target_aarch64*, Output_section*, size_t, const unsigned char*, 3301 const Sized_symbol<size>*, const Symbol_value<size>*, 3302 unsigned char*, typename elfcpp::Elf_types<size>::Elf_Addr, 3303 section_size_type); 3304 3305 private: 3306 inline typename AArch64_relocate_functions<size, big_endian>::Status 3307 relocate_tls(const Relocate_info<size, big_endian>*, 3308 Target_aarch64<size, big_endian>*, 3309 size_t, 3310 const elfcpp::Rela<size, big_endian>&, 3311 unsigned int r_type, const Sized_symbol<size>*, 3312 const Symbol_value<size>*, 3313 unsigned char*, 3314 typename elfcpp::Elf_types<size>::Elf_Addr); 3315 3316 inline typename AArch64_relocate_functions<size, big_endian>::Status 3317 tls_gd_to_le( 3318 const Relocate_info<size, big_endian>*, 3319 Target_aarch64<size, big_endian>*, 3320 const elfcpp::Rela<size, big_endian>&, 3321 unsigned int, 3322 unsigned char*, 3323 const Symbol_value<size>*); 3324 3325 inline typename AArch64_relocate_functions<size, big_endian>::Status 3326 tls_ld_to_le( 3327 const Relocate_info<size, big_endian>*, 3328 Target_aarch64<size, big_endian>*, 3329 const elfcpp::Rela<size, big_endian>&, 3330 unsigned int, 3331 unsigned char*, 3332 const Symbol_value<size>*); 3333 3334 inline typename AArch64_relocate_functions<size, big_endian>::Status 3335 tls_ie_to_le( 3336 const Relocate_info<size, big_endian>*, 3337 Target_aarch64<size, big_endian>*, 3338 const elfcpp::Rela<size, big_endian>&, 3339 unsigned int, 3340 unsigned char*, 3341 const Symbol_value<size>*); 3342 3343 inline typename AArch64_relocate_functions<size, big_endian>::Status 3344 tls_desc_gd_to_le( 3345 const Relocate_info<size, big_endian>*, 3346 Target_aarch64<size, big_endian>*, 3347 const elfcpp::Rela<size, big_endian>&, 3348 unsigned int, 3349 unsigned char*, 3350 const Symbol_value<size>*); 3351 3352 inline typename AArch64_relocate_functions<size, big_endian>::Status 3353 tls_desc_gd_to_ie( 3354 const Relocate_info<size, big_endian>*, 3355 Target_aarch64<size, big_endian>*, 3356 const elfcpp::Rela<size, big_endian>&, 3357 unsigned int, 3358 unsigned char*, 3359 const Symbol_value<size>*, 3360 typename elfcpp::Elf_types<size>::Elf_Addr, 3361 typename elfcpp::Elf_types<size>::Elf_Addr); 3362 3363 bool skip_call_tls_get_addr_; 3364 3365 }; // End of class Relocate 3366 3367 // Adjust TLS relocation type based on the options and whether this 3368 // is a local symbol. 3369 static tls::Tls_optimization 3370 optimize_tls_reloc(bool is_final, int r_type); 3371 3372 // Get the GOT section, creating it if necessary. 3373 Output_data_got_aarch64<size, big_endian>* 3374 got_section(Symbol_table*, Layout*); 3375 3376 // Get the GOT PLT section. 3377 Output_data_space* 3378 got_plt_section() const 3379 { 3380 gold_assert(this->got_plt_ != NULL); 3381 return this->got_plt_; 3382 } 3383 3384 // Get the GOT section for TLSDESC entries. 3385 Output_data_got<size, big_endian>* 3386 got_tlsdesc_section() const 3387 { 3388 gold_assert(this->got_tlsdesc_ != NULL); 3389 return this->got_tlsdesc_; 3390 } 3391 3392 // Create the PLT section. 3393 void 3394 make_plt_section(Symbol_table* symtab, Layout* layout); 3395 3396 // Create a PLT entry for a global symbol. 
3397 void 3398 make_plt_entry(Symbol_table*, Layout*, Symbol*); 3399 3400 // Create a PLT entry for a local STT_GNU_IFUNC symbol. 3401 void 3402 make_local_ifunc_plt_entry(Symbol_table*, Layout*, 3403 Sized_relobj_file<size, big_endian>* relobj, 3404 unsigned int local_sym_index); 3405 3406 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment. 3407 void 3408 define_tls_base_symbol(Symbol_table*, Layout*); 3409 3410 // Create the reserved PLT and GOT entries for the TLS descriptor resolver. 3411 void 3412 reserve_tlsdesc_entries(Symbol_table* symtab, Layout* layout); 3413 3414 // Create a GOT entry for the TLS module index. 3415 unsigned int 3416 got_mod_index_entry(Symbol_table* symtab, Layout* layout, 3417 Sized_relobj_file<size, big_endian>* object); 3418 3419 // Get the PLT section. 3420 Output_data_plt_aarch64<size, big_endian>* 3421 plt_section() const 3422 { 3423 gold_assert(this->plt_ != NULL); 3424 return this->plt_; 3425 } 3426 3427 // Helper method to create erratum stubs for ST_E_843419 and ST_E_835769. For 3428 // ST_E_843419, we need an additional field for adrp offset. 3429 void create_erratum_stub( 3430 AArch64_relobj<size, big_endian>* relobj, 3431 unsigned int shndx, 3432 section_size_type erratum_insn_offset, 3433 Address erratum_address, 3434 typename Insn_utilities::Insntype erratum_insn, 3435 int erratum_type, 3436 unsigned int e843419_adrp_offset=0); 3437 3438 // Return whether this is a 3-insn erratum sequence. 3439 bool is_erratum_843419_sequence( 3440 typename elfcpp::Swap<32,big_endian>::Valtype insn1, 3441 typename elfcpp::Swap<32,big_endian>::Valtype insn2, 3442 typename elfcpp::Swap<32,big_endian>::Valtype insn3); 3443 3444 // Return whether this is a 835769 sequence. 3445 // (Similarly implemented as in elfnn-aarch64.c.) 3446 bool is_erratum_835769_sequence( 3447 typename elfcpp::Swap<32,big_endian>::Valtype, 3448 typename elfcpp::Swap<32,big_endian>::Valtype); 3449 3450 // Get the dynamic reloc section, creating it if necessary. 3451 Reloc_section* 3452 rela_dyn_section(Layout*); 3453 3454 // Get the section to use for TLSDESC relocations. 3455 Reloc_section* 3456 rela_tlsdesc_section(Layout*) const; 3457 3458 // Get the section to use for IRELATIVE relocations. 3459 Reloc_section* 3460 rela_irelative_section(Layout*); 3461 3462 // Add a potential copy relocation. 3463 void 3464 copy_reloc(Symbol_table* symtab, Layout* layout, 3465 Sized_relobj_file<size, big_endian>* object, 3466 unsigned int shndx, Output_section* output_section, 3467 Symbol* sym, const elfcpp::Rela<size, big_endian>& reloc) 3468 { 3469 unsigned int r_type = elfcpp::elf_r_type<size>(reloc.get_r_info()); 3470 this->copy_relocs_.copy_reloc(symtab, layout, 3471 symtab->get_sized_symbol<size>(sym), 3472 object, shndx, output_section, 3473 r_type, reloc.get_r_offset(), 3474 reloc.get_r_addend(), 3475 this->rela_dyn_section(layout)); 3476 } 3477 3478 // Information about this specific target which we pass to the 3479 // general Target structure. 3480 static const Target::Target_info aarch64_info; 3481 3482 // The types of GOT entries needed for this platform. 3483 // These values are exposed to the ABI in an incremental link. 3484 // Do not renumber existing values without changing the version 3485 // number of the .gnu_incremental_inputs section. 
3486 enum Got_type
3487 {
3488 GOT_TYPE_STANDARD = 0, // GOT entry for a regular symbol
3489 GOT_TYPE_TLS_OFFSET = 1, // GOT entry for TLS offset
3490 GOT_TYPE_TLS_PAIR = 2, // GOT entry for TLS module/offset pair
3491 GOT_TYPE_TLS_DESC = 3 // GOT entry for TLS_DESC pair
3492 };
3493
3494 // This type is used as the argument to the target specific
3495 // relocation routines. The only target specific reloc is
3496 // R_AARCH64_TLSDESC against a local symbol.
3497 struct Tlsdesc_info
3498 {
3499 Tlsdesc_info(Sized_relobj_file<size, big_endian>* a_object,
3500 unsigned int a_r_sym)
3501 : object(a_object), r_sym(a_r_sym)
3502 { }
3503
3504 // The object in which the local symbol is defined.
3505 Sized_relobj_file<size, big_endian>* object;
3506 // The local symbol index in the object.
3507 unsigned int r_sym;
3508 };
3509
3510 // The GOT section.
3511 Output_data_got_aarch64<size, big_endian>* got_;
3512 // The PLT section.
3513 Output_data_plt_aarch64<size, big_endian>* plt_;
3514 // The GOT PLT section.
3515 Output_data_space* got_plt_;
3516 // The GOT section for IRELATIVE relocations.
3517 Output_data_space* got_irelative_;
3518 // The GOT section for TLSDESC relocations.
3519 Output_data_got<size, big_endian>* got_tlsdesc_;
3520 // The _GLOBAL_OFFSET_TABLE_ symbol.
3521 Symbol* global_offset_table_;
3522 // The dynamic reloc section.
3523 Reloc_section* rela_dyn_;
3524 // The section to use for IRELATIVE relocs.
3525 Reloc_section* rela_irelative_;
3526 // Relocs saved to avoid a COPY reloc.
3527 Copy_relocs<elfcpp::SHT_RELA, size, big_endian> copy_relocs_;
3528 // Offset of the GOT entry for the TLS module index.
3529 unsigned int got_mod_index_offset_;
3530 // We handle R_AARCH64_TLSDESC against a local symbol as a target
3531 // specific relocation. Here we store the object and local symbol
3532 // index for the relocation.
3533 std::vector<Tlsdesc_info> tlsdesc_reloc_info_;
3534 // True if the _TLS_MODULE_BASE_ symbol has been defined.
3535 bool tls_base_symbol_defined_; 3536 // List of stub_tables 3537 Stub_table_list stub_tables_; 3538 // Actual stub group size 3539 section_size_type stub_group_size_; 3540 AArch64_input_section_map aarch64_input_section_map_; 3541 }; // End of Target_aarch64 3542 3543 3544 template<> 3545 const Target::Target_info Target_aarch64<64, false>::aarch64_info = 3546 { 3547 64, // size 3548 false, // is_big_endian 3549 elfcpp::EM_AARCH64, // machine_code 3550 false, // has_make_symbol 3551 false, // has_resolve 3552 false, // has_code_fill 3553 false, // is_default_stack_executable 3554 true, // can_icf_inline_merge_sections 3555 '\0', // wrap_char 3556 "/lib/ld.so.1", // program interpreter 3557 0x400000, // default_text_segment_address 3558 0x10000, // abi_pagesize (overridable by -z max-page-size) 3559 0x1000, // common_pagesize (overridable by -z common-page-size) 3560 false, // isolate_execinstr 3561 0, // rosegment_gap 3562 elfcpp::SHN_UNDEF, // small_common_shndx 3563 elfcpp::SHN_UNDEF, // large_common_shndx 3564 0, // small_common_section_flags 3565 0, // large_common_section_flags 3566 NULL, // attributes_section 3567 NULL, // attributes_vendor 3568 "_start", // entry_symbol_name 3569 32, // hash_entry_size 3570 elfcpp::SHT_PROGBITS, // unwind_section_type 3571 }; 3572 3573 template<> 3574 const Target::Target_info Target_aarch64<32, false>::aarch64_info = 3575 { 3576 32, // size 3577 false, // is_big_endian 3578 elfcpp::EM_AARCH64, // machine_code 3579 false, // has_make_symbol 3580 false, // has_resolve 3581 false, // has_code_fill 3582 false, // is_default_stack_executable 3583 false, // can_icf_inline_merge_sections 3584 '\0', // wrap_char 3585 "/lib/ld.so.1", // program interpreter 3586 0x400000, // default_text_segment_address 3587 0x10000, // abi_pagesize (overridable by -z max-page-size) 3588 0x1000, // common_pagesize (overridable by -z common-page-size) 3589 false, // isolate_execinstr 3590 0, // rosegment_gap 3591 elfcpp::SHN_UNDEF, // small_common_shndx 3592 elfcpp::SHN_UNDEF, // large_common_shndx 3593 0, // small_common_section_flags 3594 0, // large_common_section_flags 3595 NULL, // attributes_section 3596 NULL, // attributes_vendor 3597 "_start", // entry_symbol_name 3598 32, // hash_entry_size 3599 elfcpp::SHT_PROGBITS, // unwind_section_type 3600 }; 3601 3602 template<> 3603 const Target::Target_info Target_aarch64<64, true>::aarch64_info = 3604 { 3605 64, // size 3606 true, // is_big_endian 3607 elfcpp::EM_AARCH64, // machine_code 3608 false, // has_make_symbol 3609 false, // has_resolve 3610 false, // has_code_fill 3611 false, // is_default_stack_executable 3612 true, // can_icf_inline_merge_sections 3613 '\0', // wrap_char 3614 "/lib/ld.so.1", // program interpreter 3615 0x400000, // default_text_segment_address 3616 0x10000, // abi_pagesize (overridable by -z max-page-size) 3617 0x1000, // common_pagesize (overridable by -z common-page-size) 3618 false, // isolate_execinstr 3619 0, // rosegment_gap 3620 elfcpp::SHN_UNDEF, // small_common_shndx 3621 elfcpp::SHN_UNDEF, // large_common_shndx 3622 0, // small_common_section_flags 3623 0, // large_common_section_flags 3624 NULL, // attributes_section 3625 NULL, // attributes_vendor 3626 "_start", // entry_symbol_name 3627 32, // hash_entry_size 3628 elfcpp::SHT_PROGBITS, // unwind_section_type 3629 }; 3630 3631 template<> 3632 const Target::Target_info Target_aarch64<32, true>::aarch64_info = 3633 { 3634 32, // size 3635 true, // is_big_endian 3636 elfcpp::EM_AARCH64, // machine_code 3637 false, // has_make_symbol 
3638 false, // has_resolve 3639 false, // has_code_fill 3640 false, // is_default_stack_executable 3641 false, // can_icf_inline_merge_sections 3642 '\0', // wrap_char 3643 "/lib/ld.so.1", // program interpreter 3644 0x400000, // default_text_segment_address 3645 0x10000, // abi_pagesize (overridable by -z max-page-size) 3646 0x1000, // common_pagesize (overridable by -z common-page-size) 3647 false, // isolate_execinstr 3648 0, // rosegment_gap 3649 elfcpp::SHN_UNDEF, // small_common_shndx 3650 elfcpp::SHN_UNDEF, // large_common_shndx 3651 0, // small_common_section_flags 3652 0, // large_common_section_flags 3653 NULL, // attributes_section 3654 NULL, // attributes_vendor 3655 "_start", // entry_symbol_name 3656 32, // hash_entry_size 3657 elfcpp::SHT_PROGBITS, // unwind_section_type 3658 }; 3659 3660 // Get the GOT section, creating it if necessary. 3661 3662 template<int size, bool big_endian> 3663 Output_data_got_aarch64<size, big_endian>* 3664 Target_aarch64<size, big_endian>::got_section(Symbol_table* symtab, 3665 Layout* layout) 3666 { 3667 if (this->got_ == NULL) 3668 { 3669 gold_assert(symtab != NULL && layout != NULL); 3670 3671 // When using -z now, we can treat .got.plt as a relro section. 3672 // Without -z now, it is modified after program startup by lazy 3673 // PLT relocations. 3674 bool is_got_plt_relro = parameters->options().now(); 3675 Output_section_order got_order = (is_got_plt_relro 3676 ? ORDER_RELRO 3677 : ORDER_RELRO_LAST); 3678 Output_section_order got_plt_order = (is_got_plt_relro 3679 ? ORDER_RELRO 3680 : ORDER_NON_RELRO_FIRST); 3681 3682 // Layout of .got and .got.plt sections. 3683 // .got[0] &_DYNAMIC <-_GLOBAL_OFFSET_TABLE_ 3684 // ... 3685 // .gotplt[0] reserved for ld.so (&linkmap) <--DT_PLTGOT 3686 // .gotplt[1] reserved for ld.so (resolver) 3687 // .gotplt[2] reserved 3688 3689 // Generate .got section. 3690 this->got_ = new Output_data_got_aarch64<size, big_endian>(symtab, 3691 layout); 3692 layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS, 3693 (elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE), 3694 this->got_, got_order, true); 3695 // The first word of GOT is reserved for the address of .dynamic. 3696 // We put 0 here now. The value will be replaced later in 3697 // Output_data_got_aarch64::do_write. 3698 this->got_->add_constant(0); 3699 3700 // Define _GLOBAL_OFFSET_TABLE_ at the start of the PLT. 3701 // _GLOBAL_OFFSET_TABLE_ value points to the start of the .got section, 3702 // even if there is a .got.plt section. 3703 this->global_offset_table_ = 3704 symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL, 3705 Symbol_table::PREDEFINED, 3706 this->got_, 3707 0, 0, elfcpp::STT_OBJECT, 3708 elfcpp::STB_LOCAL, 3709 elfcpp::STV_HIDDEN, 0, 3710 false, false); 3711 3712 // Generate .got.plt section. 3713 this->got_plt_ = new Output_data_space(size / 8, "** GOT PLT"); 3714 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS, 3715 (elfcpp::SHF_ALLOC 3716 | elfcpp::SHF_WRITE), 3717 this->got_plt_, got_plt_order, 3718 is_got_plt_relro); 3719 3720 // The first three entries are reserved. 3721 this->got_plt_->set_current_data_size( 3722 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8)); 3723 3724 // If there are any IRELATIVE relocations, they get GOT entries 3725 // in .got.plt after the jump slot entries. 
3726 this->got_irelative_ = new Output_data_space(size / 8, 3727 "** GOT IRELATIVE PLT"); 3728 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS, 3729 (elfcpp::SHF_ALLOC 3730 | elfcpp::SHF_WRITE), 3731 this->got_irelative_, 3732 got_plt_order, 3733 is_got_plt_relro); 3734 3735 // If there are any TLSDESC relocations, they get GOT entries in 3736 // .got.plt after the jump slot and IRELATIVE entries. 3737 this->got_tlsdesc_ = new Output_data_got<size, big_endian>(); 3738 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS, 3739 (elfcpp::SHF_ALLOC 3740 | elfcpp::SHF_WRITE), 3741 this->got_tlsdesc_, 3742 got_plt_order, 3743 is_got_plt_relro); 3744 3745 if (!is_got_plt_relro) 3746 { 3747 // Those bytes can go into the relro segment. 3748 layout->increase_relro( 3749 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8)); 3750 } 3751 3752 } 3753 return this->got_; 3754 } 3755 3756 // Get the dynamic reloc section, creating it if necessary. 3757 3758 template<int size, bool big_endian> 3759 typename Target_aarch64<size, big_endian>::Reloc_section* 3760 Target_aarch64<size, big_endian>::rela_dyn_section(Layout* layout) 3761 { 3762 if (this->rela_dyn_ == NULL) 3763 { 3764 gold_assert(layout != NULL); 3765 this->rela_dyn_ = new Reloc_section(parameters->options().combreloc()); 3766 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA, 3767 elfcpp::SHF_ALLOC, this->rela_dyn_, 3768 ORDER_DYNAMIC_RELOCS, false); 3769 } 3770 return this->rela_dyn_; 3771 } 3772 3773 // Get the section to use for IRELATIVE relocs, creating it if 3774 // necessary. These go in .rela.dyn, but only after all other dynamic 3775 // relocations. They need to follow the other dynamic relocations so 3776 // that they can refer to global variables initialized by those 3777 // relocs. 3778 3779 template<int size, bool big_endian> 3780 typename Target_aarch64<size, big_endian>::Reloc_section* 3781 Target_aarch64<size, big_endian>::rela_irelative_section(Layout* layout) 3782 { 3783 if (this->rela_irelative_ == NULL) 3784 { 3785 // Make sure we have already created the dynamic reloc section. 3786 this->rela_dyn_section(layout); 3787 this->rela_irelative_ = new Reloc_section(false); 3788 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA, 3789 elfcpp::SHF_ALLOC, this->rela_irelative_, 3790 ORDER_DYNAMIC_RELOCS, false); 3791 gold_assert(this->rela_dyn_->output_section() 3792 == this->rela_irelative_->output_section()); 3793 } 3794 return this->rela_irelative_; 3795 } 3796 3797 3798 // do_make_elf_object to override the same function in the base class. We need 3799 // to use a target-specific sub-class of Sized_relobj_file<size, big_endian> to 3800 // store backend specific information. Hence we need to have our own ELF object 3801 // creation. 3802 3803 template<int size, bool big_endian> 3804 Object* 3805 Target_aarch64<size, big_endian>::do_make_elf_object( 3806 const std::string& name, 3807 Input_file* input_file, 3808 off_t offset, const elfcpp::Ehdr<size, big_endian>& ehdr) 3809 { 3810 int et = ehdr.get_e_type(); 3811 // ET_EXEC files are valid input for --just-symbols/-R, 3812 // and we treat them as relocatable objects. 
3813 if (et == elfcpp::ET_EXEC && input_file->just_symbols()) 3814 return Sized_target<size, big_endian>::do_make_elf_object( 3815 name, input_file, offset, ehdr); 3816 else if (et == elfcpp::ET_REL) 3817 { 3818 AArch64_relobj<size, big_endian>* obj = 3819 new AArch64_relobj<size, big_endian>(name, input_file, offset, ehdr); 3820 obj->setup(); 3821 return obj; 3822 } 3823 else if (et == elfcpp::ET_DYN) 3824 { 3825 // Keep base implementation. 3826 Sized_dynobj<size, big_endian>* obj = 3827 new Sized_dynobj<size, big_endian>(name, input_file, offset, ehdr); 3828 obj->setup(); 3829 return obj; 3830 } 3831 else 3832 { 3833 gold_error(_("%s: unsupported ELF file type %d"), 3834 name.c_str(), et); 3835 return NULL; 3836 } 3837 } 3838 3839 3840 // Scan a relocation for stub generation. 3841 3842 template<int size, bool big_endian> 3843 void 3844 Target_aarch64<size, big_endian>::scan_reloc_for_stub( 3845 const Relocate_info<size, big_endian>* relinfo, 3846 unsigned int r_type, 3847 const Sized_symbol<size>* gsym, 3848 unsigned int r_sym, 3849 const Symbol_value<size>* psymval, 3850 typename elfcpp::Elf_types<size>::Elf_Swxword addend, 3851 Address address) 3852 { 3853 const AArch64_relobj<size, big_endian>* aarch64_relobj = 3854 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object); 3855 3856 Symbol_value<size> symval; 3857 if (gsym != NULL) 3858 { 3859 const AArch64_reloc_property* arp = aarch64_reloc_property_table-> 3860 get_reloc_property(r_type); 3861 if (gsym->use_plt_offset(arp->reference_flags())) 3862 { 3863 // This uses a PLT, change the symbol value. 3864 symval.set_output_value(this->plt_address_for_global(gsym)); 3865 psymval = &symval; 3866 } 3867 else if (gsym->is_undefined()) 3868 { 3869 // There is no need to generate a stub symbol if the original symbol 3870 // is undefined. 3871 gold_debug(DEBUG_TARGET, 3872 "stub: not creating a stub for undefined symbol %s in file %s", 3873 gsym->name(), aarch64_relobj->name().c_str()); 3874 return; 3875 } 3876 } 3877 3878 // Get the symbol value. 3879 typename Symbol_value<size>::Value value = psymval->value(aarch64_relobj, 0); 3880 3881 // Owing to pipelining, the PC relative branches below actually skip 3882 // two instructions when the branch offset is 0. 3883 Address destination = static_cast<Address>(-1); 3884 switch (r_type) 3885 { 3886 case elfcpp::R_AARCH64_CALL26: 3887 case elfcpp::R_AARCH64_JUMP26: 3888 destination = value + addend; 3889 break; 3890 default: 3891 gold_unreachable(); 3892 } 3893 3894 int stub_type = The_reloc_stub:: 3895 stub_type_for_reloc(r_type, address, destination); 3896 if (stub_type == ST_NONE) 3897 return; 3898 3899 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx); 3900 gold_assert(stub_table != NULL); 3901 3902 The_reloc_stub_key key(stub_type, gsym, aarch64_relobj, r_sym, addend); 3903 The_reloc_stub* stub = stub_table->find_reloc_stub(key); 3904 if (stub == NULL) 3905 { 3906 stub = new The_reloc_stub(stub_type); 3907 stub_table->add_reloc_stub(stub, key); 3908 } 3909 stub->set_destination_address(destination); 3910 } // End of Target_aarch64::scan_reloc_for_stub 3911 3912 3913 // This function scans a relocation section for stub generation. 3914 // The template parameter Relocate must be a class type which provides 3915 // a single function, relocate(), which implements the machine 3916 // specific part of a relocation. 3917 3918 // BIG_ENDIAN is the endianness of the data. SH_TYPE is the section type: 3919 // SHT_REL or SHT_RELA. 
3920 3921 // PRELOCS points to the relocation data. RELOC_COUNT is the number 3922 // of relocs. OUTPUT_SECTION is the output section. 3923 // NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be 3924 // mapped to output offsets. 3925 3926 // VIEW is the section data, VIEW_ADDRESS is its memory address, and 3927 // VIEW_SIZE is the size. These refer to the input section, unless 3928 // NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to 3929 // the output section. 3930 3931 template<int size, bool big_endian> 3932 template<int sh_type> 3933 void inline 3934 Target_aarch64<size, big_endian>::scan_reloc_section_for_stubs( 3935 const Relocate_info<size, big_endian>* relinfo, 3936 const unsigned char* prelocs, 3937 size_t reloc_count, 3938 Output_section* /*output_section*/, 3939 bool /*needs_special_offset_handling*/, 3940 const unsigned char* /*view*/, 3941 Address view_address, 3942 section_size_type) 3943 { 3944 typedef typename Reloc_types<sh_type,size,big_endian>::Reloc Reltype; 3945 3946 const int reloc_size = 3947 Reloc_types<sh_type,size,big_endian>::reloc_size; 3948 AArch64_relobj<size, big_endian>* object = 3949 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object); 3950 unsigned int local_count = object->local_symbol_count(); 3951 3952 gold::Default_comdat_behavior default_comdat_behavior; 3953 Comdat_behavior comdat_behavior = CB_UNDETERMINED; 3954 3955 for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size) 3956 { 3957 Reltype reloc(prelocs); 3958 typename elfcpp::Elf_types<size>::Elf_WXword r_info = reloc.get_r_info(); 3959 unsigned int r_sym = elfcpp::elf_r_sym<size>(r_info); 3960 unsigned int r_type = elfcpp::elf_r_type<size>(r_info); 3961 if (r_type != elfcpp::R_AARCH64_CALL26 3962 && r_type != elfcpp::R_AARCH64_JUMP26) 3963 continue; 3964 3965 section_offset_type offset = 3966 convert_to_section_size_type(reloc.get_r_offset()); 3967 3968 // Get the addend. 3969 typename elfcpp::Elf_types<size>::Elf_Swxword addend = 3970 reloc.get_r_addend(); 3971 3972 const Sized_symbol<size>* sym; 3973 Symbol_value<size> symval; 3974 const Symbol_value<size> *psymval; 3975 bool is_defined_in_discarded_section; 3976 unsigned int shndx; 3977 const Symbol* gsym = NULL; 3978 if (r_sym < local_count) 3979 { 3980 sym = NULL; 3981 psymval = object->local_symbol(r_sym); 3982 3983 // If the local symbol belongs to a section we are discarding, 3984 // and that section is a debug section, try to find the 3985 // corresponding kept section and map this symbol to its 3986 // counterpart in the kept section. The symbol must not 3987 // correspond to a section we are folding. 3988 bool is_ordinary; 3989 shndx = psymval->input_shndx(&is_ordinary); 3990 is_defined_in_discarded_section = 3991 (is_ordinary 3992 && shndx != elfcpp::SHN_UNDEF 3993 && !object->is_section_included(shndx) 3994 && !relinfo->symtab->is_section_folded(object, shndx)); 3995 3996 // We need to compute the would-be final value of this local 3997 // symbol. 3998 if (!is_defined_in_discarded_section) 3999 { 4000 typedef Sized_relobj_file<size, big_endian> ObjType; 4001 if (psymval->is_section_symbol()) 4002 symval.set_is_section_symbol(); 4003 typename ObjType::Compute_final_local_value_status status = 4004 object->compute_final_local_value(r_sym, psymval, &symval, 4005 relinfo->symtab); 4006 if (status == ObjType::CFLV_OK) 4007 { 4008 // Currently we cannot handle a branch to a target in 4009 // a merged section. 
If this is the case, issue an error 4010 // and also free the merge symbol value. 4011 if (!symval.has_output_value()) 4012 { 4013 const std::string& section_name = 4014 object->section_name(shndx); 4015 object->error(_("cannot handle branch to local %u " 4016 "in a merged section %s"), 4017 r_sym, section_name.c_str()); 4018 } 4019 psymval = &symval; 4020 } 4021 else 4022 { 4023 // We cannot determine the final value. 4024 continue; 4025 } 4026 } 4027 } 4028 else 4029 { 4030 gsym = object->global_symbol(r_sym); 4031 gold_assert(gsym != NULL); 4032 if (gsym->is_forwarder()) 4033 gsym = relinfo->symtab->resolve_forwards(gsym); 4034 4035 sym = static_cast<const Sized_symbol<size>*>(gsym); 4036 if (sym->has_symtab_index() && sym->symtab_index() != -1U) 4037 symval.set_output_symtab_index(sym->symtab_index()); 4038 else 4039 symval.set_no_output_symtab_entry(); 4040 4041 // We need to compute the would-be final value of this global 4042 // symbol. 4043 const Symbol_table* symtab = relinfo->symtab; 4044 const Sized_symbol<size>* sized_symbol = 4045 symtab->get_sized_symbol<size>(gsym); 4046 Symbol_table::Compute_final_value_status status; 4047 typename elfcpp::Elf_types<size>::Elf_Addr value = 4048 symtab->compute_final_value<size>(sized_symbol, &status); 4049 4050 // Skip this if the symbol has not output section. 4051 if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION) 4052 continue; 4053 symval.set_output_value(value); 4054 4055 if (gsym->type() == elfcpp::STT_TLS) 4056 symval.set_is_tls_symbol(); 4057 else if (gsym->type() == elfcpp::STT_GNU_IFUNC) 4058 symval.set_is_ifunc_symbol(); 4059 psymval = &symval; 4060 4061 is_defined_in_discarded_section = 4062 (gsym->is_defined_in_discarded_section() 4063 && gsym->is_undefined()); 4064 shndx = 0; 4065 } 4066 4067 Symbol_value<size> symval2; 4068 if (is_defined_in_discarded_section) 4069 { 4070 std::string name = object->section_name(relinfo->data_shndx); 4071 4072 if (comdat_behavior == CB_UNDETERMINED) 4073 comdat_behavior = default_comdat_behavior.get(name.c_str()); 4074 4075 if (comdat_behavior == CB_PRETEND) 4076 { 4077 bool found; 4078 typename elfcpp::Elf_types<size>::Elf_Addr value = 4079 object->map_to_kept_section(shndx, name, &found); 4080 if (found) 4081 symval2.set_output_value(value + psymval->input_value()); 4082 else 4083 symval2.set_output_value(0); 4084 } 4085 else 4086 { 4087 if (comdat_behavior == CB_ERROR) 4088 issue_discarded_error(relinfo, i, offset, r_sym, gsym); 4089 symval2.set_output_value(0); 4090 } 4091 symval2.set_no_output_symtab_entry(); 4092 psymval = &symval2; 4093 } 4094 4095 this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval, 4096 addend, view_address + offset); 4097 } // End of iterating relocs in a section 4098 } // End of Target_aarch64::scan_reloc_section_for_stubs 4099 4100 4101 // Scan an input section for stub generation. 
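// A worked example of why this scan is needed: R_AARCH64_CALL26 and
// R_AARCH64_JUMP26 carry a signed 26-bit word offset, i.e. a direct reach
// of +/-128MiB.  A BL at address 0x400000 whose destination is 0x2400000
// is only 0x2000000 (32MiB) away and needs no stub; a destination more
// than 128MiB away cannot be encoded directly, so scan_reloc_for_stub
// records a reloc stub (for example ST_ADRP_BRANCH) in the stub table.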
4102 4103 template<int size, bool big_endian> 4104 void 4105 Target_aarch64<size, big_endian>::scan_section_for_stubs( 4106 const Relocate_info<size, big_endian>* relinfo, 4107 unsigned int sh_type, 4108 const unsigned char* prelocs, 4109 size_t reloc_count, 4110 Output_section* output_section, 4111 bool needs_special_offset_handling, 4112 const unsigned char* view, 4113 Address view_address, 4114 section_size_type view_size) 4115 { 4116 gold_assert(sh_type == elfcpp::SHT_RELA); 4117 this->scan_reloc_section_for_stubs<elfcpp::SHT_RELA>( 4118 relinfo, 4119 prelocs, 4120 reloc_count, 4121 output_section, 4122 needs_special_offset_handling, 4123 view, 4124 view_address, 4125 view_size); 4126 } 4127 4128 4129 // Relocate a single reloc stub. 4130 4131 template<int size, bool big_endian> 4132 void Target_aarch64<size, big_endian>:: 4133 relocate_reloc_stub(The_reloc_stub* stub, 4134 const The_relocate_info*, 4135 Output_section*, 4136 unsigned char* view, 4137 Address address, 4138 section_size_type) 4139 { 4140 typedef AArch64_relocate_functions<size, big_endian> The_reloc_functions; 4141 typedef typename The_reloc_functions::Status The_reloc_functions_status; 4142 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype; 4143 4144 Insntype* ip = reinterpret_cast<Insntype*>(view); 4145 int insn_number = stub->insn_num(); 4146 const uint32_t* insns = stub->insns(); 4147 // Check the insns are really those stub insns. 4148 for (int i = 0; i < insn_number; ++i) 4149 { 4150 Insntype insn = elfcpp::Swap<32,big_endian>::readval(ip + i); 4151 gold_assert(((uint32_t)insn == insns[i])); 4152 } 4153 4154 Address dest = stub->destination_address(); 4155 4156 switch(stub->type()) 4157 { 4158 case ST_ADRP_BRANCH: 4159 { 4160 // 1st reloc is ADR_PREL_PG_HI21 4161 The_reloc_functions_status status = 4162 The_reloc_functions::adrp(view, dest, address); 4163 // An error should never arise in the above step. If so, please 4164 // check 'aarch64_valid_for_adrp_p'. 4165 gold_assert(status == The_reloc_functions::STATUS_OKAY); 4166 4167 // 2nd reloc is ADD_ABS_LO12_NC 4168 const AArch64_reloc_property* arp = 4169 aarch64_reloc_property_table->get_reloc_property( 4170 elfcpp::R_AARCH64_ADD_ABS_LO12_NC); 4171 gold_assert(arp != NULL); 4172 status = The_reloc_functions::template 4173 rela_general<32>(view + 4, dest, 0, arp); 4174 // An error should never arise, it is an "_NC" relocation. 4175 gold_assert(status == The_reloc_functions::STATUS_OKAY); 4176 } 4177 break; 4178 4179 case ST_LONG_BRANCH_ABS: 4180 // 1st reloc is R_AARCH64_PREL64, at offset 8 4181 elfcpp::Swap<64,big_endian>::writeval(view + 8, dest); 4182 break; 4183 4184 case ST_LONG_BRANCH_PCREL: 4185 { 4186 // "PC" calculation is the 2nd insn in the stub. 4187 uint64_t offset = dest - (address + 4); 4188 // Offset is placed at offset 4 and 5. 4189 elfcpp::Swap<64,big_endian>::writeval(view + 16, offset); 4190 } 4191 break; 4192 4193 default: 4194 gold_unreachable(); 4195 } 4196 } 4197 4198 4199 // A class to handle the PLT data. 4200 // This is an abstract base class that handles most of the linker details 4201 // but does not know the actual contents of PLT entries. The derived 4202 // classes below fill in those details. 
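// As a concrete example, with the standard 64-bit layout defined further
// below (a 32-byte PLT0 followed by 16-byte entries, and three reserved
// .got.plt words):
//
//   PLT entry N:       plt_address + 32 + N * 16
//   its .got.plt slot: got_plt_address + (3 + N) * 8
//
// do_write() initially points every .got.plt slot back at PLT0 so that the
// dynamic linker can resolve each call lazily on first use.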
4203 4204 template<int size, bool big_endian> 4205 class Output_data_plt_aarch64 : public Output_section_data 4206 { 4207 public: 4208 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian> 4209 Reloc_section; 4210 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address; 4211 4212 Output_data_plt_aarch64(Layout* layout, 4213 uint64_t addralign, 4214 Output_data_got_aarch64<size, big_endian>* got, 4215 Output_data_space* got_plt, 4216 Output_data_space* got_irelative) 4217 : Output_section_data(addralign), tlsdesc_rel_(NULL), irelative_rel_(NULL), 4218 got_(got), got_plt_(got_plt), got_irelative_(got_irelative), 4219 count_(0), irelative_count_(0), tlsdesc_got_offset_(-1U) 4220 { this->init(layout); } 4221 4222 // Initialize the PLT section. 4223 void 4224 init(Layout* layout); 4225 4226 // Add an entry to the PLT. 4227 void 4228 add_entry(Symbol_table*, Layout*, Symbol* gsym); 4229 4230 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol. 4231 unsigned int 4232 add_local_ifunc_entry(Symbol_table* symtab, Layout*, 4233 Sized_relobj_file<size, big_endian>* relobj, 4234 unsigned int local_sym_index); 4235 4236 // Add the relocation for a PLT entry. 4237 void 4238 add_relocation(Symbol_table*, Layout*, Symbol* gsym, 4239 unsigned int got_offset); 4240 4241 // Add the reserved TLSDESC_PLT entry to the PLT. 4242 void 4243 reserve_tlsdesc_entry(unsigned int got_offset) 4244 { this->tlsdesc_got_offset_ = got_offset; } 4245 4246 // Return true if a TLSDESC_PLT entry has been reserved. 4247 bool 4248 has_tlsdesc_entry() const 4249 { return this->tlsdesc_got_offset_ != -1U; } 4250 4251 // Return the GOT offset for the reserved TLSDESC_PLT entry. 4252 unsigned int 4253 get_tlsdesc_got_offset() const 4254 { return this->tlsdesc_got_offset_; } 4255 4256 // Return the PLT offset of the reserved TLSDESC_PLT entry. 4257 unsigned int 4258 get_tlsdesc_plt_offset() const 4259 { 4260 return (this->first_plt_entry_offset() + 4261 (this->count_ + this->irelative_count_) 4262 * this->get_plt_entry_size()); 4263 } 4264 4265 // Return the .rela.plt section data. 4266 Reloc_section* 4267 rela_plt() 4268 { return this->rel_; } 4269 4270 // Return where the TLSDESC relocations should go. 4271 Reloc_section* 4272 rela_tlsdesc(Layout*); 4273 4274 // Return where the IRELATIVE relocations should go in the PLT 4275 // relocations. 4276 Reloc_section* 4277 rela_irelative(Symbol_table*, Layout*); 4278 4279 // Return whether we created a section for IRELATIVE relocations. 4280 bool 4281 has_irelative_section() const 4282 { return this->irelative_rel_ != NULL; } 4283 4284 // Return the number of PLT entries. 4285 unsigned int 4286 entry_count() const 4287 { return this->count_ + this->irelative_count_; } 4288 4289 // Return the offset of the first non-reserved PLT entry. 4290 unsigned int 4291 first_plt_entry_offset() const 4292 { return this->do_first_plt_entry_offset(); } 4293 4294 // Return the size of a PLT entry. 4295 unsigned int 4296 get_plt_entry_size() const 4297 { return this->do_get_plt_entry_size(); } 4298 4299 // Return the reserved tlsdesc entry size. 4300 unsigned int 4301 get_plt_tlsdesc_entry_size() const 4302 { return this->do_get_plt_tlsdesc_entry_size(); } 4303 4304 // Return the PLT address to use for a global symbol. 4305 uint64_t 4306 address_for_global(const Symbol*); 4307 4308 // Return the PLT address to use for a local symbol. 4309 uint64_t 4310 address_for_local(const Relobj*, unsigned int symndx); 4311 4312 protected: 4313 // Fill in the first PLT entry. 
4314 void 4315 fill_first_plt_entry(unsigned char* pov, 4316 Address got_address, 4317 Address plt_address) 4318 { this->do_fill_first_plt_entry(pov, got_address, plt_address); } 4319 4320 // Fill in a normal PLT entry. 4321 void 4322 fill_plt_entry(unsigned char* pov, 4323 Address got_address, 4324 Address plt_address, 4325 unsigned int got_offset, 4326 unsigned int plt_offset) 4327 { 4328 this->do_fill_plt_entry(pov, got_address, plt_address, 4329 got_offset, plt_offset); 4330 } 4331 4332 // Fill in the reserved TLSDESC PLT entry. 4333 void 4334 fill_tlsdesc_entry(unsigned char* pov, 4335 Address gotplt_address, 4336 Address plt_address, 4337 Address got_base, 4338 unsigned int tlsdesc_got_offset, 4339 unsigned int plt_offset) 4340 { 4341 this->do_fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base, 4342 tlsdesc_got_offset, plt_offset); 4343 } 4344 4345 virtual unsigned int 4346 do_first_plt_entry_offset() const = 0; 4347 4348 virtual unsigned int 4349 do_get_plt_entry_size() const = 0; 4350 4351 virtual unsigned int 4352 do_get_plt_tlsdesc_entry_size() const = 0; 4353 4354 virtual void 4355 do_fill_first_plt_entry(unsigned char* pov, 4356 Address got_addr, 4357 Address plt_addr) = 0; 4358 4359 virtual void 4360 do_fill_plt_entry(unsigned char* pov, 4361 Address got_address, 4362 Address plt_address, 4363 unsigned int got_offset, 4364 unsigned int plt_offset) = 0; 4365 4366 virtual void 4367 do_fill_tlsdesc_entry(unsigned char* pov, 4368 Address gotplt_address, 4369 Address plt_address, 4370 Address got_base, 4371 unsigned int tlsdesc_got_offset, 4372 unsigned int plt_offset) = 0; 4373 4374 void 4375 do_adjust_output_section(Output_section* os); 4376 4377 // Write to a map file. 4378 void 4379 do_print_to_mapfile(Mapfile* mapfile) const 4380 { mapfile->print_output_data(this, _("** PLT")); } 4381 4382 private: 4383 // Set the final size. 4384 void 4385 set_final_data_size(); 4386 4387 // Write out the PLT data. 4388 void 4389 do_write(Output_file*); 4390 4391 // The reloc section. 4392 Reloc_section* rel_; 4393 4394 // The TLSDESC relocs, if necessary. These must follow the regular 4395 // PLT relocs. 4396 Reloc_section* tlsdesc_rel_; 4397 4398 // The IRELATIVE relocs, if necessary. These must follow the 4399 // regular PLT relocations. 4400 Reloc_section* irelative_rel_; 4401 4402 // The .got section. 4403 Output_data_got_aarch64<size, big_endian>* got_; 4404 4405 // The .got.plt section. 4406 Output_data_space* got_plt_; 4407 4408 // The part of the .got.plt section used for IRELATIVE relocs. 4409 Output_data_space* got_irelative_; 4410 4411 // The number of PLT entries. 4412 unsigned int count_; 4413 4414 // Number of PLT entries with R_AARCH64_IRELATIVE relocs. These 4415 // follow the regular PLT entries. 4416 unsigned int irelative_count_; 4417 4418 // GOT offset of the reserved TLSDESC_GOT entry for the lazy trampoline. 4419 // Communicated to the loader via DT_TLSDESC_GOT. The magic value -1 4420 // indicates an offset is not allocated. 4421 unsigned int tlsdesc_got_offset_; 4422 }; 4423 4424 // Initialize the PLT section. 
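// The constructor above calls init().  A minimal construction sketch,
// roughly how Target_aarch64::make_plt_section is expected to instantiate
// the standard flavour (the real code may differ in details):
//
//   this->plt_ = new Output_data_plt_aarch64_standard<size, big_endian>(
//       layout, this->got_, this->got_plt_, this->got_irelative_);
//   layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
//                                   elfcpp::SHF_ALLOC | elfcpp::SHF_EXECINSTR,
//                                   this->plt_, ORDER_PLT, false);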
4425 4426 template<int size, bool big_endian> 4427 void 4428 Output_data_plt_aarch64<size, big_endian>::init(Layout* layout) 4429 { 4430 this->rel_ = new Reloc_section(false); 4431 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA, 4432 elfcpp::SHF_ALLOC, this->rel_, 4433 ORDER_DYNAMIC_PLT_RELOCS, false); 4434 } 4435 4436 template<int size, bool big_endian> 4437 void 4438 Output_data_plt_aarch64<size, big_endian>::do_adjust_output_section( 4439 Output_section* os) 4440 { 4441 os->set_entsize(this->get_plt_entry_size()); 4442 } 4443 4444 // Add an entry to the PLT. 4445 4446 template<int size, bool big_endian> 4447 void 4448 Output_data_plt_aarch64<size, big_endian>::add_entry(Symbol_table* symtab, 4449 Layout* layout, Symbol* gsym) 4450 { 4451 gold_assert(!gsym->has_plt_offset()); 4452 4453 unsigned int* pcount; 4454 unsigned int plt_reserved; 4455 Output_section_data_build* got; 4456 4457 if (gsym->type() == elfcpp::STT_GNU_IFUNC 4458 && gsym->can_use_relative_reloc(false)) 4459 { 4460 pcount = &this->irelative_count_; 4461 plt_reserved = 0; 4462 got = this->got_irelative_; 4463 } 4464 else 4465 { 4466 pcount = &this->count_; 4467 plt_reserved = this->first_plt_entry_offset(); 4468 got = this->got_plt_; 4469 } 4470 4471 gsym->set_plt_offset((*pcount) * this->get_plt_entry_size() 4472 + plt_reserved); 4473 4474 ++*pcount; 4475 4476 section_offset_type got_offset = got->current_data_size(); 4477 4478 // Every PLT entry needs a GOT entry which points back to the PLT 4479 // entry (this will be changed by the dynamic linker, normally 4480 // lazily when the function is called). 4481 got->set_current_data_size(got_offset + size / 8); 4482 4483 // Every PLT entry needs a reloc. 4484 this->add_relocation(symtab, layout, gsym, got_offset); 4485 4486 // Note that we don't need to save the symbol. The contents of the 4487 // PLT are independent of which symbols are used. The symbols only 4488 // appear in the relocations. 4489 } 4490 4491 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol. Return 4492 // the PLT offset. 4493 4494 template<int size, bool big_endian> 4495 unsigned int 4496 Output_data_plt_aarch64<size, big_endian>::add_local_ifunc_entry( 4497 Symbol_table* symtab, 4498 Layout* layout, 4499 Sized_relobj_file<size, big_endian>* relobj, 4500 unsigned int local_sym_index) 4501 { 4502 unsigned int plt_offset = this->irelative_count_ * this->get_plt_entry_size(); 4503 ++this->irelative_count_; 4504 4505 section_offset_type got_offset = this->got_irelative_->current_data_size(); 4506 4507 // Every PLT entry needs a GOT entry which points back to the PLT 4508 // entry. 4509 this->got_irelative_->set_current_data_size(got_offset + size / 8); 4510 4511 // Every PLT entry needs a reloc. 4512 Reloc_section* rela = this->rela_irelative(symtab, layout); 4513 rela->add_symbolless_local_addend(relobj, local_sym_index, 4514 elfcpp::R_AARCH64_IRELATIVE, 4515 this->got_irelative_, got_offset, 0); 4516 4517 return plt_offset; 4518 } 4519 4520 // Add the relocation for a PLT entry. 
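// Continuing the layout example: with the standard 64-bit sizes, the first
// three ordinary symbols passed to add_entry() receive PLT offsets 32, 48
// and 64, and .got.plt offsets 24, 32 and 40 (immediately after the three
// reserved words).  The R_AARCH64_JUMP_SLOT relocation added below is
// recorded against .got.plt at exactly that offset.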
4521 4522 template<int size, bool big_endian> 4523 void 4524 Output_data_plt_aarch64<size, big_endian>::add_relocation( 4525 Symbol_table* symtab, Layout* layout, Symbol* gsym, unsigned int got_offset) 4526 { 4527 if (gsym->type() == elfcpp::STT_GNU_IFUNC 4528 && gsym->can_use_relative_reloc(false)) 4529 { 4530 Reloc_section* rela = this->rela_irelative(symtab, layout); 4531 rela->add_symbolless_global_addend(gsym, elfcpp::R_AARCH64_IRELATIVE, 4532 this->got_irelative_, got_offset, 0); 4533 } 4534 else 4535 { 4536 gsym->set_needs_dynsym_entry(); 4537 this->rel_->add_global(gsym, elfcpp::R_AARCH64_JUMP_SLOT, this->got_plt_, 4538 got_offset, 0); 4539 } 4540 } 4541 4542 // Return where the TLSDESC relocations should go, creating it if 4543 // necessary. These follow the JUMP_SLOT relocations. 4544 4545 template<int size, bool big_endian> 4546 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section* 4547 Output_data_plt_aarch64<size, big_endian>::rela_tlsdesc(Layout* layout) 4548 { 4549 if (this->tlsdesc_rel_ == NULL) 4550 { 4551 this->tlsdesc_rel_ = new Reloc_section(false); 4552 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA, 4553 elfcpp::SHF_ALLOC, this->tlsdesc_rel_, 4554 ORDER_DYNAMIC_PLT_RELOCS, false); 4555 gold_assert(this->tlsdesc_rel_->output_section() 4556 == this->rel_->output_section()); 4557 } 4558 return this->tlsdesc_rel_; 4559 } 4560 4561 // Return where the IRELATIVE relocations should go in the PLT. These 4562 // follow the JUMP_SLOT and the TLSDESC relocations. 4563 4564 template<int size, bool big_endian> 4565 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section* 4566 Output_data_plt_aarch64<size, big_endian>::rela_irelative(Symbol_table* symtab, 4567 Layout* layout) 4568 { 4569 if (this->irelative_rel_ == NULL) 4570 { 4571 // Make sure we have a place for the TLSDESC relocations, in 4572 // case we see any later on. 4573 this->rela_tlsdesc(layout); 4574 this->irelative_rel_ = new Reloc_section(false); 4575 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA, 4576 elfcpp::SHF_ALLOC, this->irelative_rel_, 4577 ORDER_DYNAMIC_PLT_RELOCS, false); 4578 gold_assert(this->irelative_rel_->output_section() 4579 == this->rel_->output_section()); 4580 4581 if (parameters->doing_static_link()) 4582 { 4583 // A statically linked executable will only have a .rela.plt 4584 // section to hold R_AARCH64_IRELATIVE relocs for 4585 // STT_GNU_IFUNC symbols. The library will use these 4586 // symbols to locate the IRELATIVE relocs at program startup 4587 // time. 4588 symtab->define_in_output_data("__rela_iplt_start", NULL, 4589 Symbol_table::PREDEFINED, 4590 this->irelative_rel_, 0, 0, 4591 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL, 4592 elfcpp::STV_HIDDEN, 0, false, true); 4593 symtab->define_in_output_data("__rela_iplt_end", NULL, 4594 Symbol_table::PREDEFINED, 4595 this->irelative_rel_, 0, 0, 4596 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL, 4597 elfcpp::STV_HIDDEN, 0, true, true); 4598 } 4599 } 4600 return this->irelative_rel_; 4601 } 4602 4603 // Return the PLT address to use for a global symbol. 
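// For example, with five ordinary PLT entries the IRELATIVE block starts at
// offset 32 + 5 * 16 == 112 in the standard 64-bit layout, so an IFUNC
// symbol whose plt_offset() is 16 resolves to this->address() + 112 + 16.
// Ordinary symbols simply add their plt_offset(), which already includes
// the 32-byte PLT0.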
4604 4605 template<int size, bool big_endian> 4606 uint64_t 4607 Output_data_plt_aarch64<size, big_endian>::address_for_global( 4608 const Symbol* gsym) 4609 { 4610 uint64_t offset = 0; 4611 if (gsym->type() == elfcpp::STT_GNU_IFUNC 4612 && gsym->can_use_relative_reloc(false)) 4613 offset = (this->first_plt_entry_offset() + 4614 this->count_ * this->get_plt_entry_size()); 4615 return this->address() + offset + gsym->plt_offset(); 4616 } 4617 4618 // Return the PLT address to use for a local symbol. These are always 4619 // IRELATIVE relocs. 4620 4621 template<int size, bool big_endian> 4622 uint64_t 4623 Output_data_plt_aarch64<size, big_endian>::address_for_local( 4624 const Relobj* object, 4625 unsigned int r_sym) 4626 { 4627 return (this->address() 4628 + this->first_plt_entry_offset() 4629 + this->count_ * this->get_plt_entry_size() 4630 + object->local_plt_offset(r_sym)); 4631 } 4632 4633 // Set the final size. 4634 4635 template<int size, bool big_endian> 4636 void 4637 Output_data_plt_aarch64<size, big_endian>::set_final_data_size() 4638 { 4639 unsigned int count = this->count_ + this->irelative_count_; 4640 unsigned int extra_size = 0; 4641 if (this->has_tlsdesc_entry()) 4642 extra_size += this->get_plt_tlsdesc_entry_size(); 4643 this->set_data_size(this->first_plt_entry_offset() 4644 + count * this->get_plt_entry_size() 4645 + extra_size); 4646 } 4647 4648 template<int size, bool big_endian> 4649 class Output_data_plt_aarch64_standard : 4650 public Output_data_plt_aarch64<size, big_endian> 4651 { 4652 public: 4653 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address; 4654 Output_data_plt_aarch64_standard( 4655 Layout* layout, 4656 Output_data_got_aarch64<size, big_endian>* got, 4657 Output_data_space* got_plt, 4658 Output_data_space* got_irelative) 4659 : Output_data_plt_aarch64<size, big_endian>(layout, 4660 size == 32 ? 4 : 8, 4661 got, got_plt, 4662 got_irelative) 4663 { } 4664 4665 protected: 4666 // Return the offset of the first non-reserved PLT entry. 4667 virtual unsigned int 4668 do_first_plt_entry_offset() const 4669 { return this->first_plt_entry_size; } 4670 4671 // Return the size of a PLT entry 4672 virtual unsigned int 4673 do_get_plt_entry_size() const 4674 { return this->plt_entry_size; } 4675 4676 // Return the size of a tlsdesc entry 4677 virtual unsigned int 4678 do_get_plt_tlsdesc_entry_size() const 4679 { return this->plt_tlsdesc_entry_size; } 4680 4681 virtual void 4682 do_fill_first_plt_entry(unsigned char* pov, 4683 Address got_address, 4684 Address plt_address); 4685 4686 virtual void 4687 do_fill_plt_entry(unsigned char* pov, 4688 Address got_address, 4689 Address plt_address, 4690 unsigned int got_offset, 4691 unsigned int plt_offset); 4692 4693 virtual void 4694 do_fill_tlsdesc_entry(unsigned char* pov, 4695 Address gotplt_address, 4696 Address plt_address, 4697 Address got_base, 4698 unsigned int tlsdesc_got_offset, 4699 unsigned int plt_offset); 4700 4701 private: 4702 // The size of the first plt entry size. 4703 static const int first_plt_entry_size = 32; 4704 // The size of the plt entry size. 4705 static const int plt_entry_size = 16; 4706 // The size of the plt tlsdesc entry size. 4707 static const int plt_tlsdesc_entry_size = 32; 4708 // Template for the first PLT entry. 4709 static const uint32_t first_plt_entry[first_plt_entry_size / 4]; 4710 // Template for subsequent PLT entries. 4711 static const uint32_t plt_entry[plt_entry_size / 4]; 4712 // The reserved TLSDESC entry in the PLT for an executable. 
4713 static const uint32_t tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4]; 4714 }; 4715 4716 // The first entry in the PLT for an executable. 4717 4718 template<> 4719 const uint32_t 4720 Output_data_plt_aarch64_standard<32, false>:: 4721 first_plt_entry[first_plt_entry_size / 4] = 4722 { 4723 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */ 4724 0x90000010, /* adrp x16, PLT_GOT+0x8 */ 4725 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */ 4726 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */ 4727 0xd61f0220, /* br x17 */ 4728 0xd503201f, /* nop */ 4729 0xd503201f, /* nop */ 4730 0xd503201f, /* nop */ 4731 }; 4732 4733 4734 template<> 4735 const uint32_t 4736 Output_data_plt_aarch64_standard<32, true>:: 4737 first_plt_entry[first_plt_entry_size / 4] = 4738 { 4739 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */ 4740 0x90000010, /* adrp x16, PLT_GOT+0x8 */ 4741 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */ 4742 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */ 4743 0xd61f0220, /* br x17 */ 4744 0xd503201f, /* nop */ 4745 0xd503201f, /* nop */ 4746 0xd503201f, /* nop */ 4747 }; 4748 4749 4750 template<> 4751 const uint32_t 4752 Output_data_plt_aarch64_standard<64, false>:: 4753 first_plt_entry[first_plt_entry_size / 4] = 4754 { 4755 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */ 4756 0x90000010, /* adrp x16, PLT_GOT+16 */ 4757 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */ 4758 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */ 4759 0xd61f0220, /* br x17 */ 4760 0xd503201f, /* nop */ 4761 0xd503201f, /* nop */ 4762 0xd503201f, /* nop */ 4763 }; 4764 4765 4766 template<> 4767 const uint32_t 4768 Output_data_plt_aarch64_standard<64, true>:: 4769 first_plt_entry[first_plt_entry_size / 4] = 4770 { 4771 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */ 4772 0x90000010, /* adrp x16, PLT_GOT+16 */ 4773 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */ 4774 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */ 4775 0xd61f0220, /* br x17 */ 4776 0xd503201f, /* nop */ 4777 0xd503201f, /* nop */ 4778 0xd503201f, /* nop */ 4779 }; 4780 4781 4782 template<> 4783 const uint32_t 4784 Output_data_plt_aarch64_standard<32, false>:: 4785 plt_entry[plt_entry_size / 4] = 4786 { 4787 0x90000010, /* adrp x16, PLTGOT + n * 4 */ 4788 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */ 4789 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */ 4790 0xd61f0220, /* br x17. */ 4791 }; 4792 4793 4794 template<> 4795 const uint32_t 4796 Output_data_plt_aarch64_standard<32, true>:: 4797 plt_entry[plt_entry_size / 4] = 4798 { 4799 0x90000010, /* adrp x16, PLTGOT + n * 4 */ 4800 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */ 4801 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */ 4802 0xd61f0220, /* br x17. */ 4803 }; 4804 4805 4806 template<> 4807 const uint32_t 4808 Output_data_plt_aarch64_standard<64, false>:: 4809 plt_entry[plt_entry_size / 4] = 4810 { 4811 0x90000010, /* adrp x16, PLTGOT + n * 8 */ 4812 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */ 4813 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */ 4814 0xd61f0220, /* br x17. */ 4815 }; 4816 4817 4818 template<> 4819 const uint32_t 4820 Output_data_plt_aarch64_standard<64, true>:: 4821 plt_entry[plt_entry_size / 4] = 4822 { 4823 0x90000010, /* adrp x16, PLTGOT + n * 8 */ 4824 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */ 4825 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */ 4826 0xd61f0220, /* br x17. 
*/ 4827 }; 4828 4829 4830 template<int size, bool big_endian> 4831 void 4832 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_first_plt_entry( 4833 unsigned char* pov, 4834 Address got_address, 4835 Address plt_address) 4836 { 4837 // PLT0 of the small PLT looks like this in ELF64 - 4838 // stp x16, x30, [sp, #-16]! Save the reloc and lr on stack. 4839 // adrp x16, PLT_GOT + 16 Get the page base of the GOTPLT 4840 // ldr x17, [x16, #:lo12:PLT_GOT+16] Load the address of the 4841 // symbol resolver 4842 // add x16, x16, #:lo12:PLT_GOT+16 Load the lo12 bits of the 4843 // GOTPLT entry for this. 4844 // br x17 4845 // PLT0 will be slightly different in ELF32 due to different got entry 4846 // size. 4847 memcpy(pov, this->first_plt_entry, this->first_plt_entry_size); 4848 Address gotplt_2nd_ent = got_address + (size / 8) * 2; 4849 4850 // Fill in the top 21 bits for this: ADRP x16, PLT_GOT + 8 * 2. 4851 // ADRP: (PG(S+A)-PG(P)) >> 12) & 0x1fffff. 4852 // FIXME: This only works for 64bit 4853 AArch64_relocate_functions<size, big_endian>::adrp(pov + 4, 4854 gotplt_2nd_ent, plt_address + 4); 4855 4856 // Fill in R_AARCH64_LDST8_LO12 4857 elfcpp::Swap<32, big_endian>::writeval( 4858 pov + 8, 4859 ((this->first_plt_entry[2] & 0xffc003ff) 4860 | ((gotplt_2nd_ent & 0xff8) << 7))); 4861 4862 // Fill in R_AARCH64_ADD_ABS_LO12 4863 elfcpp::Swap<32, big_endian>::writeval( 4864 pov + 12, 4865 ((this->first_plt_entry[3] & 0xffc003ff) 4866 | ((gotplt_2nd_ent & 0xfff) << 10))); 4867 } 4868 4869 4870 // Subsequent entries in the PLT for an executable. 4871 // FIXME: This only works for 64bit 4872 4873 template<int size, bool big_endian> 4874 void 4875 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_plt_entry( 4876 unsigned char* pov, 4877 Address got_address, 4878 Address plt_address, 4879 unsigned int got_offset, 4880 unsigned int plt_offset) 4881 { 4882 memcpy(pov, this->plt_entry, this->plt_entry_size); 4883 4884 Address gotplt_entry_address = got_address + got_offset; 4885 Address plt_entry_address = plt_address + plt_offset; 4886 4887 // Fill in R_AARCH64_PCREL_ADR_HI21 4888 AArch64_relocate_functions<size, big_endian>::adrp( 4889 pov, 4890 gotplt_entry_address, 4891 plt_entry_address); 4892 4893 // Fill in R_AARCH64_LDST64_ABS_LO12 4894 elfcpp::Swap<32, big_endian>::writeval( 4895 pov + 4, 4896 ((this->plt_entry[1] & 0xffc003ff) 4897 | ((gotplt_entry_address & 0xff8) << 7))); 4898 4899 // Fill in R_AARCH64_ADD_ABS_LO12 4900 elfcpp::Swap<32, big_endian>::writeval( 4901 pov + 8, 4902 ((this->plt_entry[2] & 0xffc003ff) 4903 | ((gotplt_entry_address & 0xfff) <<10))); 4904 4905 } 4906 4907 4908 template<> 4909 const uint32_t 4910 Output_data_plt_aarch64_standard<32, false>:: 4911 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] = 4912 { 4913 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */ 4914 0x90000002, /* adrp x2, 0 */ 4915 0x90000003, /* adrp x3, 0 */ 4916 0xb9400042, /* ldr w2, [w2, #0] */ 4917 0x11000063, /* add w3, w3, 0 */ 4918 0xd61f0040, /* br x2 */ 4919 0xd503201f, /* nop */ 4920 0xd503201f, /* nop */ 4921 }; 4922 4923 template<> 4924 const uint32_t 4925 Output_data_plt_aarch64_standard<32, true>:: 4926 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] = 4927 { 4928 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! 
*/ 4929 0x90000002, /* adrp x2, 0 */
4930 0x90000003, /* adrp x3, 0 */
4931 0xb9400042, /* ldr w2, [w2, #0] */
4932 0x11000063, /* add w3, w3, 0 */
4933 0xd61f0040, /* br x2 */
4934 0xd503201f, /* nop */
4935 0xd503201f, /* nop */
4936 };
4937
4938 template<>
4939 const uint32_t
4940 Output_data_plt_aarch64_standard<64, false>::
4941 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4942 {
4943 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4944 0x90000002, /* adrp x2, 0 */
4945 0x90000003, /* adrp x3, 0 */
4946 0xf9400042, /* ldr x2, [x2, #0] */
4947 0x91000063, /* add x3, x3, 0 */
4948 0xd61f0040, /* br x2 */
4949 0xd503201f, /* nop */
4950 0xd503201f, /* nop */
4951 };
4952
4953 template<>
4954 const uint32_t
4955 Output_data_plt_aarch64_standard<64, true>::
4956 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4957 {
4958 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4959 0x90000002, /* adrp x2, 0 */
4960 0x90000003, /* adrp x3, 0 */
4961 0xf9400042, /* ldr x2, [x2, #0] */
4962 0x91000063, /* add x3, x3, 0 */
4963 0xd61f0040, /* br x2 */
4964 0xd503201f, /* nop */
4965 0xd503201f, /* nop */
4966 };
4967
4968 template<int size, bool big_endian>
4969 void
4970 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_tlsdesc_entry(
4971 unsigned char* pov,
4972 Address gotplt_address,
4973 Address plt_address,
4974 Address got_base,
4975 unsigned int tlsdesc_got_offset,
4976 unsigned int plt_offset)
4977 {
4978 memcpy(pov, tlsdesc_plt_entry, plt_tlsdesc_entry_size);
4979
4980 // move DT_TLSDESC_GOT address into x2
4981 // move .got.plt address into x3
4982 Address tlsdesc_got_entry = got_base + tlsdesc_got_offset;
4983 Address plt_entry_address = plt_address + plt_offset;
4984
4985 // R_AARCH64_ADR_PREL_PG_HI21
4986 AArch64_relocate_functions<size, big_endian>::adrp(
4987 pov + 4,
4988 tlsdesc_got_entry,
4989 plt_entry_address + 4);
4990
4991 // R_AARCH64_ADR_PREL_PG_HI21
4992 AArch64_relocate_functions<size, big_endian>::adrp(
4993 pov + 8,
4994 gotplt_address,
4995 plt_entry_address + 8);
4996
4997 // R_AARCH64_LDST64_ABS_LO12
4998 elfcpp::Swap<32, big_endian>::writeval(
4999 pov + 12,
5000 ((this->tlsdesc_plt_entry[3] & 0xffc003ff)
5001 | ((tlsdesc_got_entry & 0xff8) << 7)));
5002
5003 // R_AARCH64_ADD_ABS_LO12
5004 elfcpp::Swap<32, big_endian>::writeval(
5005 pov + 16,
5006 ((this->tlsdesc_plt_entry[4] & 0xffc003ff)
5007 | ((gotplt_address & 0xfff) << 10)));
5008 }
5009
5010 // Write out the PLT. This uses the hand-coded instructions above,
5011 // and adjusts them as needed.
5012
5013 template<int size, bool big_endian>
5014 void
5015 Output_data_plt_aarch64<size, big_endian>::do_write(Output_file* of)
5016 {
5017 const off_t offset = this->offset();
5018 const section_size_type oview_size =
5019 convert_to_section_size_type(this->data_size());
5020 unsigned char* const oview = of->get_output_view(offset, oview_size);
5021
5022 const off_t got_file_offset = this->got_plt_->offset();
5023 gold_assert(got_file_offset + this->got_plt_->data_size()
5024 == this->got_irelative_->offset());
5025
5026 const section_size_type got_size =
5027 convert_to_section_size_type(this->got_plt_->data_size()
5028 + this->got_irelative_->data_size());
5029 unsigned char* const got_view = of->get_output_view(got_file_offset,
5030 got_size);
5031
5032 unsigned char* pov = oview;
5033
5034 // The base address of the .plt section.
5035 typename elfcpp::Elf_types<size>::Elf_Addr plt_address = this->address();
5036 // The base address of the PLT portion of the .got section.
5037 typename elfcpp::Elf_types<size>::Elf_Addr gotplt_address
5038 = this->got_plt_->address();
5039
5040 this->fill_first_plt_entry(pov, gotplt_address, plt_address);
5041 pov += this->first_plt_entry_offset();
5042
5043 // The first three entries in .got.plt are reserved.
5044 unsigned char* got_pov = got_view;
5045 memset(got_pov, 0, size / 8 * AARCH64_GOTPLT_RESERVE_COUNT);
5046 got_pov += (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
5047
5048 unsigned int plt_offset = this->first_plt_entry_offset();
5049 unsigned int got_offset = (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
5050 const unsigned int count = this->count_ + this->irelative_count_;
5051 for (unsigned int plt_index = 0;
5052 plt_index < count;
5053 ++plt_index,
5054 pov += this->get_plt_entry_size(),
5055 got_pov += size / 8,
5056 plt_offset += this->get_plt_entry_size(),
5057 got_offset += size / 8)
5058 {
5059 // Set and adjust the PLT entry itself.
5060 this->fill_plt_entry(pov, gotplt_address, plt_address,
5061 got_offset, plt_offset);
5062
5063 // Set the entry in the GOT, which points to plt0.
5064 elfcpp::Swap<size, big_endian>::writeval(got_pov, plt_address);
5065 }
5066
5067 if (this->has_tlsdesc_entry())
5068 {
5069 // Set and adjust the reserved TLSDESC PLT entry.
5070 unsigned int tlsdesc_got_offset = this->get_tlsdesc_got_offset();
5071 // The base address of the .got section.
5072 typename elfcpp::Elf_types<size>::Elf_Addr got_base =
5073 this->got_->address();
5074 this->fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
5075 tlsdesc_got_offset, plt_offset);
5076 pov += this->get_plt_tlsdesc_entry_size();
5077 }
5078
5079 gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
5080 gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);
5081
5082 of->write_output_view(offset, oview_size, oview);
5083 of->write_output_view(got_file_offset, got_size, got_view);
5084 }
5085
5086 // Describes how to update the immediate field of an instruction.
5087 struct AArch64_howto
5088 {
5089 // The immediate field mask.
5090 elfcpp::Elf_Xword dst_mask;
5091
5092 // The bit offset at which the relocation immediate is applied.
5093 int doffset;
5094
5095 // The second part offset, if the immediate field has two parts.
5096 // -1 if the immediate field has only one part.
5097 int doffset2;
5098 };
5099
5100 static const AArch64_howto aarch64_howto[AArch64_reloc_property::INST_NUM] =
5101 {
5102 {0, -1, -1}, // DATA
5103 {0x1fffe0, 5, -1}, // MOVW [20:5]-imm16
5104 {0xffffe0, 5, -1}, // LD [23:5]-imm19
5105 {0x60ffffe0, 29, 5}, // ADR [30:29]-immlo [23:5]-immhi
5106 {0x60ffffe0, 29, 5}, // ADRP [30:29]-immlo [23:5]-immhi
5107 {0x3ffc00, 10, -1}, // ADD [21:10]-imm12
5108 {0x3ffc00, 10, -1}, // LDST [21:10]-imm12
5109 {0x7ffe0, 5, -1}, // TBZNZ [18:5]-imm14
5110 {0xffffe0, 5, -1}, // CONDB [23:5]-imm19
5111 {0x3ffffff, 0, -1}, // B [25:0]-imm26
5112 {0x3ffffff, 0, -1}, // CALL [25:0]-imm26
5113 };
5114
5115 // AArch64 relocate function class
5116
5117 template<int size, bool big_endian>
5118 class AArch64_relocate_functions
5119 {
5120 public:
5121 typedef enum
5122 {
5123 STATUS_OKAY, // No error during relocation.
5124 STATUS_OVERFLOW, // Relocation overflow.
5125 STATUS_BAD_RELOC, // Relocation cannot be applied.
5126 } Status; 5127 5128 typedef AArch64_relocate_functions<size, big_endian> This; 5129 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address; 5130 typedef Relocate_info<size, big_endian> The_relocate_info; 5131 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj; 5132 typedef Reloc_stub<size, big_endian> The_reloc_stub; 5133 typedef Stub_table<size, big_endian> The_stub_table; 5134 typedef elfcpp::Rela<size, big_endian> The_rela; 5135 typedef typename elfcpp::Swap<size, big_endian>::Valtype AArch64_valtype; 5136 5137 // Return the page address of the address. 5138 // Page(address) = address & ~0xFFF 5139 5140 static inline AArch64_valtype 5141 Page(Address address) 5142 { 5143 return (address & (~static_cast<Address>(0xFFF))); 5144 } 5145 5146 private: 5147 // Update instruction (pointed by view) with selected bits (immed). 5148 // val = (val & ~dst_mask) | (immed << doffset) 5149 5150 template<int valsize> 5151 static inline void 5152 update_view(unsigned char* view, 5153 AArch64_valtype immed, 5154 elfcpp::Elf_Xword doffset, 5155 elfcpp::Elf_Xword dst_mask) 5156 { 5157 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype; 5158 Valtype* wv = reinterpret_cast<Valtype*>(view); 5159 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv); 5160 5161 // Clear immediate fields. 5162 val &= ~dst_mask; 5163 elfcpp::Swap<valsize, big_endian>::writeval(wv, 5164 static_cast<Valtype>(val | (immed << doffset))); 5165 } 5166 5167 // Update two parts of an instruction (pointed by view) with selected 5168 // bits (immed1 and immed2). 5169 // val = (val & ~dst_mask) | (immed1 << doffset1) | (immed2 << doffset2) 5170 5171 template<int valsize> 5172 static inline void 5173 update_view_two_parts( 5174 unsigned char* view, 5175 AArch64_valtype immed1, 5176 AArch64_valtype immed2, 5177 elfcpp::Elf_Xword doffset1, 5178 elfcpp::Elf_Xword doffset2, 5179 elfcpp::Elf_Xword dst_mask) 5180 { 5181 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype; 5182 Valtype* wv = reinterpret_cast<Valtype*>(view); 5183 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv); 5184 val &= ~dst_mask; 5185 elfcpp::Swap<valsize, big_endian>::writeval(wv, 5186 static_cast<Valtype>(val | (immed1 << doffset1) | 5187 (immed2 << doffset2))); 5188 } 5189 5190 // Update adr or adrp instruction with immed. 5191 // In adr and adrp: [30:29] immlo [23:5] immhi 5192 5193 static inline void 5194 update_adr(unsigned char* view, AArch64_valtype immed) 5195 { 5196 elfcpp::Elf_Xword dst_mask = (0x3 << 29) | (0x7ffff << 5); 5197 This::template update_view_two_parts<32>( 5198 view, 5199 immed & 0x3, 5200 (immed & 0x1ffffc) >> 2, 5201 29, 5202 5, 5203 dst_mask); 5204 } 5205 5206 // Update movz/movn instruction with bits immed. 5207 // Set instruction to movz if is_movz is true, otherwise set instruction 5208 // to movn. 5209 5210 static inline void 5211 update_movnz(unsigned char* view, 5212 AArch64_valtype immed, 5213 bool is_movz) 5214 { 5215 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype; 5216 Valtype* wv = reinterpret_cast<Valtype*>(view); 5217 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv); 5218 5219 const elfcpp::Elf_Xword doffset = 5220 aarch64_howto[AArch64_reloc_property::INST_MOVW].doffset; 5221 const elfcpp::Elf_Xword dst_mask = 5222 aarch64_howto[AArch64_reloc_property::INST_MOVW].dst_mask; 5223 5224 // Clear immediate fields and opc code. 5225 val &= ~(dst_mask | (0x3 << 29)); 5226 5227 // Set instruction to movz or movn. 
5228 // movz: [30:29] is 10 movn: [30:29] is 00 5229 if (is_movz) 5230 val |= (0x2 << 29); 5231 5232 elfcpp::Swap<32, big_endian>::writeval(wv, 5233 static_cast<Valtype>(val | (immed << doffset))); 5234 } 5235 5236 public: 5237 5238 // Update selected bits in text. 5239 5240 template<int valsize> 5241 static inline typename This::Status 5242 reloc_common(unsigned char* view, Address x, 5243 const AArch64_reloc_property* reloc_property) 5244 { 5245 // Select bits from X. 5246 Address immed = reloc_property->select_x_value(x); 5247 5248 // Update view. 5249 const AArch64_reloc_property::Reloc_inst inst = 5250 reloc_property->reloc_inst(); 5251 // If it is a data relocation or instruction has 2 parts of immediate 5252 // fields, you should not call pcrela_general. 5253 gold_assert(aarch64_howto[inst].doffset2 == -1 && 5254 aarch64_howto[inst].doffset != -1); 5255 This::template update_view<valsize>(view, immed, 5256 aarch64_howto[inst].doffset, 5257 aarch64_howto[inst].dst_mask); 5258 5259 // Do check overflow or alignment if needed. 5260 return (reloc_property->checkup_x_value(x) 5261 ? This::STATUS_OKAY 5262 : This::STATUS_OVERFLOW); 5263 } 5264 5265 // Construct a B insn. Note, although we group it here with other relocation 5266 // operation, there is actually no 'relocation' involved here. 5267 static inline void 5268 construct_b(unsigned char* view, unsigned int branch_offset) 5269 { 5270 update_view_two_parts<32>(view, 0x05, (branch_offset >> 2), 5271 26, 0, 0xffffffff); 5272 } 5273 5274 // Do a simple rela relocation at unaligned addresses. 5275 5276 template<int valsize> 5277 static inline typename This::Status 5278 rela_ua(unsigned char* view, 5279 const Sized_relobj_file<size, big_endian>* object, 5280 const Symbol_value<size>* psymval, 5281 AArch64_valtype addend, 5282 const AArch64_reloc_property* reloc_property) 5283 { 5284 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype 5285 Valtype; 5286 typename elfcpp::Elf_types<size>::Elf_Addr x = 5287 psymval->value(object, addend); 5288 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view, 5289 static_cast<Valtype>(x)); 5290 return (reloc_property->checkup_x_value(x) 5291 ? This::STATUS_OKAY 5292 : This::STATUS_OVERFLOW); 5293 } 5294 5295 // Do a simple pc-relative relocation at unaligned addresses. 5296 5297 template<int valsize> 5298 static inline typename This::Status 5299 pcrela_ua(unsigned char* view, 5300 const Sized_relobj_file<size, big_endian>* object, 5301 const Symbol_value<size>* psymval, 5302 AArch64_valtype addend, 5303 Address address, 5304 const AArch64_reloc_property* reloc_property) 5305 { 5306 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype 5307 Valtype; 5308 Address x = psymval->value(object, addend) - address; 5309 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view, 5310 static_cast<Valtype>(x)); 5311 return (reloc_property->checkup_x_value(x) 5312 ? This::STATUS_OKAY 5313 : This::STATUS_OVERFLOW); 5314 } 5315 5316 // Do a simple rela relocation at aligned addresses. 
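// The full value S + A is written to the place as raw data; no instruction
// immediate field is involved. rela_general below is the variant that
// instead patches an immediate field according to the howto table.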
5317 5318 template<int valsize> 5319 static inline typename This::Status 5320 rela( 5321 unsigned char* view, 5322 const Sized_relobj_file<size, big_endian>* object, 5323 const Symbol_value<size>* psymval, 5324 AArch64_valtype addend, 5325 const AArch64_reloc_property* reloc_property) 5326 { 5327 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype; 5328 Valtype* wv = reinterpret_cast<Valtype*>(view); 5329 Address x = psymval->value(object, addend); 5330 elfcpp::Swap<valsize, big_endian>::writeval(wv,static_cast<Valtype>(x)); 5331 return (reloc_property->checkup_x_value(x) 5332 ? This::STATUS_OKAY 5333 : This::STATUS_OVERFLOW); 5334 } 5335 5336 // Do relocate. Update selected bits in text. 5337 // new_val = (val & ~dst_mask) | (immed << doffset) 5338 5339 template<int valsize> 5340 static inline typename This::Status 5341 rela_general(unsigned char* view, 5342 const Sized_relobj_file<size, big_endian>* object, 5343 const Symbol_value<size>* psymval, 5344 AArch64_valtype addend, 5345 const AArch64_reloc_property* reloc_property) 5346 { 5347 // Calculate relocation. 5348 Address x = psymval->value(object, addend); 5349 return This::template reloc_common<valsize>(view, x, reloc_property); 5350 } 5351 5352 // Do relocate. Update selected bits in text. 5353 // new val = (val & ~dst_mask) | (immed << doffset) 5354 5355 template<int valsize> 5356 static inline typename This::Status 5357 rela_general( 5358 unsigned char* view, 5359 AArch64_valtype s, 5360 AArch64_valtype addend, 5361 const AArch64_reloc_property* reloc_property) 5362 { 5363 // Calculate relocation. 5364 Address x = s + addend; 5365 return This::template reloc_common<valsize>(view, x, reloc_property); 5366 } 5367 5368 // Do address relative relocate. Update selected bits in text. 5369 // new val = (val & ~dst_mask) | (immed << doffset) 5370 5371 template<int valsize> 5372 static inline typename This::Status 5373 pcrela_general( 5374 unsigned char* view, 5375 const Sized_relobj_file<size, big_endian>* object, 5376 const Symbol_value<size>* psymval, 5377 AArch64_valtype addend, 5378 Address address, 5379 const AArch64_reloc_property* reloc_property) 5380 { 5381 // Calculate relocation. 5382 Address x = psymval->value(object, addend) - address; 5383 return This::template reloc_common<valsize>(view, x, reloc_property); 5384 } 5385 5386 5387 // Calculate (S + A) - address, update adr instruction. 5388 5389 static inline typename This::Status 5390 adr(unsigned char* view, 5391 const Sized_relobj_file<size, big_endian>* object, 5392 const Symbol_value<size>* psymval, 5393 Address addend, 5394 Address address, 5395 const AArch64_reloc_property* /* reloc_property */) 5396 { 5397 AArch64_valtype x = psymval->value(object, addend) - address; 5398 // Pick bits [20:0] of X. 5399 AArch64_valtype immed = x & 0x1fffff; 5400 update_adr(view, immed); 5401 // Check -2^20 <= X < 2^20 5402 return (size == 64 && Bits<21>::has_overflow((x)) 5403 ? This::STATUS_OVERFLOW 5404 : This::STATUS_OKAY); 5405 } 5406 5407 // Calculate PG(S+A) - PG(address), update adrp instruction. 5408 // R_AARCH64_ADR_PREL_PG_HI21 5409 5410 static inline typename This::Status 5411 adrp( 5412 unsigned char* view, 5413 Address sa, 5414 Address address) 5415 { 5416 AArch64_valtype x = This::Page(sa) - This::Page(address); 5417 // Pick [32:12] of X. 5418 AArch64_valtype immed = (x >> 12) & 0x1fffff; 5419 update_adr(view, immed); 5420 // Check -2^32 <= X < 2^32 5421 return (size == 64 && Bits<33>::has_overflow((x)) 5422 ? 
This::STATUS_OVERFLOW 5423 : This::STATUS_OKAY); 5424 } 5425 5426 // Calculate PG(S+A) - PG(address), update adrp instruction. 5427 // R_AARCH64_ADR_PREL_PG_HI21 5428 5429 static inline typename This::Status 5430 adrp(unsigned char* view, 5431 const Sized_relobj_file<size, big_endian>* object, 5432 const Symbol_value<size>* psymval, 5433 Address addend, 5434 Address address, 5435 const AArch64_reloc_property* reloc_property) 5436 { 5437 Address sa = psymval->value(object, addend); 5438 AArch64_valtype x = This::Page(sa) - This::Page(address); 5439 // Pick [32:12] of X. 5440 AArch64_valtype immed = (x >> 12) & 0x1fffff; 5441 update_adr(view, immed); 5442 return (reloc_property->checkup_x_value(x) 5443 ? This::STATUS_OKAY 5444 : This::STATUS_OVERFLOW); 5445 } 5446 5447 // Update mov[n/z] instruction. Check overflow if needed. 5448 // If X >=0, set the instruction to movz and its immediate value to the 5449 // selected bits S. 5450 // If X < 0, set the instruction to movn and its immediate value to 5451 // NOT (selected bits of). 5452 5453 static inline typename This::Status 5454 movnz(unsigned char* view, 5455 AArch64_valtype x, 5456 const AArch64_reloc_property* reloc_property) 5457 { 5458 // Select bits from X. 5459 Address immed; 5460 bool is_movz; 5461 typedef typename elfcpp::Elf_types<size>::Elf_Swxword SignedW; 5462 if (static_cast<SignedW>(x) >= 0) 5463 { 5464 immed = reloc_property->select_x_value(x); 5465 is_movz = true; 5466 } 5467 else 5468 { 5469 immed = reloc_property->select_x_value(~x);; 5470 is_movz = false; 5471 } 5472 5473 // Update movnz instruction. 5474 update_movnz(view, immed, is_movz); 5475 5476 // Do check overflow or alignment if needed. 5477 return (reloc_property->checkup_x_value(x) 5478 ? This::STATUS_OKAY 5479 : This::STATUS_OVERFLOW); 5480 } 5481 5482 static inline bool 5483 maybe_apply_stub(unsigned int, 5484 const The_relocate_info*, 5485 const The_rela&, 5486 unsigned char*, 5487 Address, 5488 const Sized_symbol<size>*, 5489 const Symbol_value<size>*, 5490 const Sized_relobj_file<size, big_endian>*, 5491 section_size_type); 5492 5493 }; // End of AArch64_relocate_functions 5494 5495 5496 // For a certain relocation type (usually jump/branch), test to see if the 5497 // destination needs a stub to fulfil. If so, re-route the destination of the 5498 // original instruction to the stub, note, at this time, the stub has already 5499 // been generated. 5500 5501 template<int size, bool big_endian> 5502 bool 5503 AArch64_relocate_functions<size, big_endian>:: 5504 maybe_apply_stub(unsigned int r_type, 5505 const The_relocate_info* relinfo, 5506 const The_rela& rela, 5507 unsigned char* view, 5508 Address address, 5509 const Sized_symbol<size>* gsym, 5510 const Symbol_value<size>* psymval, 5511 const Sized_relobj_file<size, big_endian>* object, 5512 section_size_type current_group_size) 5513 { 5514 if (parameters->options().relocatable()) 5515 return false; 5516 5517 typename elfcpp::Elf_types<size>::Elf_Swxword addend = rela.get_r_addend(); 5518 Address branch_target = psymval->value(object, 0) + addend; 5519 int stub_type = 5520 The_reloc_stub::stub_type_for_reloc(r_type, address, branch_target); 5521 if (stub_type == ST_NONE) 5522 return false; 5523 5524 const The_aarch64_relobj* aarch64_relobj = 5525 static_cast<const The_aarch64_relobj*>(object); 5526 const AArch64_reloc_property* arp = 5527 aarch64_reloc_property_table->get_reloc_property(r_type); 5528 gold_assert(arp != NULL); 5529 5530 // We don't create stubs for undefined symbols, but do for weak. 
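// (If the symbol stays undefined and the reference is not routed through
// the PLT, the branch has no real destination to reach, so it is left
// unchanged and no stub is applied.)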
5531   if (gsym
5532       && !gsym->use_plt_offset(arp->reference_flags())
5533       && gsym->is_undefined())
5534     {
5535       gold_debug(DEBUG_TARGET,
5536                  "stub: looking for a stub for undefined symbol %s in file %s",
5537                  gsym->name(), aarch64_relobj->name().c_str());
5538       return false;
5539     }
5540 
5541   The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
5542   gold_assert(stub_table != NULL);
5543 
5544   unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5545   typename The_reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
5546   The_reloc_stub* stub = stub_table->find_reloc_stub(stub_key);
5547   gold_assert(stub != NULL);
5548 
5549   Address new_branch_target = stub_table->address() + stub->offset();
5550   typename elfcpp::Swap<size, big_endian>::Valtype branch_offset =
5551       new_branch_target - address;
5552   typename This::Status status = This::template
5553       rela_general<32>(view, branch_offset, 0, arp);
5554   if (status != This::STATUS_OKAY)
5555     gold_error(_("Stub is too far away, try a smaller value "
5556                  "for '--stub-group-size'. The current value is 0x%lx."),
5557                static_cast<unsigned long>(current_group_size));
5558   return true;
5559 }
5560 
5561 
5562 // Group input sections for stub generation.
5563 //
5564 // We group input sections in an output section so that the total size,
5565 // including any padding space due to alignment, is smaller than GROUP_SIZE,
5566 // unless the only input section in the group is already bigger than GROUP_SIZE.
5567 // Then a stub table is created to follow the last input section
5568 // in the group. For each group a stub table is created and is placed
5569 // after the last group. If STUB_ALWAYS_AFTER_BRANCH is false, we further
5570 // extend the group after the stub table.
5571 
5572 template<int size, bool big_endian>
5573 void
5574 Target_aarch64<size, big_endian>::group_sections(
5575     Layout* layout,
5576     section_size_type group_size,
5577     bool stubs_always_after_branch,
5578     const Task* task)
5579 {
5580   // Group input sections and insert stub tables.
5581   Layout::Section_list section_list;
5582   layout->get_executable_sections(&section_list);
5583   for (Layout::Section_list::const_iterator p = section_list.begin();
5584        p != section_list.end();
5585        ++p)
5586     {
5587       AArch64_output_section<size, big_endian>* output_section =
5588           static_cast<AArch64_output_section<size, big_endian>*>(*p);
5589       output_section->group_sections(group_size, stubs_always_after_branch,
5590                                      this, task);
5591     }
5592 }
5593 
5594 
5595 // Find the AArch64_input_section object corresponding to the SHNDX-th input
5596 // section of RELOBJ.
5597 
5598 template<int size, bool big_endian>
5599 AArch64_input_section<size, big_endian>*
5600 Target_aarch64<size, big_endian>::find_aarch64_input_section(
5601     Relobj* relobj, unsigned int shndx) const
5602 {
5603   Section_id sid(relobj, shndx);
5604   typename AArch64_input_section_map::const_iterator p =
5605     this->aarch64_input_section_map_.find(sid);
5606   return (p != this->aarch64_input_section_map_.end()) ? p->second : NULL;
5607 }
5608 
5609 
5610 // Make a new AArch64_input_section object.
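// The new object wraps the SHNDX-th input section of RELOBJ so that a stub
// table can be attached to it during relaxation, and it is registered in
// aarch64_input_section_map_ for find_aarch64_input_section above.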
5611 5612 template<int size, bool big_endian> 5613 AArch64_input_section<size, big_endian>* 5614 Target_aarch64<size, big_endian>::new_aarch64_input_section( 5615 Relobj* relobj, unsigned int shndx) 5616 { 5617 Section_id sid(relobj, shndx); 5618 5619 AArch64_input_section<size, big_endian>* input_section = 5620 new AArch64_input_section<size, big_endian>(relobj, shndx); 5621 input_section->init(); 5622 5623 // Register new AArch64_input_section in map for look-up. 5624 std::pair<typename AArch64_input_section_map::iterator,bool> ins = 5625 this->aarch64_input_section_map_.insert( 5626 std::make_pair(sid, input_section)); 5627 5628 // Make sure that it we have not created another AArch64_input_section 5629 // for this input section already. 5630 gold_assert(ins.second); 5631 5632 return input_section; 5633 } 5634 5635 5636 // Relaxation hook. This is where we do stub generation. 5637 5638 template<int size, bool big_endian> 5639 bool 5640 Target_aarch64<size, big_endian>::do_relax( 5641 int pass, 5642 const Input_objects* input_objects, 5643 Symbol_table* symtab, 5644 Layout* layout , 5645 const Task* task) 5646 { 5647 gold_assert(!parameters->options().relocatable()); 5648 if (pass == 1) 5649 { 5650 // We don't handle negative stub_group_size right now. 5651 this->stub_group_size_ = abs(parameters->options().stub_group_size()); 5652 if (this->stub_group_size_ == 1) 5653 { 5654 // Leave room for 4096 4-byte stub entries. If we exceed that, then we 5655 // will fail to link. The user will have to relink with an explicit 5656 // group size option. 5657 this->stub_group_size_ = The_reloc_stub::MAX_BRANCH_OFFSET - 5658 4096 * 4; 5659 } 5660 group_sections(layout, this->stub_group_size_, true, task); 5661 } 5662 else 5663 { 5664 // If this is not the first pass, addresses and file offsets have 5665 // been reset at this point, set them here. 5666 for (Stub_table_iterator sp = this->stub_tables_.begin(); 5667 sp != this->stub_tables_.end(); ++sp) 5668 { 5669 The_stub_table* stt = *sp; 5670 The_aarch64_input_section* owner = stt->owner(); 5671 off_t off = align_address(owner->original_size(), 5672 stt->addralign()); 5673 stt->set_address_and_file_offset(owner->address() + off, 5674 owner->offset() + off); 5675 } 5676 } 5677 5678 // Scan relocs for relocation stubs 5679 for (Input_objects::Relobj_iterator op = input_objects->relobj_begin(); 5680 op != input_objects->relobj_end(); 5681 ++op) 5682 { 5683 The_aarch64_relobj* aarch64_relobj = 5684 static_cast<The_aarch64_relobj*>(*op); 5685 // Lock the object so we can read from it. This is only called 5686 // single-threaded from Layout::finalize, so it is OK to lock. 5687 Task_lock_obj<Object> tl(task, aarch64_relobj); 5688 aarch64_relobj->scan_sections_for_stubs(this, symtab, layout); 5689 } 5690 5691 bool any_stub_table_changed = false; 5692 for (Stub_table_iterator siter = this->stub_tables_.begin(); 5693 siter != this->stub_tables_.end() && !any_stub_table_changed; ++siter) 5694 { 5695 The_stub_table* stub_table = *siter; 5696 if (stub_table->update_data_size_changed_p()) 5697 { 5698 The_aarch64_input_section* owner = stub_table->owner(); 5699 uint64_t address = owner->address(); 5700 off_t offset = owner->offset(); 5701 owner->reset_address_and_file_offset(); 5702 owner->set_address_and_file_offset(address, offset); 5703 5704 any_stub_table_changed = true; 5705 } 5706 } 5707 5708 // Do not continue relaxation. 
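  // Relaxation stops once no stub table changed size in this pass; at that
  // point every stub table's contents can be finalized.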
5709 bool continue_relaxation = any_stub_table_changed; 5710 if (!continue_relaxation) 5711 for (Stub_table_iterator sp = this->stub_tables_.begin(); 5712 (sp != this->stub_tables_.end()); 5713 ++sp) 5714 (*sp)->finalize_stubs(); 5715 5716 return continue_relaxation; 5717 } 5718 5719 5720 // Make a new Stub_table. 5721 5722 template<int size, bool big_endian> 5723 Stub_table<size, big_endian>* 5724 Target_aarch64<size, big_endian>::new_stub_table( 5725 AArch64_input_section<size, big_endian>* owner) 5726 { 5727 Stub_table<size, big_endian>* stub_table = 5728 new Stub_table<size, big_endian>(owner); 5729 stub_table->set_address(align_address( 5730 owner->address() + owner->data_size(), 8)); 5731 stub_table->set_file_offset(owner->offset() + owner->data_size()); 5732 stub_table->finalize_data_size(); 5733 5734 this->stub_tables_.push_back(stub_table); 5735 5736 return stub_table; 5737 } 5738 5739 5740 template<int size, bool big_endian> 5741 uint64_t 5742 Target_aarch64<size, big_endian>::do_reloc_addend( 5743 void* arg, unsigned int r_type, uint64_t) const 5744 { 5745 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC); 5746 uintptr_t intarg = reinterpret_cast<uintptr_t>(arg); 5747 gold_assert(intarg < this->tlsdesc_reloc_info_.size()); 5748 const Tlsdesc_info& ti(this->tlsdesc_reloc_info_[intarg]); 5749 const Symbol_value<size>* psymval = ti.object->local_symbol(ti.r_sym); 5750 gold_assert(psymval->is_tls_symbol()); 5751 // The value of a TLS symbol is the offset in the TLS segment. 5752 return psymval->value(ti.object, 0); 5753 } 5754 5755 // Return the number of entries in the PLT. 5756 5757 template<int size, bool big_endian> 5758 unsigned int 5759 Target_aarch64<size, big_endian>::plt_entry_count() const 5760 { 5761 if (this->plt_ == NULL) 5762 return 0; 5763 return this->plt_->entry_count(); 5764 } 5765 5766 // Return the offset of the first non-reserved PLT entry. 5767 5768 template<int size, bool big_endian> 5769 unsigned int 5770 Target_aarch64<size, big_endian>::first_plt_entry_offset() const 5771 { 5772 return this->plt_->first_plt_entry_offset(); 5773 } 5774 5775 // Return the size of each PLT entry. 5776 5777 template<int size, bool big_endian> 5778 unsigned int 5779 Target_aarch64<size, big_endian>::plt_entry_size() const 5780 { 5781 return this->plt_->get_plt_entry_size(); 5782 } 5783 5784 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment. 5785 5786 template<int size, bool big_endian> 5787 void 5788 Target_aarch64<size, big_endian>::define_tls_base_symbol( 5789 Symbol_table* symtab, Layout* layout) 5790 { 5791 if (this->tls_base_symbol_defined_) 5792 return; 5793 5794 Output_segment* tls_segment = layout->tls_segment(); 5795 if (tls_segment != NULL) 5796 { 5797 // _TLS_MODULE_BASE_ always points to the beginning of tls segment. 5798 symtab->define_in_output_segment("_TLS_MODULE_BASE_", NULL, 5799 Symbol_table::PREDEFINED, 5800 tls_segment, 0, 0, 5801 elfcpp::STT_TLS, 5802 elfcpp::STB_LOCAL, 5803 elfcpp::STV_HIDDEN, 0, 5804 Symbol::SEGMENT_START, 5805 true); 5806 } 5807 this->tls_base_symbol_defined_ = true; 5808 } 5809 5810 // Create the reserved PLT and GOT entries for the TLS descriptor resolver. 5811 5812 template<int size, bool big_endian> 5813 void 5814 Target_aarch64<size, big_endian>::reserve_tlsdesc_entries( 5815 Symbol_table* symtab, Layout* layout) 5816 { 5817 if (this->plt_ == NULL) 5818 this->make_plt_section(symtab, layout); 5819 5820 if (!this->plt_->has_tlsdesc_entry()) 5821 { 5822 // Allocate the TLSDESC_GOT entry. 
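      // (A single zero-initialized GOT slot; its offset is handed to
      // reserve_tlsdesc_entry below so the reserved TLSDESC PLT entry can
      // address it.)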
5823 Output_data_got_aarch64<size, big_endian>* got = 5824 this->got_section(symtab, layout); 5825 unsigned int got_offset = got->add_constant(0); 5826 5827 // Allocate the TLSDESC_PLT entry. 5828 this->plt_->reserve_tlsdesc_entry(got_offset); 5829 } 5830 } 5831 5832 // Create a GOT entry for the TLS module index. 5833 5834 template<int size, bool big_endian> 5835 unsigned int 5836 Target_aarch64<size, big_endian>::got_mod_index_entry( 5837 Symbol_table* symtab, Layout* layout, 5838 Sized_relobj_file<size, big_endian>* object) 5839 { 5840 if (this->got_mod_index_offset_ == -1U) 5841 { 5842 gold_assert(symtab != NULL && layout != NULL && object != NULL); 5843 Reloc_section* rela_dyn = this->rela_dyn_section(layout); 5844 Output_data_got_aarch64<size, big_endian>* got = 5845 this->got_section(symtab, layout); 5846 unsigned int got_offset = got->add_constant(0); 5847 rela_dyn->add_local(object, 0, elfcpp::R_AARCH64_TLS_DTPMOD64, got, 5848 got_offset, 0); 5849 got->add_constant(0); 5850 this->got_mod_index_offset_ = got_offset; 5851 } 5852 return this->got_mod_index_offset_; 5853 } 5854 5855 // Optimize the TLS relocation type based on what we know about the 5856 // symbol. IS_FINAL is true if the final address of this symbol is 5857 // known at link time. 5858 5859 template<int size, bool big_endian> 5860 tls::Tls_optimization 5861 Target_aarch64<size, big_endian>::optimize_tls_reloc(bool is_final, 5862 int r_type) 5863 { 5864 // If we are generating a shared library, then we can't do anything 5865 // in the linker 5866 if (parameters->options().shared()) 5867 return tls::TLSOPT_NONE; 5868 5869 switch (r_type) 5870 { 5871 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21: 5872 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: 5873 case elfcpp::R_AARCH64_TLSDESC_LD_PREL19: 5874 case elfcpp::R_AARCH64_TLSDESC_ADR_PREL21: 5875 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21: 5876 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12: 5877 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: 5878 case elfcpp::R_AARCH64_TLSDESC_OFF_G1: 5879 case elfcpp::R_AARCH64_TLSDESC_OFF_G0_NC: 5880 case elfcpp::R_AARCH64_TLSDESC_LDR: 5881 case elfcpp::R_AARCH64_TLSDESC_ADD: 5882 case elfcpp::R_AARCH64_TLSDESC_CALL: 5883 // These are General-Dynamic which permits fully general TLS 5884 // access. Since we know that we are generating an executable, 5885 // we can convert this to Initial-Exec. If we also know that 5886 // this is a local symbol, we can further switch to Local-Exec. 5887 if (is_final) 5888 return tls::TLSOPT_TO_LE; 5889 return tls::TLSOPT_TO_IE; 5890 5891 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21: 5892 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: 5893 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1: 5894 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: 5895 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12: 5896 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: 5897 // These are Local-Dynamic, which refer to local symbols in the 5898 // dynamic TLS block. Since we know that we generating an 5899 // executable, we can switch to Local-Exec. 5900 return tls::TLSOPT_TO_LE; 5901 5902 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1: 5903 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: 5904 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 5905 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 5906 case elfcpp::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19: 5907 // These are Initial-Exec relocs which get the thread offset 5908 // from the GOT. 
If we know that we are linking against the 5909 // local symbol, we can switch to Local-Exec, which links the 5910 // thread offset into the instruction. 5911 if (is_final) 5912 return tls::TLSOPT_TO_LE; 5913 return tls::TLSOPT_NONE; 5914 5915 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2: 5916 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1: 5917 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC: 5918 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0: 5919 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: 5920 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12: 5921 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12: 5922 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: 5923 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12: 5924 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC: 5925 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12: 5926 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC: 5927 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12: 5928 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC: 5929 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12: 5930 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC: 5931 // When we already have Local-Exec, there is nothing further we 5932 // can do. 5933 return tls::TLSOPT_NONE; 5934 5935 default: 5936 gold_unreachable(); 5937 } 5938 } 5939 5940 // Returns true if this relocation type could be that of a function pointer. 5941 5942 template<int size, bool big_endian> 5943 inline bool 5944 Target_aarch64<size, big_endian>::Scan::possible_function_pointer_reloc( 5945 unsigned int r_type) 5946 { 5947 switch (r_type) 5948 { 5949 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: 5950 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: 5951 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: 5952 case elfcpp::R_AARCH64_ADR_GOT_PAGE: 5953 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC: 5954 { 5955 return true; 5956 } 5957 } 5958 return false; 5959 } 5960 5961 // For safe ICF, scan a relocation for a local symbol to check if it 5962 // corresponds to a function pointer being taken. In that case mark 5963 // the function whose pointer was taken as not foldable. 5964 5965 template<int size, bool big_endian> 5966 inline bool 5967 Target_aarch64<size, big_endian>::Scan::local_reloc_may_be_function_pointer( 5968 Symbol_table* , 5969 Layout* , 5970 Target_aarch64<size, big_endian>* , 5971 Sized_relobj_file<size, big_endian>* , 5972 unsigned int , 5973 Output_section* , 5974 const elfcpp::Rela<size, big_endian>& , 5975 unsigned int r_type, 5976 const elfcpp::Sym<size, big_endian>&) 5977 { 5978 // When building a shared library, do not fold any local symbols. 5979 return (parameters->options().shared() 5980 || possible_function_pointer_reloc(r_type)); 5981 } 5982 5983 // For safe ICF, scan a relocation for a global symbol to check if it 5984 // corresponds to a function pointer being taken. In that case mark 5985 // the function whose pointer was taken as not foldable. 5986 5987 template<int size, bool big_endian> 5988 inline bool 5989 Target_aarch64<size, big_endian>::Scan::global_reloc_may_be_function_pointer( 5990 Symbol_table* , 5991 Layout* , 5992 Target_aarch64<size, big_endian>* , 5993 Sized_relobj_file<size, big_endian>* , 5994 unsigned int , 5995 Output_section* , 5996 const elfcpp::Rela<size, big_endian>& , 5997 unsigned int r_type, 5998 Symbol* gsym) 5999 { 6000 // When building a shared library, do not fold symbols whose visibility 6001 // is hidden, internal or protected. 
6002 return ((parameters->options().shared() 6003 && (gsym->visibility() == elfcpp::STV_INTERNAL 6004 || gsym->visibility() == elfcpp::STV_PROTECTED 6005 || gsym->visibility() == elfcpp::STV_HIDDEN)) 6006 || possible_function_pointer_reloc(r_type)); 6007 } 6008 6009 // Report an unsupported relocation against a local symbol. 6010 6011 template<int size, bool big_endian> 6012 void 6013 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_local( 6014 Sized_relobj_file<size, big_endian>* object, 6015 unsigned int r_type) 6016 { 6017 gold_error(_("%s: unsupported reloc %u against local symbol"), 6018 object->name().c_str(), r_type); 6019 } 6020 6021 // We are about to emit a dynamic relocation of type R_TYPE. If the 6022 // dynamic linker does not support it, issue an error. 6023 6024 template<int size, bool big_endian> 6025 void 6026 Target_aarch64<size, big_endian>::Scan::check_non_pic(Relobj* object, 6027 unsigned int r_type) 6028 { 6029 gold_assert(r_type != elfcpp::R_AARCH64_NONE); 6030 6031 switch (r_type) 6032 { 6033 // These are the relocation types supported by glibc for AARCH64. 6034 case elfcpp::R_AARCH64_NONE: 6035 case elfcpp::R_AARCH64_COPY: 6036 case elfcpp::R_AARCH64_GLOB_DAT: 6037 case elfcpp::R_AARCH64_JUMP_SLOT: 6038 case elfcpp::R_AARCH64_RELATIVE: 6039 case elfcpp::R_AARCH64_TLS_DTPREL64: 6040 case elfcpp::R_AARCH64_TLS_DTPMOD64: 6041 case elfcpp::R_AARCH64_TLS_TPREL64: 6042 case elfcpp::R_AARCH64_TLSDESC: 6043 case elfcpp::R_AARCH64_IRELATIVE: 6044 case elfcpp::R_AARCH64_ABS32: 6045 case elfcpp::R_AARCH64_ABS64: 6046 return; 6047 6048 default: 6049 break; 6050 } 6051 6052 // This prevents us from issuing more than one error per reloc 6053 // section. But we can still wind up issuing more than one 6054 // error per object file. 6055 if (this->issued_non_pic_error_) 6056 return; 6057 gold_assert(parameters->options().output_is_position_independent()); 6058 object->error(_("requires unsupported dynamic reloc; " 6059 "recompile with -fPIC")); 6060 this->issued_non_pic_error_ = true; 6061 return; 6062 } 6063 6064 // Return whether we need to make a PLT entry for a relocation of the 6065 // given type against a STT_GNU_IFUNC symbol. 6066 6067 template<int size, bool big_endian> 6068 bool 6069 Target_aarch64<size, big_endian>::Scan::reloc_needs_plt_for_ifunc( 6070 Sized_relobj_file<size, big_endian>* object, 6071 unsigned int r_type) 6072 { 6073 const AArch64_reloc_property* arp = 6074 aarch64_reloc_property_table->get_reloc_property(r_type); 6075 gold_assert(arp != NULL); 6076 6077 int flags = arp->reference_flags(); 6078 if (flags & Symbol::TLS_REF) 6079 { 6080 gold_error(_("%s: unsupported TLS reloc %s for IFUNC symbol"), 6081 object->name().c_str(), arp->name().c_str()); 6082 return false; 6083 } 6084 return flags != 0; 6085 } 6086 6087 // Scan a relocation for a local symbol. 
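// Decide, per relocation type, whether a reloc against a local symbol needs
// a GOT entry, a PLT entry (only for STT_GNU_IFUNC), or a dynamic relocation
// in a position-independent link.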
6088 
6089 template<int size, bool big_endian>
6090 inline void
6091 Target_aarch64<size, big_endian>::Scan::local(
6092     Symbol_table* symtab,
6093     Layout* layout,
6094     Target_aarch64<size, big_endian>* target,
6095     Sized_relobj_file<size, big_endian>* object,
6096     unsigned int data_shndx,
6097     Output_section* output_section,
6098     const elfcpp::Rela<size, big_endian>& rela,
6099     unsigned int r_type,
6100     const elfcpp::Sym<size, big_endian>& lsym,
6101     bool is_discarded)
6102 {
6103   if (is_discarded)
6104     return;
6105 
6106   typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
6107     Reloc_section;
6108   unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6109 
6110   // A local STT_GNU_IFUNC symbol may require a PLT entry.
6111   bool is_ifunc = lsym.get_st_type() == elfcpp::STT_GNU_IFUNC;
6112   if (is_ifunc && this->reloc_needs_plt_for_ifunc(object, r_type))
6113     target->make_local_ifunc_plt_entry(symtab, layout, object, r_sym);
6114 
6115   switch (r_type)
6116     {
6117     case elfcpp::R_AARCH64_NONE:
6118       break;
6119 
6120     case elfcpp::R_AARCH64_ABS32:
6121     case elfcpp::R_AARCH64_ABS16:
6122       if (parameters->options().output_is_position_independent())
6123         {
6124           gold_error(_("%s: unsupported reloc %u in pos independent link."),
6125                      object->name().c_str(), r_type);
6126         }
6127       break;
6128 
6129     case elfcpp::R_AARCH64_ABS64:
6130       // If building a shared library or PIE, we need to mark this as a
6131       // dynamic relocation, so that the dynamic loader can relocate it.
6132       if (parameters->options().output_is_position_independent())
6133         {
6134           Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6135           rela_dyn->add_local_relative(object, r_sym,
6136                                        elfcpp::R_AARCH64_RELATIVE,
6137                                        output_section,
6138                                        data_shndx,
6139                                        rela.get_r_offset(),
6140                                        rela.get_r_addend(),
6141                                        is_ifunc);
6142         }
6143       break;
6144 
6145     case elfcpp::R_AARCH64_PREL64:
6146     case elfcpp::R_AARCH64_PREL32:
6147     case elfcpp::R_AARCH64_PREL16:
6148       break;
6149 
6150     case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6151     case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6152     case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6153       // The above relocations are used to access GOT entries.
6154       {
6155         Output_data_got_aarch64<size, big_endian>* got =
6156             target->got_section(symtab, layout);
6157         bool is_new = false;
6158         // This symbol requires a GOT entry.
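        // For an IFUNC symbol the GOT entry must hold the PLT address, hence
        // add_local_plt below; anything else gets a plain add_local. In a
        // position-independent link a RELATIVE dynamic reloc is also emitted
        // so the loader can fix up the new entry.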
6159 if (is_ifunc) 6160 is_new = got->add_local_plt(object, r_sym, GOT_TYPE_STANDARD); 6161 else 6162 is_new = got->add_local(object, r_sym, GOT_TYPE_STANDARD); 6163 if (is_new && parameters->options().output_is_position_independent()) 6164 target->rela_dyn_section(layout)-> 6165 add_local_relative(object, 6166 r_sym, 6167 elfcpp::R_AARCH64_RELATIVE, 6168 got, 6169 object->local_got_offset(r_sym, 6170 GOT_TYPE_STANDARD), 6171 0, 6172 false); 6173 } 6174 break; 6175 6176 case elfcpp::R_AARCH64_MOVW_UABS_G0: // 263 6177 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC: // 264 6178 case elfcpp::R_AARCH64_MOVW_UABS_G1: // 265 6179 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC: // 266 6180 case elfcpp::R_AARCH64_MOVW_UABS_G2: // 267 6181 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC: // 268 6182 case elfcpp::R_AARCH64_MOVW_UABS_G3: // 269 6183 case elfcpp::R_AARCH64_MOVW_SABS_G0: // 270 6184 case elfcpp::R_AARCH64_MOVW_SABS_G1: // 271 6185 case elfcpp::R_AARCH64_MOVW_SABS_G2: // 272 6186 if (parameters->options().output_is_position_independent()) 6187 { 6188 gold_error(_("%s: unsupported reloc %u in pos independent link."), 6189 object->name().c_str(), r_type); 6190 } 6191 break; 6192 6193 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273 6194 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274 6195 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275 6196 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276 6197 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277 6198 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278 6199 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284 6200 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285 6201 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286 6202 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299 6203 break; 6204 6205 // Control flow, pc-relative. We don't need to do anything for a relative 6206 // addressing relocation against a local symbol if it does not reference 6207 // the GOT. 6208 case elfcpp::R_AARCH64_TSTBR14: 6209 case elfcpp::R_AARCH64_CONDBR19: 6210 case elfcpp::R_AARCH64_JUMP26: 6211 case elfcpp::R_AARCH64_CALL26: 6212 break; 6213 6214 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 6215 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 6216 { 6217 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 6218 optimize_tls_reloc(!parameters->options().shared(), r_type); 6219 if (tlsopt == tls::TLSOPT_TO_LE) 6220 break; 6221 6222 layout->set_has_static_tls(); 6223 // Create a GOT entry for the tp-relative offset. 
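        // In a dynamic link the offset is filled in by the dynamic linker
        // through an R_AARCH64_TLS_TPREL64 reloc; in a static link it is
        // resolved at link time via a static reloc recorded against the
        // GOT slot.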
6224 if (!parameters->doing_static_link()) 6225 { 6226 Output_data_got_aarch64<size, big_endian>* got = 6227 target->got_section(symtab, layout); 6228 got->add_local_with_rel(object, r_sym, GOT_TYPE_TLS_OFFSET, 6229 target->rela_dyn_section(layout), 6230 elfcpp::R_AARCH64_TLS_TPREL64); 6231 } 6232 else if (!object->local_has_got_offset(r_sym, 6233 GOT_TYPE_TLS_OFFSET)) 6234 { 6235 Output_data_got_aarch64<size, big_endian>* got = 6236 target->got_section(symtab, layout); 6237 got->add_local(object, r_sym, GOT_TYPE_TLS_OFFSET); 6238 unsigned int got_offset = 6239 object->local_got_offset(r_sym, GOT_TYPE_TLS_OFFSET); 6240 const elfcpp::Elf_Xword addend = rela.get_r_addend(); 6241 gold_assert(addend == 0); 6242 got->add_static_reloc(got_offset, elfcpp::R_AARCH64_TLS_TPREL64, 6243 object, r_sym); 6244 } 6245 } 6246 break; 6247 6248 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21: 6249 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: 6250 { 6251 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 6252 optimize_tls_reloc(!parameters->options().shared(), r_type); 6253 if (tlsopt == tls::TLSOPT_TO_LE) 6254 { 6255 layout->set_has_static_tls(); 6256 break; 6257 } 6258 gold_assert(tlsopt == tls::TLSOPT_NONE); 6259 6260 Output_data_got_aarch64<size, big_endian>* got = 6261 target->got_section(symtab, layout); 6262 got->add_local_pair_with_rel(object,r_sym, data_shndx, 6263 GOT_TYPE_TLS_PAIR, 6264 target->rela_dyn_section(layout), 6265 elfcpp::R_AARCH64_TLS_DTPMOD64); 6266 } 6267 break; 6268 6269 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2: 6270 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1: 6271 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC: 6272 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0: 6273 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: 6274 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12: 6275 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12: 6276 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: 6277 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12: 6278 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC: 6279 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12: 6280 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC: 6281 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12: 6282 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC: 6283 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12: 6284 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC: 6285 { 6286 layout->set_has_static_tls(); 6287 bool output_is_shared = parameters->options().shared(); 6288 if (output_is_shared) 6289 gold_error(_("%s: unsupported TLSLE reloc %u in shared code."), 6290 object->name().c_str(), r_type); 6291 } 6292 break; 6293 6294 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21: 6295 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: 6296 { 6297 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 6298 optimize_tls_reloc(!parameters->options().shared(), r_type); 6299 if (tlsopt == tls::TLSOPT_NONE) 6300 { 6301 // Create a GOT entry for the module index. 
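            // (got_mod_index_entry creates the DTPMOD64/zero pair at most
            // once and caches its offset, so all Local-Dynamic relocs share
            // the same entry.)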
6302 target->got_mod_index_entry(symtab, layout, object); 6303 } 6304 else if (tlsopt != tls::TLSOPT_TO_LE) 6305 unsupported_reloc_local(object, r_type); 6306 } 6307 break; 6308 6309 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1: 6310 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: 6311 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12: 6312 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: 6313 break; 6314 6315 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21: 6316 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12: 6317 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: 6318 { 6319 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 6320 optimize_tls_reloc(!parameters->options().shared(), r_type); 6321 target->define_tls_base_symbol(symtab, layout); 6322 if (tlsopt == tls::TLSOPT_NONE) 6323 { 6324 // Create reserved PLT and GOT entries for the resolver. 6325 target->reserve_tlsdesc_entries(symtab, layout); 6326 6327 // Generate a double GOT entry with an R_AARCH64_TLSDESC reloc. 6328 // The R_AARCH64_TLSDESC reloc is resolved lazily, so the GOT 6329 // entry needs to be in an area in .got.plt, not .got. Call 6330 // got_section to make sure the section has been created. 6331 target->got_section(symtab, layout); 6332 Output_data_got<size, big_endian>* got = 6333 target->got_tlsdesc_section(); 6334 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info()); 6335 if (!object->local_has_got_offset(r_sym, GOT_TYPE_TLS_DESC)) 6336 { 6337 unsigned int got_offset = got->add_constant(0); 6338 got->add_constant(0); 6339 object->set_local_got_offset(r_sym, GOT_TYPE_TLS_DESC, 6340 got_offset); 6341 Reloc_section* rt = target->rela_tlsdesc_section(layout); 6342 // We store the arguments we need in a vector, and use 6343 // the index into the vector as the parameter to pass 6344 // to the target specific routines. 6345 uintptr_t intarg = target->add_tlsdesc_info(object, r_sym); 6346 void* arg = reinterpret_cast<void*>(intarg); 6347 rt->add_target_specific(elfcpp::R_AARCH64_TLSDESC, arg, 6348 got, got_offset, 0); 6349 } 6350 } 6351 else if (tlsopt != tls::TLSOPT_TO_LE) 6352 unsupported_reloc_local(object, r_type); 6353 } 6354 break; 6355 6356 case elfcpp::R_AARCH64_TLSDESC_CALL: 6357 break; 6358 6359 default: 6360 unsupported_reloc_local(object, r_type); 6361 } 6362 } 6363 6364 6365 // Report an unsupported relocation against a global symbol. 6366 6367 template<int size, bool big_endian> 6368 void 6369 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_global( 6370 Sized_relobj_file<size, big_endian>* object, 6371 unsigned int r_type, 6372 Symbol* gsym) 6373 { 6374 gold_error(_("%s: unsupported reloc %u against global symbol %s"), 6375 object->name().c_str(), r_type, gsym->demangled_name().c_str()); 6376 } 6377 6378 template<int size, bool big_endian> 6379 inline void 6380 Target_aarch64<size, big_endian>::Scan::global( 6381 Symbol_table* symtab, 6382 Layout* layout, 6383 Target_aarch64<size, big_endian>* target, 6384 Sized_relobj_file<size, big_endian> * object, 6385 unsigned int data_shndx, 6386 Output_section* output_section, 6387 const elfcpp::Rela<size, big_endian>& rela, 6388 unsigned int r_type, 6389 Symbol* gsym) 6390 { 6391 // A STT_GNU_IFUNC symbol may require a PLT entry. 
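  // (Non-TLS references to an IFUNC symbol are resolved through the PLT so
  // that every reference observes the address chosen by the resolver;
  // reloc_needs_plt_for_ifunc above rejects TLS references.)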
6392 if (gsym->type() == elfcpp::STT_GNU_IFUNC 6393 && this->reloc_needs_plt_for_ifunc(object, r_type)) 6394 target->make_plt_entry(symtab, layout, gsym); 6395 6396 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian> 6397 Reloc_section; 6398 const AArch64_reloc_property* arp = 6399 aarch64_reloc_property_table->get_reloc_property(r_type); 6400 gold_assert(arp != NULL); 6401 6402 switch (r_type) 6403 { 6404 case elfcpp::R_AARCH64_NONE: 6405 break; 6406 6407 case elfcpp::R_AARCH64_ABS16: 6408 case elfcpp::R_AARCH64_ABS32: 6409 case elfcpp::R_AARCH64_ABS64: 6410 { 6411 // Make a PLT entry if necessary. 6412 if (gsym->needs_plt_entry()) 6413 { 6414 target->make_plt_entry(symtab, layout, gsym); 6415 // Since this is not a PC-relative relocation, we may be 6416 // taking the address of a function. In that case we need to 6417 // set the entry in the dynamic symbol table to the address of 6418 // the PLT entry. 6419 if (gsym->is_from_dynobj() && !parameters->options().shared()) 6420 gsym->set_needs_dynsym_value(); 6421 } 6422 // Make a dynamic relocation if necessary. 6423 if (gsym->needs_dynamic_reloc(arp->reference_flags())) 6424 { 6425 if (!parameters->options().output_is_position_independent() 6426 && gsym->may_need_copy_reloc()) 6427 { 6428 target->copy_reloc(symtab, layout, object, 6429 data_shndx, output_section, gsym, rela); 6430 } 6431 else if (r_type == elfcpp::R_AARCH64_ABS64 6432 && gsym->type() == elfcpp::STT_GNU_IFUNC 6433 && gsym->can_use_relative_reloc(false) 6434 && !gsym->is_from_dynobj() 6435 && !gsym->is_undefined() 6436 && !gsym->is_preemptible()) 6437 { 6438 // Use an IRELATIVE reloc for a locally defined STT_GNU_IFUNC 6439 // symbol. This makes a function address in a PIE executable 6440 // match the address in a shared library that it links against. 6441 Reloc_section* rela_dyn = 6442 target->rela_irelative_section(layout); 6443 unsigned int r_type = elfcpp::R_AARCH64_IRELATIVE; 6444 rela_dyn->add_symbolless_global_addend(gsym, r_type, 6445 output_section, object, 6446 data_shndx, 6447 rela.get_r_offset(), 6448 rela.get_r_addend()); 6449 } 6450 else if (r_type == elfcpp::R_AARCH64_ABS64 6451 && gsym->can_use_relative_reloc(false)) 6452 { 6453 Reloc_section* rela_dyn = target->rela_dyn_section(layout); 6454 rela_dyn->add_global_relative(gsym, 6455 elfcpp::R_AARCH64_RELATIVE, 6456 output_section, 6457 object, 6458 data_shndx, 6459 rela.get_r_offset(), 6460 rela.get_r_addend(), 6461 false); 6462 } 6463 else 6464 { 6465 check_non_pic(object, r_type); 6466 Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>* 6467 rela_dyn = target->rela_dyn_section(layout); 6468 rela_dyn->add_global( 6469 gsym, r_type, output_section, object, 6470 data_shndx, rela.get_r_offset(),rela.get_r_addend()); 6471 } 6472 } 6473 } 6474 break; 6475 6476 case elfcpp::R_AARCH64_PREL16: 6477 case elfcpp::R_AARCH64_PREL32: 6478 case elfcpp::R_AARCH64_PREL64: 6479 // This is used to fill the GOT absolute address. 
6480 if (gsym->needs_plt_entry()) 6481 { 6482 target->make_plt_entry(symtab, layout, gsym); 6483 } 6484 break; 6485 6486 case elfcpp::R_AARCH64_MOVW_UABS_G0: // 263 6487 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC: // 264 6488 case elfcpp::R_AARCH64_MOVW_UABS_G1: // 265 6489 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC: // 266 6490 case elfcpp::R_AARCH64_MOVW_UABS_G2: // 267 6491 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC: // 268 6492 case elfcpp::R_AARCH64_MOVW_UABS_G3: // 269 6493 case elfcpp::R_AARCH64_MOVW_SABS_G0: // 270 6494 case elfcpp::R_AARCH64_MOVW_SABS_G1: // 271 6495 case elfcpp::R_AARCH64_MOVW_SABS_G2: // 272 6496 if (parameters->options().output_is_position_independent()) 6497 { 6498 gold_error(_("%s: unsupported reloc %u in pos independent link."), 6499 object->name().c_str(), r_type); 6500 } 6501 // Make a PLT entry if necessary. 6502 if (gsym->needs_plt_entry()) 6503 { 6504 target->make_plt_entry(symtab, layout, gsym); 6505 // Since this is not a PC-relative relocation, we may be 6506 // taking the address of a function. In that case we need to 6507 // set the entry in the dynamic symbol table to the address of 6508 // the PLT entry. 6509 if (gsym->is_from_dynobj() && !parameters->options().shared()) 6510 gsym->set_needs_dynsym_value(); 6511 } 6512 break; 6513 6514 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273 6515 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274 6516 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275 6517 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276 6518 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277 6519 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278 6520 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284 6521 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285 6522 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286 6523 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299 6524 { 6525 if (gsym->needs_plt_entry()) 6526 target->make_plt_entry(symtab, layout, gsym); 6527 // Make a dynamic relocation if necessary. 6528 if (gsym->needs_dynamic_reloc(arp->reference_flags())) 6529 { 6530 if (parameters->options().output_is_executable() 6531 && gsym->may_need_copy_reloc()) 6532 { 6533 target->copy_reloc(symtab, layout, object, 6534 data_shndx, output_section, gsym, rela); 6535 } 6536 } 6537 break; 6538 } 6539 6540 case elfcpp::R_AARCH64_ADR_GOT_PAGE: 6541 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC: 6542 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15: 6543 { 6544 // The above relocations are used to access GOT entries. 6545 // Note a GOT entry is an *address* to a symbol. 6546 // The symbol requires a GOT entry 6547 Output_data_got_aarch64<size, big_endian>* got = 6548 target->got_section(symtab, layout); 6549 if (gsym->final_value_is_known()) 6550 { 6551 // For a STT_GNU_IFUNC symbol we want the PLT address. 6552 if (gsym->type() == elfcpp::STT_GNU_IFUNC) 6553 got->add_global_plt(gsym, GOT_TYPE_STANDARD); 6554 else 6555 got->add_global(gsym, GOT_TYPE_STANDARD); 6556 } 6557 else 6558 { 6559 // If this symbol is not fully resolved, we need to add a dynamic 6560 // relocation for it. 6561 Reloc_section* rela_dyn = target->rela_dyn_section(layout); 6562 6563 // Use a GLOB_DAT rather than a RELATIVE reloc if: 6564 // 6565 // 1) The symbol may be defined in some other module. 6566 // 2) We are building a shared library and this is a protected 6567 // symbol; using GLOB_DAT means that the dynamic linker can use 6568 // the address of the PLT in the main executable when appropriate 6569 // so that function address comparisons work. 
6570 // 3) This is a STT_GNU_IFUNC symbol in position dependent code, 6571 // again so that function address comparisons work. 6572 if (gsym->is_from_dynobj() 6573 || gsym->is_undefined() 6574 || gsym->is_preemptible() 6575 || (gsym->visibility() == elfcpp::STV_PROTECTED 6576 && parameters->options().shared()) 6577 || (gsym->type() == elfcpp::STT_GNU_IFUNC 6578 && parameters->options().output_is_position_independent())) 6579 got->add_global_with_rel(gsym, GOT_TYPE_STANDARD, 6580 rela_dyn, elfcpp::R_AARCH64_GLOB_DAT); 6581 else 6582 { 6583 // For a STT_GNU_IFUNC symbol we want to write the PLT 6584 // offset into the GOT, so that function pointer 6585 // comparisons work correctly. 6586 bool is_new; 6587 if (gsym->type() != elfcpp::STT_GNU_IFUNC) 6588 is_new = got->add_global(gsym, GOT_TYPE_STANDARD); 6589 else 6590 { 6591 is_new = got->add_global_plt(gsym, GOT_TYPE_STANDARD); 6592 // Tell the dynamic linker to use the PLT address 6593 // when resolving relocations. 6594 if (gsym->is_from_dynobj() 6595 && !parameters->options().shared()) 6596 gsym->set_needs_dynsym_value(); 6597 } 6598 if (is_new) 6599 { 6600 rela_dyn->add_global_relative( 6601 gsym, elfcpp::R_AARCH64_RELATIVE, 6602 got, 6603 gsym->got_offset(GOT_TYPE_STANDARD), 6604 0, 6605 false); 6606 } 6607 } 6608 } 6609 break; 6610 } 6611 6612 case elfcpp::R_AARCH64_TSTBR14: 6613 case elfcpp::R_AARCH64_CONDBR19: 6614 case elfcpp::R_AARCH64_JUMP26: 6615 case elfcpp::R_AARCH64_CALL26: 6616 { 6617 if (gsym->final_value_is_known()) 6618 break; 6619 6620 if (gsym->is_defined() && 6621 !gsym->is_from_dynobj() && 6622 !gsym->is_preemptible()) 6623 break; 6624 6625 // Make plt entry for function call. 6626 target->make_plt_entry(symtab, layout, gsym); 6627 break; 6628 } 6629 6630 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21: 6631 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // General dynamic 6632 { 6633 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 6634 optimize_tls_reloc(gsym->final_value_is_known(), r_type); 6635 if (tlsopt == tls::TLSOPT_TO_LE) 6636 { 6637 layout->set_has_static_tls(); 6638 break; 6639 } 6640 gold_assert(tlsopt == tls::TLSOPT_NONE); 6641 6642 // General dynamic. 6643 Output_data_got_aarch64<size, big_endian>* got = 6644 target->got_section(symtab, layout); 6645 // Create 2 consecutive entries for module index and offset. 6646 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_PAIR, 6647 target->rela_dyn_section(layout), 6648 elfcpp::R_AARCH64_TLS_DTPMOD64, 6649 elfcpp::R_AARCH64_TLS_DTPREL64); 6650 } 6651 break; 6652 6653 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21: 6654 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local dynamic 6655 { 6656 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 6657 optimize_tls_reloc(!parameters->options().shared(), r_type); 6658 if (tlsopt == tls::TLSOPT_NONE) 6659 { 6660 // Create a GOT entry for the module index. 
6661 target->got_mod_index_entry(symtab, layout, object); 6662 } 6663 else if (tlsopt != tls::TLSOPT_TO_LE) 6664 unsupported_reloc_local(object, r_type); 6665 } 6666 break; 6667 6668 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1: 6669 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: 6670 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12: 6671 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local dynamic 6672 break; 6673 6674 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 6675 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial executable 6676 { 6677 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 6678 optimize_tls_reloc(gsym->final_value_is_known(), r_type); 6679 if (tlsopt == tls::TLSOPT_TO_LE) 6680 break; 6681 6682 layout->set_has_static_tls(); 6683 // Create a GOT entry for the tp-relative offset. 6684 Output_data_got_aarch64<size, big_endian>* got 6685 = target->got_section(symtab, layout); 6686 if (!parameters->doing_static_link()) 6687 { 6688 got->add_global_with_rel( 6689 gsym, GOT_TYPE_TLS_OFFSET, 6690 target->rela_dyn_section(layout), 6691 elfcpp::R_AARCH64_TLS_TPREL64); 6692 } 6693 if (!gsym->has_got_offset(GOT_TYPE_TLS_OFFSET)) 6694 { 6695 got->add_global(gsym, GOT_TYPE_TLS_OFFSET); 6696 unsigned int got_offset = 6697 gsym->got_offset(GOT_TYPE_TLS_OFFSET); 6698 const elfcpp::Elf_Xword addend = rela.get_r_addend(); 6699 gold_assert(addend == 0); 6700 got->add_static_reloc(got_offset, 6701 elfcpp::R_AARCH64_TLS_TPREL64, gsym); 6702 } 6703 } 6704 break; 6705 6706 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2: 6707 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1: 6708 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC: 6709 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0: 6710 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: 6711 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12: 6712 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12: 6713 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: 6714 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12: 6715 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC: 6716 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12: 6717 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC: 6718 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12: 6719 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC: 6720 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12: 6721 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC: // Local executable 6722 layout->set_has_static_tls(); 6723 if (parameters->options().shared()) 6724 gold_error(_("%s: unsupported TLSLE reloc type %u in shared objects."), 6725 object->name().c_str(), r_type); 6726 break; 6727 6728 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21: 6729 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12: 6730 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: // TLS descriptor 6731 { 6732 target->define_tls_base_symbol(symtab, layout); 6733 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 6734 optimize_tls_reloc(gsym->final_value_is_known(), r_type); 6735 if (tlsopt == tls::TLSOPT_NONE) 6736 { 6737 // Create reserved PLT and GOT entries for the resolver. 6738 target->reserve_tlsdesc_entries(symtab, layout); 6739 6740 // Create a double GOT entry with an R_AARCH64_TLSDESC 6741 // relocation. The R_AARCH64_TLSDESC is resolved lazily, so the GOT 6742 // entry needs to be in an area in .got.plt, not .got. Call 6743 // got_section to make sure the section has been created. 
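            // (The two GOT words form the TLS descriptor itself: a resolver
            // function pointer and its argument, both filled in by the
            // dynamic linker when it processes the R_AARCH64_TLSDESC reloc.)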
6744 target->got_section(symtab, layout); 6745 Output_data_got<size, big_endian>* got = 6746 target->got_tlsdesc_section(); 6747 Reloc_section* rt = target->rela_tlsdesc_section(layout); 6748 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_DESC, rt, 6749 elfcpp::R_AARCH64_TLSDESC, 0); 6750 } 6751 else if (tlsopt == tls::TLSOPT_TO_IE) 6752 { 6753 // Create a GOT entry for the tp-relative offset. 6754 Output_data_got<size, big_endian>* got 6755 = target->got_section(symtab, layout); 6756 got->add_global_with_rel(gsym, GOT_TYPE_TLS_OFFSET, 6757 target->rela_dyn_section(layout), 6758 elfcpp::R_AARCH64_TLS_TPREL64); 6759 } 6760 else if (tlsopt != tls::TLSOPT_TO_LE) 6761 unsupported_reloc_global(object, r_type, gsym); 6762 } 6763 break; 6764 6765 case elfcpp::R_AARCH64_TLSDESC_CALL: 6766 break; 6767 6768 default: 6769 gold_error(_("%s: unsupported reloc type in global scan"), 6770 aarch64_reloc_property_table-> 6771 reloc_name_in_error_message(r_type).c_str()); 6772 } 6773 return; 6774 } // End of Scan::global 6775 6776 6777 // Create the PLT section. 6778 template<int size, bool big_endian> 6779 void 6780 Target_aarch64<size, big_endian>::make_plt_section( 6781 Symbol_table* symtab, Layout* layout) 6782 { 6783 if (this->plt_ == NULL) 6784 { 6785 // Create the GOT section first. 6786 this->got_section(symtab, layout); 6787 6788 this->plt_ = this->make_data_plt(layout, this->got_, this->got_plt_, 6789 this->got_irelative_); 6790 6791 layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS, 6792 (elfcpp::SHF_ALLOC 6793 | elfcpp::SHF_EXECINSTR), 6794 this->plt_, ORDER_PLT, false); 6795 6796 // Make the sh_info field of .rela.plt point to .plt. 6797 Output_section* rela_plt_os = this->plt_->rela_plt()->output_section(); 6798 rela_plt_os->set_info_section(this->plt_->output_section()); 6799 } 6800 } 6801 6802 // Return the section for TLSDESC relocations. 6803 6804 template<int size, bool big_endian> 6805 typename Target_aarch64<size, big_endian>::Reloc_section* 6806 Target_aarch64<size, big_endian>::rela_tlsdesc_section(Layout* layout) const 6807 { 6808 return this->plt_section()->rela_tlsdesc(layout); 6809 } 6810 6811 // Create a PLT entry for a global symbol. 6812 6813 template<int size, bool big_endian> 6814 void 6815 Target_aarch64<size, big_endian>::make_plt_entry( 6816 Symbol_table* symtab, 6817 Layout* layout, 6818 Symbol* gsym) 6819 { 6820 if (gsym->has_plt_offset()) 6821 return; 6822 6823 if (this->plt_ == NULL) 6824 this->make_plt_section(symtab, layout); 6825 6826 this->plt_->add_entry(symtab, layout, gsym); 6827 } 6828 6829 // Make a PLT entry for a local STT_GNU_IFUNC symbol. 
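// A local symbol has no Symbol object to carry a PLT offset, so the offset
// is recorded on the object file itself, keyed by the local symbol index.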
6830 6831 template<int size, bool big_endian> 6832 void 6833 Target_aarch64<size, big_endian>::make_local_ifunc_plt_entry( 6834 Symbol_table* symtab, Layout* layout, 6835 Sized_relobj_file<size, big_endian>* relobj, 6836 unsigned int local_sym_index) 6837 { 6838 if (relobj->local_has_plt_offset(local_sym_index)) 6839 return; 6840 if (this->plt_ == NULL) 6841 this->make_plt_section(symtab, layout); 6842 unsigned int plt_offset = this->plt_->add_local_ifunc_entry(symtab, layout, 6843 relobj, 6844 local_sym_index); 6845 relobj->set_local_plt_offset(local_sym_index, plt_offset); 6846 } 6847 6848 template<int size, bool big_endian> 6849 void 6850 Target_aarch64<size, big_endian>::gc_process_relocs( 6851 Symbol_table* symtab, 6852 Layout* layout, 6853 Sized_relobj_file<size, big_endian>* object, 6854 unsigned int data_shndx, 6855 unsigned int sh_type, 6856 const unsigned char* prelocs, 6857 size_t reloc_count, 6858 Output_section* output_section, 6859 bool needs_special_offset_handling, 6860 size_t local_symbol_count, 6861 const unsigned char* plocal_symbols) 6862 { 6863 typedef Target_aarch64<size, big_endian> Aarch64; 6864 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian> 6865 Classify_reloc; 6866 6867 if (sh_type == elfcpp::SHT_REL) 6868 { 6869 return; 6870 } 6871 6872 gold::gc_process_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>( 6873 symtab, 6874 layout, 6875 this, 6876 object, 6877 data_shndx, 6878 prelocs, 6879 reloc_count, 6880 output_section, 6881 needs_special_offset_handling, 6882 local_symbol_count, 6883 plocal_symbols); 6884 } 6885 6886 // Scan relocations for a section. 6887 6888 template<int size, bool big_endian> 6889 void 6890 Target_aarch64<size, big_endian>::scan_relocs( 6891 Symbol_table* symtab, 6892 Layout* layout, 6893 Sized_relobj_file<size, big_endian>* object, 6894 unsigned int data_shndx, 6895 unsigned int sh_type, 6896 const unsigned char* prelocs, 6897 size_t reloc_count, 6898 Output_section* output_section, 6899 bool needs_special_offset_handling, 6900 size_t local_symbol_count, 6901 const unsigned char* plocal_symbols) 6902 { 6903 typedef Target_aarch64<size, big_endian> Aarch64; 6904 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian> 6905 Classify_reloc; 6906 6907 if (sh_type == elfcpp::SHT_REL) 6908 { 6909 gold_error(_("%s: unsupported REL reloc section"), 6910 object->name().c_str()); 6911 return; 6912 } 6913 6914 gold::scan_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>( 6915 symtab, 6916 layout, 6917 this, 6918 object, 6919 data_shndx, 6920 prelocs, 6921 reloc_count, 6922 output_section, 6923 needs_special_offset_handling, 6924 local_symbol_count, 6925 plocal_symbols); 6926 } 6927 6928 // Return the value to use for a dynamic which requires special 6929 // treatment. This is how we support equality comparisons of function 6930 // pointers across shared library boundaries, as described in the 6931 // processor specific ABI supplement. 6932 6933 template<int size, bool big_endian> 6934 uint64_t 6935 Target_aarch64<size, big_endian>::do_dynsym_value(const Symbol* gsym) const 6936 { 6937 gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset()); 6938 return this->plt_address_for_global(gsym); 6939 } 6940 6941 6942 // Finalize the sections. 6943 6944 template<int size, bool big_endian> 6945 void 6946 Target_aarch64<size, big_endian>::do_finalize_sections( 6947 Layout* layout, 6948 const Input_objects*, 6949 Symbol_table* symtab) 6950 { 6951 const Reloc_section* rel_plt = (this->plt_ == NULL 6952 ? 
NULL 6953 : this->plt_->rela_plt()); 6954 layout->add_target_dynamic_tags(false, this->got_plt_, rel_plt, 6955 this->rela_dyn_, true, false, false); 6956 6957 // Emit any relocs we saved in an attempt to avoid generating COPY 6958 // relocs. 6959 if (this->copy_relocs_.any_saved_relocs()) 6960 this->copy_relocs_.emit(this->rela_dyn_section(layout)); 6961 6962 // Fill in some more dynamic tags. 6963 Output_data_dynamic* const odyn = layout->dynamic_data(); 6964 if (odyn != NULL) 6965 { 6966 if (this->plt_ != NULL 6967 && this->plt_->output_section() != NULL 6968 && this->plt_ ->has_tlsdesc_entry()) 6969 { 6970 unsigned int plt_offset = this->plt_->get_tlsdesc_plt_offset(); 6971 unsigned int got_offset = this->plt_->get_tlsdesc_got_offset(); 6972 this->got_->finalize_data_size(); 6973 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_PLT, 6974 this->plt_, plt_offset); 6975 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_GOT, 6976 this->got_, got_offset); 6977 } 6978 } 6979 6980 // Set the size of the _GLOBAL_OFFSET_TABLE_ symbol to the size of 6981 // the .got section. 6982 Symbol* sym = this->global_offset_table_; 6983 if (sym != NULL) 6984 { 6985 uint64_t data_size = this->got_->current_data_size(); 6986 symtab->get_sized_symbol<size>(sym)->set_symsize(data_size); 6987 6988 // If the .got section is more than 0x8000 bytes, we add 6989 // 0x8000 to the value of _GLOBAL_OFFSET_TABLE_, so that 16 6990 // bit relocations have a greater chance of working. 6991 if (data_size >= 0x8000) 6992 symtab->get_sized_symbol<size>(sym)->set_value( 6993 symtab->get_sized_symbol<size>(sym)->value() + 0x8000); 6994 } 6995 6996 if (parameters->doing_static_link() 6997 && (this->plt_ == NULL || !this->plt_->has_irelative_section())) 6998 { 6999 // If linking statically, make sure that the __rela_iplt symbols 7000 // were defined if necessary, even if we didn't create a PLT. 7001 static const Define_symbol_in_segment syms[] = 7002 { 7003 { 7004 "__rela_iplt_start", // name 7005 elfcpp::PT_LOAD, // segment_type 7006 elfcpp::PF_W, // segment_flags_set 7007 elfcpp::PF(0), // segment_flags_clear 7008 0, // value 7009 0, // size 7010 elfcpp::STT_NOTYPE, // type 7011 elfcpp::STB_GLOBAL, // binding 7012 elfcpp::STV_HIDDEN, // visibility 7013 0, // nonvis 7014 Symbol::SEGMENT_START, // offset_from_base 7015 true // only_if_ref 7016 }, 7017 { 7018 "__rela_iplt_end", // name 7019 elfcpp::PT_LOAD, // segment_type 7020 elfcpp::PF_W, // segment_flags_set 7021 elfcpp::PF(0), // segment_flags_clear 7022 0, // value 7023 0, // size 7024 elfcpp::STT_NOTYPE, // type 7025 elfcpp::STB_GLOBAL, // binding 7026 elfcpp::STV_HIDDEN, // visibility 7027 0, // nonvis 7028 Symbol::SEGMENT_START, // offset_from_base 7029 true // only_if_ref 7030 } 7031 }; 7032 7033 symtab->define_symbols(layout, 2, syms, 7034 layout->script_options()->saw_sections_clause()); 7035 } 7036 7037 return; 7038 } 7039 7040 // Perform a relocation. 
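// As a rough sketch of what follows: relocate() picks the symbol value
// (redirecting to a PLT entry for IFUNC or preemptible symbols), computes a
// GOT offset where the relocation needs one, and then dispatches on the
// relocation type.  The GOT offset is biased by the same 0x8000 applied to
// _GLOBAL_OFFSET_TABLE_ in do_finalize_sections() above, so that signed
// 16-bit GOT-relative fields can reach the whole table.  A worked example
// with made-up numbers:
//
//   current_data_size(.got)  = 0x9000    (>= 0x8000, so got_base = 0x8000)
//   gsym->got_offset()       = 0x8010
//   got_offset               = 0x8010 - 0x8000 = 0x10
//
// A signed 16-bit field measured from the biased _GLOBAL_OFFSET_TABLE_ can
// then address GOT entries in [-0x8000, 0x7fff], i.e. up to a 64K table
// instead of 32K.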
7041 7042 template<int size, bool big_endian> 7043 inline bool 7044 Target_aarch64<size, big_endian>::Relocate::relocate( 7045 const Relocate_info<size, big_endian>* relinfo, 7046 unsigned int, 7047 Target_aarch64<size, big_endian>* target, 7048 Output_section* , 7049 size_t relnum, 7050 const unsigned char* preloc, 7051 const Sized_symbol<size>* gsym, 7052 const Symbol_value<size>* psymval, 7053 unsigned char* view, 7054 typename elfcpp::Elf_types<size>::Elf_Addr address, 7055 section_size_type /* view_size */) 7056 { 7057 if (view == NULL) 7058 return true; 7059 7060 typedef AArch64_relocate_functions<size, big_endian> Reloc; 7061 7062 const elfcpp::Rela<size, big_endian> rela(preloc); 7063 unsigned int r_type = elfcpp::elf_r_type<size>(rela.get_r_info()); 7064 const AArch64_reloc_property* reloc_property = 7065 aarch64_reloc_property_table->get_reloc_property(r_type); 7066 7067 if (reloc_property == NULL) 7068 { 7069 std::string reloc_name = 7070 aarch64_reloc_property_table->reloc_name_in_error_message(r_type); 7071 gold_error_at_location(relinfo, relnum, rela.get_r_offset(), 7072 _("cannot relocate %s in object file"), 7073 reloc_name.c_str()); 7074 return true; 7075 } 7076 7077 const Sized_relobj_file<size, big_endian>* object = relinfo->object; 7078 7079 // Pick the value to use for symbols defined in the PLT. 7080 Symbol_value<size> symval; 7081 if (gsym != NULL 7082 && gsym->use_plt_offset(reloc_property->reference_flags())) 7083 { 7084 symval.set_output_value(target->plt_address_for_global(gsym)); 7085 psymval = &symval; 7086 } 7087 else if (gsym == NULL && psymval->is_ifunc_symbol()) 7088 { 7089 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info()); 7090 if (object->local_has_plt_offset(r_sym)) 7091 { 7092 symval.set_output_value(target->plt_address_for_local(object, r_sym)); 7093 psymval = &symval; 7094 } 7095 } 7096 7097 const elfcpp::Elf_Xword addend = rela.get_r_addend(); 7098 7099 // Get the GOT offset if needed. 7100 // For aarch64, the GOT pointer points to the start of the GOT section. 7101 bool have_got_offset = false; 7102 int got_offset = 0; 7103 int got_base = (target->got_ != NULL 7104 ? (target->got_->current_data_size() >= 0x8000 7105 ? 
0x8000 : 0) 7106 : 0); 7107 switch (r_type) 7108 { 7109 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0: 7110 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0_NC: 7111 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1: 7112 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1_NC: 7113 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2: 7114 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2_NC: 7115 case elfcpp::R_AARCH64_MOVW_GOTOFF_G3: 7116 case elfcpp::R_AARCH64_GOTREL64: 7117 case elfcpp::R_AARCH64_GOTREL32: 7118 case elfcpp::R_AARCH64_GOT_LD_PREL19: 7119 case elfcpp::R_AARCH64_LD64_GOTOFF_LO15: 7120 case elfcpp::R_AARCH64_ADR_GOT_PAGE: 7121 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC: 7122 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15: 7123 if (gsym != NULL) 7124 { 7125 gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD)); 7126 got_offset = gsym->got_offset(GOT_TYPE_STANDARD) - got_base; 7127 } 7128 else 7129 { 7130 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info()); 7131 gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD)); 7132 got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD) 7133 - got_base); 7134 } 7135 have_got_offset = true; 7136 break; 7137 7138 default: 7139 break; 7140 } 7141 7142 typename Reloc::Status reloc_status = Reloc::STATUS_OKAY; 7143 typename elfcpp::Elf_types<size>::Elf_Addr value; 7144 switch (r_type) 7145 { 7146 case elfcpp::R_AARCH64_NONE: 7147 break; 7148 7149 case elfcpp::R_AARCH64_ABS64: 7150 if (!parameters->options().apply_dynamic_relocs() 7151 && parameters->options().output_is_position_independent() 7152 && gsym != NULL 7153 && gsym->needs_dynamic_reloc(reloc_property->reference_flags()) 7154 && !gsym->can_use_relative_reloc(false)) 7155 // We have generated an absolute dynamic relocation, so do not 7156 // apply the relocation statically. (Works around bugs in older 7157 // Android dynamic linkers.) 7158 break; 7159 reloc_status = Reloc::template rela_ua<64>( 7160 view, object, psymval, addend, reloc_property); 7161 break; 7162 7163 case elfcpp::R_AARCH64_ABS32: 7164 if (!parameters->options().apply_dynamic_relocs() 7165 && parameters->options().output_is_position_independent() 7166 && gsym != NULL 7167 && gsym->needs_dynamic_reloc(reloc_property->reference_flags())) 7168 // We have generated an absolute dynamic relocation, so do not 7169 // apply the relocation statically. (Works around bugs in older 7170 // Android dynamic linkers.) 7171 break; 7172 reloc_status = Reloc::template rela_ua<32>( 7173 view, object, psymval, addend, reloc_property); 7174 break; 7175 7176 case elfcpp::R_AARCH64_ABS16: 7177 if (!parameters->options().apply_dynamic_relocs() 7178 && parameters->options().output_is_position_independent() 7179 && gsym != NULL 7180 && gsym->needs_dynamic_reloc(reloc_property->reference_flags())) 7181 // We have generated an absolute dynamic relocation, so do not 7182 // apply the relocation statically. (Works around bugs in older 7183 // Android dynamic linkers.) 
7184 break; 7185 reloc_status = Reloc::template rela_ua<16>( 7186 view, object, psymval, addend, reloc_property); 7187 break; 7188 7189 case elfcpp::R_AARCH64_PREL64: 7190 reloc_status = Reloc::template pcrela_ua<64>( 7191 view, object, psymval, addend, address, reloc_property); 7192 break; 7193 7194 case elfcpp::R_AARCH64_PREL32: 7195 reloc_status = Reloc::template pcrela_ua<32>( 7196 view, object, psymval, addend, address, reloc_property); 7197 break; 7198 7199 case elfcpp::R_AARCH64_PREL16: 7200 reloc_status = Reloc::template pcrela_ua<16>( 7201 view, object, psymval, addend, address, reloc_property); 7202 break; 7203 7204 case elfcpp::R_AARCH64_MOVW_UABS_G0: 7205 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC: 7206 case elfcpp::R_AARCH64_MOVW_UABS_G1: 7207 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC: 7208 case elfcpp::R_AARCH64_MOVW_UABS_G2: 7209 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC: 7210 case elfcpp::R_AARCH64_MOVW_UABS_G3: 7211 reloc_status = Reloc::template rela_general<32>( 7212 view, object, psymval, addend, reloc_property); 7213 break; 7214 case elfcpp::R_AARCH64_MOVW_SABS_G0: 7215 case elfcpp::R_AARCH64_MOVW_SABS_G1: 7216 case elfcpp::R_AARCH64_MOVW_SABS_G2: 7217 reloc_status = Reloc::movnz(view, psymval->value(object, addend), 7218 reloc_property); 7219 break; 7220 7221 case elfcpp::R_AARCH64_LD_PREL_LO19: 7222 reloc_status = Reloc::template pcrela_general<32>( 7223 view, object, psymval, addend, address, reloc_property); 7224 break; 7225 7226 case elfcpp::R_AARCH64_ADR_PREL_LO21: 7227 reloc_status = Reloc::adr(view, object, psymval, addend, 7228 address, reloc_property); 7229 break; 7230 7231 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: 7232 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: 7233 reloc_status = Reloc::adrp(view, object, psymval, addend, address, 7234 reloc_property); 7235 break; 7236 7237 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: 7238 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: 7239 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: 7240 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: 7241 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: 7242 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: 7243 reloc_status = Reloc::template rela_general<32>( 7244 view, object, psymval, addend, reloc_property); 7245 break; 7246 7247 case elfcpp::R_AARCH64_CALL26: 7248 if (this->skip_call_tls_get_addr_) 7249 { 7250 // Double check that the TLSGD insn has been optimized away. 7251 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype; 7252 Insntype insn = elfcpp::Swap<32, big_endian>::readval( 7253 reinterpret_cast<Insntype*>(view)); 7254 gold_assert((insn & 0xff000000) == 0x91000000); 7255 7256 reloc_status = Reloc::STATUS_OKAY; 7257 this->skip_call_tls_get_addr_ = false; 7258 // Return false to stop further processing this reloc. 7259 return false; 7260 } 7261 // Fall through. 7262 case elfcpp::R_AARCH64_JUMP26: 7263 if (Reloc::maybe_apply_stub(r_type, relinfo, rela, view, address, 7264 gsym, psymval, object, 7265 target->stub_group_size_)) 7266 break; 7267 // Fall through. 
7268 case elfcpp::R_AARCH64_TSTBR14: 7269 case elfcpp::R_AARCH64_CONDBR19: 7270 reloc_status = Reloc::template pcrela_general<32>( 7271 view, object, psymval, addend, address, reloc_property); 7272 break; 7273 7274 case elfcpp::R_AARCH64_ADR_GOT_PAGE: 7275 gold_assert(have_got_offset); 7276 value = target->got_->address() + got_base + got_offset; 7277 reloc_status = Reloc::adrp(view, value + addend, address); 7278 break; 7279 7280 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC: 7281 gold_assert(have_got_offset); 7282 value = target->got_->address() + got_base + got_offset; 7283 reloc_status = Reloc::template rela_general<32>( 7284 view, value, addend, reloc_property); 7285 break; 7286 7287 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15: 7288 { 7289 gold_assert(have_got_offset); 7290 value = target->got_->address() + got_base + got_offset + addend - 7291 Reloc::Page(target->got_->address() + got_base); 7292 if ((value & 7) != 0) 7293 reloc_status = Reloc::STATUS_OVERFLOW; 7294 else 7295 reloc_status = Reloc::template reloc_common<32>( 7296 view, value, reloc_property); 7297 break; 7298 } 7299 7300 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21: 7301 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: 7302 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21: 7303 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: 7304 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1: 7305 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: 7306 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12: 7307 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: 7308 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 7309 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 7310 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2: 7311 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1: 7312 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC: 7313 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0: 7314 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: 7315 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12: 7316 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12: 7317 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: 7318 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12: 7319 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC: 7320 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12: 7321 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC: 7322 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12: 7323 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC: 7324 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12: 7325 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC: 7326 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21: 7327 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12: 7328 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: 7329 case elfcpp::R_AARCH64_TLSDESC_CALL: 7330 reloc_status = relocate_tls(relinfo, target, relnum, rela, r_type, 7331 gsym, psymval, view, address); 7332 break; 7333 7334 // These are dynamic relocations, which are unexpected when linking. 7335 case elfcpp::R_AARCH64_COPY: 7336 case elfcpp::R_AARCH64_GLOB_DAT: 7337 case elfcpp::R_AARCH64_JUMP_SLOT: 7338 case elfcpp::R_AARCH64_RELATIVE: 7339 case elfcpp::R_AARCH64_IRELATIVE: 7340 case elfcpp::R_AARCH64_TLS_DTPREL64: 7341 case elfcpp::R_AARCH64_TLS_DTPMOD64: 7342 case elfcpp::R_AARCH64_TLS_TPREL64: 7343 case elfcpp::R_AARCH64_TLSDESC: 7344 gold_error_at_location(relinfo, relnum, rela.get_r_offset(), 7345 _("unexpected reloc %u in object file"), 7346 r_type); 7347 break; 7348 7349 default: 7350 gold_error_at_location(relinfo, relnum, rela.get_r_offset(), 7351 _("unsupported reloc %s"), 7352 reloc_property->name().c_str()); 7353 break; 7354 } 7355 7356 // Report any errors. 
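// STATUS_OKAY means the computed value fit the relocation's bitfield,
// STATUS_OVERFLOW means it did not, and STATUS_BAD_RELOC means an insn
// sequence was not what the relaxation code expected.  A rough example of
// the overflow case, with made-up numbers: R_AARCH64_LD_PREL_LO19 stores a
// signed 19-bit word offset, i.e. a PC-relative range of +/-1MiB, so a
// literal placed 0x200000 bytes after the load would be reported as a
// relocation overflow below.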
7357 switch (reloc_status) 7358 { 7359 case Reloc::STATUS_OKAY: 7360 break; 7361 case Reloc::STATUS_OVERFLOW: 7362 gold_error_at_location(relinfo, relnum, rela.get_r_offset(), 7363 _("relocation overflow in %s"), 7364 reloc_property->name().c_str()); 7365 break; 7366 case Reloc::STATUS_BAD_RELOC: 7367 gold_error_at_location( 7368 relinfo, 7369 relnum, 7370 rela.get_r_offset(), 7371 _("unexpected opcode while processing relocation %s"), 7372 reloc_property->name().c_str()); 7373 break; 7374 default: 7375 gold_unreachable(); 7376 } 7377 7378 return true; 7379 } 7380 7381 7382 template<int size, bool big_endian> 7383 inline 7384 typename AArch64_relocate_functions<size, big_endian>::Status 7385 Target_aarch64<size, big_endian>::Relocate::relocate_tls( 7386 const Relocate_info<size, big_endian>* relinfo, 7387 Target_aarch64<size, big_endian>* target, 7388 size_t relnum, 7389 const elfcpp::Rela<size, big_endian>& rela, 7390 unsigned int r_type, const Sized_symbol<size>* gsym, 7391 const Symbol_value<size>* psymval, 7392 unsigned char* view, 7393 typename elfcpp::Elf_types<size>::Elf_Addr address) 7394 { 7395 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs; 7396 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address; 7397 7398 Output_segment* tls_segment = relinfo->layout->tls_segment(); 7399 const elfcpp::Elf_Xword addend = rela.get_r_addend(); 7400 const AArch64_reloc_property* reloc_property = 7401 aarch64_reloc_property_table->get_reloc_property(r_type); 7402 gold_assert(reloc_property != NULL); 7403 7404 const bool is_final = (gsym == NULL 7405 ? !parameters->options().shared() 7406 : gsym->final_value_is_known()); 7407 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>:: 7408 optimize_tls_reloc(is_final, r_type); 7409 7410 Sized_relobj_file<size, big_endian>* object = relinfo->object; 7411 int tls_got_offset_type; 7412 switch (r_type) 7413 { 7414 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21: 7415 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // Global-dynamic 7416 { 7417 if (tlsopt == tls::TLSOPT_TO_LE) 7418 { 7419 if (tls_segment == NULL) 7420 { 7421 gold_assert(parameters->errors()->error_count() > 0 7422 || issue_undefined_symbol_error(gsym)); 7423 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 7424 } 7425 return tls_gd_to_le(relinfo, target, rela, r_type, view, 7426 psymval); 7427 } 7428 else if (tlsopt == tls::TLSOPT_NONE) 7429 { 7430 tls_got_offset_type = GOT_TYPE_TLS_PAIR; 7431 // Firstly get the address for the got entry. 7432 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address; 7433 if (gsym != NULL) 7434 { 7435 gold_assert(gsym->has_got_offset(tls_got_offset_type)); 7436 got_entry_address = target->got_->address() + 7437 gsym->got_offset(tls_got_offset_type); 7438 } 7439 else 7440 { 7441 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info()); 7442 gold_assert( 7443 object->local_has_got_offset(r_sym, tls_got_offset_type)); 7444 got_entry_address = target->got_->address() + 7445 object->local_got_offset(r_sym, tls_got_offset_type); 7446 } 7447 7448 // Relocate the address into adrp/ld, adrp/add pair. 
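// As a rough illustration with made-up addresses: the PAGE21 half encodes
// Page(got_entry_address + addend) - Page(address), where Page(x) is
// x & ~0xfff, and the LO12_NC half then supplies the low 12 bits of the GOT
// entry address:
//
//   address (place of the adrp)  = 0x400ff8
//   got_entry_address            = 0x513028
//   adrp immediate               = Page(0x513028) - Page(0x400ff8)
//                                = 0x513000 - 0x400000 = 0x112000
//   ld/add low 12 bits           = 0x513028 & 0xfff = 0x028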
7449 switch (r_type) 7450 { 7451 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21: 7452 return aarch64_reloc_funcs::adrp( 7453 view, got_entry_address + addend, address); 7454 7455 break; 7456 7457 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: 7458 return aarch64_reloc_funcs::template rela_general<32>( 7459 view, got_entry_address, addend, reloc_property); 7460 break; 7461 7462 default: 7463 gold_unreachable(); 7464 } 7465 } 7466 gold_error_at_location(relinfo, relnum, rela.get_r_offset(), 7467 _("unsupported gd_to_ie relaxation on %u"), 7468 r_type); 7469 } 7470 break; 7471 7472 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21: 7473 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local-dynamic 7474 { 7475 if (tlsopt == tls::TLSOPT_TO_LE) 7476 { 7477 if (tls_segment == NULL) 7478 { 7479 gold_assert(parameters->errors()->error_count() > 0 7480 || issue_undefined_symbol_error(gsym)); 7481 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 7482 } 7483 return this->tls_ld_to_le(relinfo, target, rela, r_type, view, 7484 psymval); 7485 } 7486 7487 gold_assert(tlsopt == tls::TLSOPT_NONE); 7488 // Relocate the field with the offset of the GOT entry for 7489 // the module index. 7490 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address; 7491 got_entry_address = (target->got_mod_index_entry(NULL, NULL, NULL) + 7492 target->got_->address()); 7493 7494 switch (r_type) 7495 { 7496 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21: 7497 return aarch64_reloc_funcs::adrp( 7498 view, got_entry_address + addend, address); 7499 break; 7500 7501 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: 7502 return aarch64_reloc_funcs::template rela_general<32>( 7503 view, got_entry_address, addend, reloc_property); 7504 break; 7505 7506 default: 7507 gold_unreachable(); 7508 } 7509 } 7510 break; 7511 7512 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1: 7513 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: 7514 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12: 7515 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local-dynamic 7516 { 7517 AArch64_address value = psymval->value(object, 0); 7518 if (tlsopt == tls::TLSOPT_TO_LE) 7519 { 7520 if (tls_segment == NULL) 7521 { 7522 gold_assert(parameters->errors()->error_count() > 0 7523 || issue_undefined_symbol_error(gsym)); 7524 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 7525 } 7526 } 7527 switch (r_type) 7528 { 7529 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1: 7530 return aarch64_reloc_funcs::movnz(view, value + addend, 7531 reloc_property); 7532 break; 7533 7534 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: 7535 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12: 7536 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: 7537 return aarch64_reloc_funcs::template rela_general<32>( 7538 view, value, addend, reloc_property); 7539 break; 7540 7541 default: 7542 gold_unreachable(); 7543 } 7544 // We should never reach here. 7545 } 7546 break; 7547 7548 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 7549 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial-exec 7550 { 7551 if (tlsopt == tls::TLSOPT_TO_LE) 7552 { 7553 if (tls_segment == NULL) 7554 { 7555 gold_assert(parameters->errors()->error_count() > 0 7556 || issue_undefined_symbol_error(gsym)); 7557 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 7558 } 7559 return tls_ie_to_le(relinfo, target, rela, r_type, view, 7560 psymval); 7561 } 7562 tls_got_offset_type = GOT_TYPE_TLS_OFFSET; 7563 7564 // Firstly get the address for the got entry. 
7565 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address; 7566 if (gsym != NULL) 7567 { 7568 gold_assert(gsym->has_got_offset(tls_got_offset_type)); 7569 got_entry_address = target->got_->address() + 7570 gsym->got_offset(tls_got_offset_type); 7571 } 7572 else 7573 { 7574 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info()); 7575 gold_assert( 7576 object->local_has_got_offset(r_sym, tls_got_offset_type)); 7577 got_entry_address = target->got_->address() + 7578 object->local_got_offset(r_sym, tls_got_offset_type); 7579 } 7580 // Relocate the address into adrp/ld, adrp/add pair. 7581 switch (r_type) 7582 { 7583 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 7584 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend, 7585 address); 7586 break; 7587 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 7588 return aarch64_reloc_funcs::template rela_general<32>( 7589 view, got_entry_address, addend, reloc_property); 7590 default: 7591 gold_unreachable(); 7592 } 7593 } 7594 // We shall never reach here. 7595 break; 7596 7597 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2: 7598 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1: 7599 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC: 7600 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0: 7601 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: 7602 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12: 7603 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12: 7604 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: 7605 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12: 7606 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC: 7607 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12: 7608 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC: 7609 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12: 7610 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC: 7611 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12: 7612 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC: 7613 { 7614 gold_assert(tls_segment != NULL); 7615 AArch64_address value = psymval->value(object, 0); 7616 7617 if (!parameters->options().shared()) 7618 { 7619 AArch64_address aligned_tcb_size = 7620 align_address(target->tcb_size(), 7621 tls_segment->maximum_alignment()); 7622 value += aligned_tcb_size; 7623 switch (r_type) 7624 { 7625 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2: 7626 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1: 7627 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0: 7628 return aarch64_reloc_funcs::movnz(view, value + addend, 7629 reloc_property); 7630 default: 7631 return aarch64_reloc_funcs::template 7632 rela_general<32>(view, 7633 value, 7634 addend, 7635 reloc_property); 7636 } 7637 } 7638 else 7639 gold_error(_("%s: unsupported reloc %u " 7640 "in non-static TLSLE mode."), 7641 object->name().c_str(), r_type); 7642 } 7643 break; 7644 7645 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21: 7646 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12: 7647 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: 7648 case elfcpp::R_AARCH64_TLSDESC_CALL: 7649 { 7650 if (tlsopt == tls::TLSOPT_TO_LE) 7651 { 7652 if (tls_segment == NULL) 7653 { 7654 gold_assert(parameters->errors()->error_count() > 0 7655 || issue_undefined_symbol_error(gsym)); 7656 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 7657 } 7658 return tls_desc_gd_to_le(relinfo, target, rela, r_type, 7659 view, psymval); 7660 } 7661 else 7662 { 7663 tls_got_offset_type = (tlsopt == tls::TLSOPT_TO_IE 7664 ? 
GOT_TYPE_TLS_OFFSET 7665 : GOT_TYPE_TLS_DESC); 7666 int got_tlsdesc_offset = 0; 7667 if (r_type != elfcpp::R_AARCH64_TLSDESC_CALL 7668 && tlsopt == tls::TLSOPT_NONE) 7669 { 7670 // We created GOT entries in the .got.tlsdesc portion of the 7671 // .got.plt section, but the offset stored in the symbol is the 7672 // offset within .got.tlsdesc. 7673 got_tlsdesc_offset = (target->got_tlsdesc_->address() 7674 - target->got_->address()); 7675 } 7676 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address; 7677 if (gsym != NULL) 7678 { 7679 gold_assert(gsym->has_got_offset(tls_got_offset_type)); 7680 got_entry_address = target->got_->address() 7681 + got_tlsdesc_offset 7682 + gsym->got_offset(tls_got_offset_type); 7683 } 7684 else 7685 { 7686 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info()); 7687 gold_assert( 7688 object->local_has_got_offset(r_sym, tls_got_offset_type)); 7689 got_entry_address = target->got_->address() + 7690 got_tlsdesc_offset + 7691 object->local_got_offset(r_sym, tls_got_offset_type); 7692 } 7693 if (tlsopt == tls::TLSOPT_TO_IE) 7694 { 7695 return tls_desc_gd_to_ie(relinfo, target, rela, r_type, 7696 view, psymval, got_entry_address, 7697 address); 7698 } 7699 7700 // Now do tlsdesc relocation. 7701 switch (r_type) 7702 { 7703 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21: 7704 return aarch64_reloc_funcs::adrp(view, 7705 got_entry_address + addend, 7706 address); 7707 break; 7708 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12: 7709 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: 7710 return aarch64_reloc_funcs::template rela_general<32>( 7711 view, got_entry_address, addend, reloc_property); 7712 break; 7713 case elfcpp::R_AARCH64_TLSDESC_CALL: 7714 return aarch64_reloc_funcs::STATUS_OKAY; 7715 break; 7716 default: 7717 gold_unreachable(); 7718 } 7719 } 7720 } 7721 break; 7722 7723 default: 7724 gold_error(_("%s: unsupported TLS reloc %u."), 7725 object->name().c_str(), r_type); 7726 } 7727 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 7728 } // End of relocate_tls. 7729 7730 7731 template<int size, bool big_endian> 7732 inline 7733 typename AArch64_relocate_functions<size, big_endian>::Status 7734 Target_aarch64<size, big_endian>::Relocate::tls_gd_to_le( 7735 const Relocate_info<size, big_endian>* relinfo, 7736 Target_aarch64<size, big_endian>* target, 7737 const elfcpp::Rela<size, big_endian>& rela, 7738 unsigned int r_type, 7739 unsigned char* view, 7740 const Symbol_value<size>* psymval) 7741 { 7742 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs; 7743 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype; 7744 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address; 7745 7746 Insntype* ip = reinterpret_cast<Insntype*>(view); 7747 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip); 7748 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1); 7749 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2); 7750 7751 if (r_type == elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC) 7752 { 7753 // This is the 2nd relocs, optimization should already have been 7754 // done. 
7755 gold_assert((insn1 & 0xfff00000) == 0x91400000); 7756 return aarch64_reloc_funcs::STATUS_OKAY; 7757 } 7758 7759 // The original sequence is - 7760 // 90000000 adrp x0, 0 <main> 7761 // 91000000 add x0, x0, #0x0 7762 // 94000000 bl 0 <__tls_get_addr> 7763 // optimized to sequence - 7764 // d53bd040 mrs x0, tpidr_el0 7765 // 91400000 add x0, x0, #0x0, lsl #12 7766 // 91000000 add x0, x0, #0x0 7767 7768 // Unlike tls_ie_to_le, we change the 3 insns in one function call when we 7769 // encounter the first relocation "R_AARCH64_TLSGD_ADR_PAGE21". Because we 7770 // have to change "bl tls_get_addr", which does not have a corresponding tls 7771 // relocation type. So before proceeding, we need to make sure compiler 7772 // does not change the sequence. 7773 if(!(insn1 == 0x90000000 // adrp x0,0 7774 && insn2 == 0x91000000 // add x0, x0, #0x0 7775 && insn3 == 0x94000000)) // bl 0 7776 { 7777 // Ideally we should give up gd_to_le relaxation and do gd access. 7778 // However the gd_to_le relaxation decision has been made early 7779 // in the scan stage, where we did not allocate any GOT entry for 7780 // this symbol. Therefore we have to exit and report error now. 7781 gold_error(_("unexpected reloc insn sequence while relaxing " 7782 "tls gd to le for reloc %u."), r_type); 7783 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 7784 } 7785 7786 // Write new insns. 7787 insn1 = 0xd53bd040; // mrs x0, tpidr_el0 7788 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12 7789 insn3 = 0x91000000; // add x0, x0, #0x0 7790 elfcpp::Swap<32, big_endian>::writeval(ip, insn1); 7791 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2); 7792 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3); 7793 7794 // Calculate tprel value. 7795 Output_segment* tls_segment = relinfo->layout->tls_segment(); 7796 gold_assert(tls_segment != NULL); 7797 AArch64_address value = psymval->value(relinfo->object, 0); 7798 const elfcpp::Elf_Xword addend = rela.get_r_addend(); 7799 AArch64_address aligned_tcb_size = 7800 align_address(target->tcb_size(), tls_segment->maximum_alignment()); 7801 AArch64_address x = value + aligned_tcb_size; 7802 7803 // After new insns are written, apply TLSLE relocs. 
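// As a rough worked example of the value being applied, assuming a 16-byte
// TCB and a TLS segment aligned to 16 (made-up numbers):
//
//   aligned_tcb_size              = align_address(16, 16) = 0x10
//   symbol offset in TLS template = 0x1234
//   x = 0x1234 + 0x10             = 0x1244
//   add x0, x0, #0x1, lsl #12     (TPREL_HI12 takes x >> 12   = 0x1)
//   add x0, x0, #0x244            (TPREL_LO12 takes x & 0xfff = 0x244)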
7804 const AArch64_reloc_property* rp1 = 7805 aarch64_reloc_property_table->get_reloc_property( 7806 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12); 7807 const AArch64_reloc_property* rp2 = 7808 aarch64_reloc_property_table->get_reloc_property( 7809 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12); 7810 gold_assert(rp1 != NULL && rp2 != NULL); 7811 7812 typename aarch64_reloc_funcs::Status s1 = 7813 aarch64_reloc_funcs::template rela_general<32>(view + 4, 7814 x, 7815 addend, 7816 rp1); 7817 if (s1 != aarch64_reloc_funcs::STATUS_OKAY) 7818 return s1; 7819 7820 typename aarch64_reloc_funcs::Status s2 = 7821 aarch64_reloc_funcs::template rela_general<32>(view + 8, 7822 x, 7823 addend, 7824 rp2); 7825 7826 this->skip_call_tls_get_addr_ = true; 7827 return s2; 7828 } // End of tls_gd_to_le 7829 7830 7831 template<int size, bool big_endian> 7832 inline 7833 typename AArch64_relocate_functions<size, big_endian>::Status 7834 Target_aarch64<size, big_endian>::Relocate::tls_ld_to_le( 7835 const Relocate_info<size, big_endian>* relinfo, 7836 Target_aarch64<size, big_endian>* target, 7837 const elfcpp::Rela<size, big_endian>& rela, 7838 unsigned int r_type, 7839 unsigned char* view, 7840 const Symbol_value<size>* psymval) 7841 { 7842 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs; 7843 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype; 7844 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address; 7845 7846 Insntype* ip = reinterpret_cast<Insntype*>(view); 7847 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip); 7848 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1); 7849 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2); 7850 7851 if (r_type == elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC) 7852 { 7853 // This is the 2nd relocs, optimization should already have been 7854 // done. 7855 gold_assert((insn1 & 0xfff00000) == 0x91400000); 7856 return aarch64_reloc_funcs::STATUS_OKAY; 7857 } 7858 7859 // The original sequence is - 7860 // 90000000 adrp x0, 0 <main> 7861 // 91000000 add x0, x0, #0x0 7862 // 94000000 bl 0 <__tls_get_addr> 7863 // optimized to sequence - 7864 // d53bd040 mrs x0, tpidr_el0 7865 // 91400000 add x0, x0, #0x0, lsl #12 7866 // 91000000 add x0, x0, #0x0 7867 7868 // Unlike tls_ie_to_le, we change the 3 insns in one function call when we 7869 // encounter the first relocation "R_AARCH64_TLSLD_ADR_PAGE21". Because we 7870 // have to change "bl tls_get_addr", which does not have a corresponding tls 7871 // relocation type. So before proceeding, we need to make sure compiler 7872 // does not change the sequence. 7873 if(!(insn1 == 0x90000000 // adrp x0,0 7874 && insn2 == 0x91000000 // add x0, x0, #0x0 7875 && insn3 == 0x94000000)) // bl 0 7876 { 7877 // Ideally we should give up gd_to_le relaxation and do gd access. 7878 // However the gd_to_le relaxation decision has been made early 7879 // in the scan stage, where we did not allocate a GOT entry for 7880 // this symbol. Therefore we have to exit and report an error now. 7881 gold_error(_("unexpected reloc insn sequence while relaxing " 7882 "tls gd to le for reloc %u."), r_type); 7883 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 7884 } 7885 7886 // Write new insns. 
7887 insn1 = 0xd53bd040; // mrs x0, tpidr_el0 7888 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12 7889 insn3 = 0x91000000; // add x0, x0, #0x0 7890 elfcpp::Swap<32, big_endian>::writeval(ip, insn1); 7891 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2); 7892 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3); 7893 7894 // Calculate tprel value. 7895 Output_segment* tls_segment = relinfo->layout->tls_segment(); 7896 gold_assert(tls_segment != NULL); 7897 AArch64_address value = psymval->value(relinfo->object, 0); 7898 const elfcpp::Elf_Xword addend = rela.get_r_addend(); 7899 AArch64_address aligned_tcb_size = 7900 align_address(target->tcb_size(), tls_segment->maximum_alignment()); 7901 AArch64_address x = value + aligned_tcb_size; 7902 7903 // After new insns are written, apply TLSLE relocs. 7904 const AArch64_reloc_property* rp1 = 7905 aarch64_reloc_property_table->get_reloc_property( 7906 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12); 7907 const AArch64_reloc_property* rp2 = 7908 aarch64_reloc_property_table->get_reloc_property( 7909 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12); 7910 gold_assert(rp1 != NULL && rp2 != NULL); 7911 7912 typename aarch64_reloc_funcs::Status s1 = 7913 aarch64_reloc_funcs::template rela_general<32>(view + 4, 7914 x, 7915 addend, 7916 rp1); 7917 if (s1 != aarch64_reloc_funcs::STATUS_OKAY) 7918 return s1; 7919 7920 typename aarch64_reloc_funcs::Status s2 = 7921 aarch64_reloc_funcs::template rela_general<32>(view + 8, 7922 x, 7923 addend, 7924 rp2); 7925 7926 this->skip_call_tls_get_addr_ = true; 7927 return s2; 7928 7929 } // End of tls_ld_to_le 7930 7931 template<int size, bool big_endian> 7932 inline 7933 typename AArch64_relocate_functions<size, big_endian>::Status 7934 Target_aarch64<size, big_endian>::Relocate::tls_ie_to_le( 7935 const Relocate_info<size, big_endian>* relinfo, 7936 Target_aarch64<size, big_endian>* target, 7937 const elfcpp::Rela<size, big_endian>& rela, 7938 unsigned int r_type, 7939 unsigned char* view, 7940 const Symbol_value<size>* psymval) 7941 { 7942 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address; 7943 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype; 7944 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs; 7945 7946 AArch64_address value = psymval->value(relinfo->object, 0); 7947 Output_segment* tls_segment = relinfo->layout->tls_segment(); 7948 AArch64_address aligned_tcb_address = 7949 align_address(target->tcb_size(), tls_segment->maximum_alignment()); 7950 const elfcpp::Elf_Xword addend = rela.get_r_addend(); 7951 AArch64_address x = value + addend + aligned_tcb_address; 7952 // "x" is the offset to tp, we can only do this if x is within 7953 // range [0, 2^32-1] 7954 if (!(size == 32 || (size == 64 && (static_cast<uint64_t>(x) >> 32) == 0))) 7955 { 7956 gold_error(_("TLS variable referred by reloc %u is too far from TP."), 7957 r_type); 7958 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 7959 } 7960 7961 Insntype* ip = reinterpret_cast<Insntype*>(view); 7962 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip); 7963 unsigned int regno; 7964 Insntype newinsn; 7965 if (r_type == elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) 7966 { 7967 // Generate movz. 7968 regno = (insn & 0x1f); 7969 newinsn = (0xd2a00000 | regno) | (((x >> 16) & 0xffff) << 5); 7970 } 7971 else if (r_type == elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) 7972 { 7973 // Generate movk. 
7974 regno = (insn & 0x1f); 7975 gold_assert(regno == ((insn >> 5) & 0x1f)); 7976 newinsn = (0xf2800000 | regno) | ((x & 0xffff) << 5); 7977 } 7978 else 7979 gold_unreachable(); 7980 7981 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn); 7982 return aarch64_reloc_funcs::STATUS_OKAY; 7983 } // End of tls_ie_to_le 7984 7985 7986 template<int size, bool big_endian> 7987 inline 7988 typename AArch64_relocate_functions<size, big_endian>::Status 7989 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_le( 7990 const Relocate_info<size, big_endian>* relinfo, 7991 Target_aarch64<size, big_endian>* target, 7992 const elfcpp::Rela<size, big_endian>& rela, 7993 unsigned int r_type, 7994 unsigned char* view, 7995 const Symbol_value<size>* psymval) 7996 { 7997 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address; 7998 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype; 7999 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs; 8000 8001 // TLSDESC-GD sequence is like: 8002 // adrp x0, :tlsdesc:v1 8003 // ldr x1, [x0, #:tlsdesc_lo12:v1] 8004 // add x0, x0, :tlsdesc_lo12:v1 8005 // .tlsdesccall v1 8006 // blr x1 8007 // After desc_gd_to_le optimization, the sequence will be like: 8008 // movz x0, #0x0, lsl #16 8009 // movk x0, #0x10 8010 // nop 8011 // nop 8012 8013 // Calculate tprel value. 8014 Output_segment* tls_segment = relinfo->layout->tls_segment(); 8015 gold_assert(tls_segment != NULL); 8016 Insntype* ip = reinterpret_cast<Insntype*>(view); 8017 const elfcpp::Elf_Xword addend = rela.get_r_addend(); 8018 AArch64_address value = psymval->value(relinfo->object, addend); 8019 AArch64_address aligned_tcb_size = 8020 align_address(target->tcb_size(), tls_segment->maximum_alignment()); 8021 AArch64_address x = value + aligned_tcb_size; 8022 // x is the offset to tp, we can only do this if x is within range 8023 // [0, 2^32-1]. If x is out of range, fail and exit. 8024 if (size == 64 && (static_cast<uint64_t>(x) >> 32) != 0) 8025 { 8026 gold_error(_("TLS variable referred by reloc %u is too far from TP. " 8027 "We Can't do gd_to_le relaxation.\n"), r_type); 8028 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 8029 } 8030 Insntype newinsn; 8031 switch (r_type) 8032 { 8033 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: 8034 case elfcpp::R_AARCH64_TLSDESC_CALL: 8035 // Change to nop 8036 newinsn = 0xd503201f; 8037 break; 8038 8039 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21: 8040 // Change to movz. 8041 newinsn = 0xd2a00000 | (((x >> 16) & 0xffff) << 5); 8042 break; 8043 8044 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12: 8045 // Change to movk. 
8046 newinsn = 0xf2800000 | ((x & 0xffff) << 5); 8047 break; 8048 8049 default: 8050 gold_error(_("unsupported tlsdesc gd_to_le optimization on reloc %u"), 8051 r_type); 8052 gold_unreachable(); 8053 } 8054 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn); 8055 return aarch64_reloc_funcs::STATUS_OKAY; 8056 } // End of tls_desc_gd_to_le 8057 8058 8059 template<int size, bool big_endian> 8060 inline 8061 typename AArch64_relocate_functions<size, big_endian>::Status 8062 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_ie( 8063 const Relocate_info<size, big_endian>* /* relinfo */, 8064 Target_aarch64<size, big_endian>* /* target */, 8065 const elfcpp::Rela<size, big_endian>& rela, 8066 unsigned int r_type, 8067 unsigned char* view, 8068 const Symbol_value<size>* /* psymval */, 8069 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address, 8070 typename elfcpp::Elf_types<size>::Elf_Addr address) 8071 { 8072 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype; 8073 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs; 8074 8075 // TLSDESC-GD sequence is like: 8076 // adrp x0, :tlsdesc:v1 8077 // ldr x1, [x0, #:tlsdesc_lo12:v1] 8078 // add x0, x0, :tlsdesc_lo12:v1 8079 // .tlsdesccall v1 8080 // blr x1 8081 // After desc_gd_to_ie optimization, the sequence will be like: 8082 // adrp x0, :tlsie:v1 8083 // ldr x0, [x0, :tlsie_lo12:v1] 8084 // nop 8085 // nop 8086 8087 Insntype* ip = reinterpret_cast<Insntype*>(view); 8088 const elfcpp::Elf_Xword addend = rela.get_r_addend(); 8089 Insntype newinsn; 8090 switch (r_type) 8091 { 8092 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: 8093 case elfcpp::R_AARCH64_TLSDESC_CALL: 8094 // Change to nop 8095 newinsn = 0xd503201f; 8096 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn); 8097 break; 8098 8099 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21: 8100 { 8101 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend, 8102 address); 8103 } 8104 break; 8105 8106 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12: 8107 { 8108 // Set ldr target register to be x0. 8109 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip); 8110 insn &= 0xffffffe0; 8111 elfcpp::Swap<32, big_endian>::writeval(ip, insn); 8112 // Do relocation. 8113 const AArch64_reloc_property* reloc_property = 8114 aarch64_reloc_property_table->get_reloc_property( 8115 elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC); 8116 return aarch64_reloc_funcs::template rela_general<32>( 8117 view, got_entry_address, addend, reloc_property); 8118 } 8119 break; 8120 8121 default: 8122 gold_error(_("Don't support tlsdesc gd_to_ie optimization on reloc %u"), 8123 r_type); 8124 gold_unreachable(); 8125 } 8126 return aarch64_reloc_funcs::STATUS_OKAY; 8127 } // End of tls_desc_gd_to_ie 8128 8129 // Relocate section data. 
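// When relocate_section() below is handed a relaxed input section, the view
// it receives covers the whole output section, so the code first narrows the
// view to that section.  A rough sketch of the adjustment with made-up
// numbers:
//
//   address (start of view) = 0x10000,  view_size = 0x4000
//   poris->address()        = 0x10800,  poris->data_size() = 0x200
//   offset = 0x10800 - 0x10000 = 0x800
//   view += 0x800;  address = 0x10800;  view_size = 0x200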
8130 8131 template<int size, bool big_endian> 8132 void 8133 Target_aarch64<size, big_endian>::relocate_section( 8134 const Relocate_info<size, big_endian>* relinfo, 8135 unsigned int sh_type, 8136 const unsigned char* prelocs, 8137 size_t reloc_count, 8138 Output_section* output_section, 8139 bool needs_special_offset_handling, 8140 unsigned char* view, 8141 typename elfcpp::Elf_types<size>::Elf_Addr address, 8142 section_size_type view_size, 8143 const Reloc_symbol_changes* reloc_symbol_changes) 8144 { 8145 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address; 8146 typedef Target_aarch64<size, big_endian> Aarch64; 8147 typedef typename Target_aarch64<size, big_endian>::Relocate AArch64_relocate; 8148 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian> 8149 Classify_reloc; 8150 8151 gold_assert(sh_type == elfcpp::SHT_RELA); 8152 8153 // See if we are relocating a relaxed input section. If so, the view 8154 // covers the whole output section and we need to adjust accordingly. 8155 if (needs_special_offset_handling) 8156 { 8157 const Output_relaxed_input_section* poris = 8158 output_section->find_relaxed_input_section(relinfo->object, 8159 relinfo->data_shndx); 8160 if (poris != NULL) 8161 { 8162 Address section_address = poris->address(); 8163 section_size_type section_size = poris->data_size(); 8164 8165 gold_assert((section_address >= address) 8166 && ((section_address + section_size) 8167 <= (address + view_size))); 8168 8169 off_t offset = section_address - address; 8170 view += offset; 8171 address += offset; 8172 view_size = section_size; 8173 } 8174 } 8175 8176 gold::relocate_section<size, big_endian, Aarch64, AArch64_relocate, 8177 gold::Default_comdat_behavior, Classify_reloc>( 8178 relinfo, 8179 this, 8180 prelocs, 8181 reloc_count, 8182 output_section, 8183 needs_special_offset_handling, 8184 view, 8185 address, 8186 view_size, 8187 reloc_symbol_changes); 8188 } 8189 8190 // Scan the relocs during a relocatable link. 8191 8192 template<int size, bool big_endian> 8193 void 8194 Target_aarch64<size, big_endian>::scan_relocatable_relocs( 8195 Symbol_table* symtab, 8196 Layout* layout, 8197 Sized_relobj_file<size, big_endian>* object, 8198 unsigned int data_shndx, 8199 unsigned int sh_type, 8200 const unsigned char* prelocs, 8201 size_t reloc_count, 8202 Output_section* output_section, 8203 bool needs_special_offset_handling, 8204 size_t local_symbol_count, 8205 const unsigned char* plocal_symbols, 8206 Relocatable_relocs* rr) 8207 { 8208 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian> 8209 Classify_reloc; 8210 typedef gold::Default_scan_relocatable_relocs<Classify_reloc> 8211 Scan_relocatable_relocs; 8212 8213 gold_assert(sh_type == elfcpp::SHT_RELA); 8214 8215 gold::scan_relocatable_relocs<size, big_endian, Scan_relocatable_relocs>( 8216 symtab, 8217 layout, 8218 object, 8219 data_shndx, 8220 prelocs, 8221 reloc_count, 8222 output_section, 8223 needs_special_offset_handling, 8224 local_symbol_count, 8225 plocal_symbols, 8226 rr); 8227 } 8228 8229 // Scan the relocs for --emit-relocs. 
8230 8231 template<int size, bool big_endian> 8232 void 8233 Target_aarch64<size, big_endian>::emit_relocs_scan( 8234 Symbol_table* symtab, 8235 Layout* layout, 8236 Sized_relobj_file<size, big_endian>* object, 8237 unsigned int data_shndx, 8238 unsigned int sh_type, 8239 const unsigned char* prelocs, 8240 size_t reloc_count, 8241 Output_section* output_section, 8242 bool needs_special_offset_handling, 8243 size_t local_symbol_count, 8244 const unsigned char* plocal_syms, 8245 Relocatable_relocs* rr) 8246 { 8247 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian> 8248 Classify_reloc; 8249 typedef gold::Default_emit_relocs_strategy<Classify_reloc> 8250 Emit_relocs_strategy; 8251 8252 gold_assert(sh_type == elfcpp::SHT_RELA); 8253 8254 gold::scan_relocatable_relocs<size, big_endian, Emit_relocs_strategy>( 8255 symtab, 8256 layout, 8257 object, 8258 data_shndx, 8259 prelocs, 8260 reloc_count, 8261 output_section, 8262 needs_special_offset_handling, 8263 local_symbol_count, 8264 plocal_syms, 8265 rr); 8266 } 8267 8268 // Relocate a section during a relocatable link. 8269 8270 template<int size, bool big_endian> 8271 void 8272 Target_aarch64<size, big_endian>::relocate_relocs( 8273 const Relocate_info<size, big_endian>* relinfo, 8274 unsigned int sh_type, 8275 const unsigned char* prelocs, 8276 size_t reloc_count, 8277 Output_section* output_section, 8278 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section, 8279 unsigned char* view, 8280 typename elfcpp::Elf_types<size>::Elf_Addr view_address, 8281 section_size_type view_size, 8282 unsigned char* reloc_view, 8283 section_size_type reloc_view_size) 8284 { 8285 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian> 8286 Classify_reloc; 8287 8288 gold_assert(sh_type == elfcpp::SHT_RELA); 8289 8290 if (offset_in_output_section == this->invalid_address) 8291 { 8292 const Output_relaxed_input_section *poris 8293 = output_section->find_relaxed_input_section(relinfo->object, 8294 relinfo->data_shndx); 8295 if (poris != NULL) 8296 { 8297 Address section_address = poris->address(); 8298 section_size_type section_size = poris->data_size(); 8299 8300 gold_assert(section_address >= view_address 8301 && (section_address + section_size 8302 <= view_address + view_size)); 8303 8304 off_t offset = section_address - view_address; 8305 view += offset; 8306 view_address += offset; 8307 view_size = section_size; 8308 } 8309 } 8310 8311 gold::relocate_relocs<size, big_endian, Classify_reloc>( 8312 relinfo, 8313 prelocs, 8314 reloc_count, 8315 output_section, 8316 offset_in_output_section, 8317 view, 8318 view_address, 8319 view_size, 8320 reloc_view, 8321 reloc_view_size); 8322 } 8323 8324 8325 // Return whether this is a 3-insn erratum sequence. 8326 8327 template<int size, bool big_endian> 8328 bool 8329 Target_aarch64<size, big_endian>::is_erratum_843419_sequence( 8330 typename elfcpp::Swap<32,big_endian>::Valtype insn1, 8331 typename elfcpp::Swap<32,big_endian>::Valtype insn2, 8332 typename elfcpp::Swap<32,big_endian>::Valtype insn3) 8333 { 8334 unsigned rt1, rt2; 8335 bool load, pair; 8336 8337 // The 2nd insn is a single register load or store; or register pair 8338 // store. 8339 if (Insn_utilities::aarch64_mem_op_p(insn2, &rt1, &rt2, &pair, &load) 8340 && (!pair || (pair && !load))) 8341 { 8342 // The 3rd insn is a load or store instruction from the "Load/store 8343 // register (unsigned immediate)" encoding class, using Rn as the 8344 // base address register. 
8345 if (Insn_utilities::aarch64_ldst_uimm(insn3) 8346 && (Insn_utilities::aarch64_rn(insn3) 8347 == Insn_utilities::aarch64_rd(insn1))) 8348 return true; 8349 } 8350 return false; 8351 } 8352 8353 8354 // Return whether this is a 835769 sequence. 8355 // (Similarly implemented as in elfnn-aarch64.c.) 8356 8357 template<int size, bool big_endian> 8358 bool 8359 Target_aarch64<size, big_endian>::is_erratum_835769_sequence( 8360 typename elfcpp::Swap<32,big_endian>::Valtype insn1, 8361 typename elfcpp::Swap<32,big_endian>::Valtype insn2) 8362 { 8363 uint32_t rt; 8364 uint32_t rt2 = 0; 8365 uint32_t rn; 8366 uint32_t rm; 8367 uint32_t ra; 8368 bool pair; 8369 bool load; 8370 8371 if (Insn_utilities::aarch64_mlxl(insn2) 8372 && Insn_utilities::aarch64_mem_op_p (insn1, &rt, &rt2, &pair, &load)) 8373 { 8374 /* Any SIMD memory op is independent of the subsequent MLA 8375 by definition of the erratum. */ 8376 if (Insn_utilities::aarch64_bit(insn1, 26)) 8377 return true; 8378 8379 /* If not SIMD, check for integer memory ops and MLA relationship. */ 8380 rn = Insn_utilities::aarch64_rn(insn2); 8381 ra = Insn_utilities::aarch64_ra(insn2); 8382 rm = Insn_utilities::aarch64_rm(insn2); 8383 8384 /* If this is a load and there's a true(RAW) dependency, we are safe 8385 and this is not an erratum sequence. */ 8386 if (load && 8387 (rt == rn || rt == rm || rt == ra 8388 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra)))) 8389 return false; 8390 8391 /* We conservatively put out stubs for all other cases (including 8392 writebacks). */ 8393 return true; 8394 } 8395 8396 return false; 8397 } 8398 8399 8400 // Helper method to create erratum stub for ST_E_843419 and ST_E_835769. 8401 8402 template<int size, bool big_endian> 8403 void 8404 Target_aarch64<size, big_endian>::create_erratum_stub( 8405 AArch64_relobj<size, big_endian>* relobj, 8406 unsigned int shndx, 8407 section_size_type erratum_insn_offset, 8408 Address erratum_address, 8409 typename Insn_utilities::Insntype erratum_insn, 8410 int erratum_type, 8411 unsigned int e843419_adrp_offset) 8412 { 8413 gold_assert(erratum_type == ST_E_843419 || erratum_type == ST_E_835769); 8414 The_stub_table* stub_table = relobj->stub_table(shndx); 8415 gold_assert(stub_table != NULL); 8416 if (stub_table->find_erratum_stub(relobj, 8417 shndx, 8418 erratum_insn_offset) == NULL) 8419 { 8420 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN; 8421 The_erratum_stub* stub; 8422 if (erratum_type == ST_E_835769) 8423 stub = new The_erratum_stub(relobj, erratum_type, shndx, 8424 erratum_insn_offset); 8425 else if (erratum_type == ST_E_843419) 8426 stub = new E843419_stub<size, big_endian>( 8427 relobj, shndx, erratum_insn_offset, e843419_adrp_offset); 8428 else 8429 gold_unreachable(); 8430 stub->set_erratum_insn(erratum_insn); 8431 stub->set_erratum_address(erratum_address); 8432 // For erratum ST_E_843419 and ST_E_835769, the destination address is 8433 // always the next insn after erratum insn. 8434 stub->set_destination_address(erratum_address + BPI); 8435 stub_table->add_erratum_stub(stub); 8436 } 8437 } 8438 8439 8440 // Scan erratum for section SHNDX range [output_address + span_start, 8441 // output_address + span_end). Note here we do not share the code with 8442 // scan_erratum_843419_span function, because for 843419 we optimize by only 8443 // scanning the last few insns of a page, whereas for 835769, we need to scan 8444 // every insn. 
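// The pairs flagged here are a 64-bit multiply-accumulate immediately
// preceded by a memory access, as tested by is_erratum_835769_sequence()
// above.  Two rough examples with made-up registers:
//
//   ldr  x1, [x2]           insn1: load, Rt = x1
//   madd x3, x4, x5, x6     insn2 does not read x1, no RAW dependency
//     => conservatively treated as an erratum sequence; a stub is created.
//
//   ldr  x4, [x2]           insn1: load, Rt = x4
//   madd x3, x4, x5, x6     insn2 reads x4, a true (RAW) dependency
//     => not an erratum sequence; nothing is done.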
8445 8446 template<int size, bool big_endian> 8447 void 8448 Target_aarch64<size, big_endian>::scan_erratum_835769_span( 8449 AArch64_relobj<size, big_endian>* relobj, 8450 unsigned int shndx, 8451 const section_size_type span_start, 8452 const section_size_type span_end, 8453 unsigned char* input_view, 8454 Address output_address) 8455 { 8456 typedef typename Insn_utilities::Insntype Insntype; 8457 8458 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN; 8459 8460 // Adjust output_address and view to the start of span. 8461 output_address += span_start; 8462 input_view += span_start; 8463 8464 section_size_type span_length = span_end - span_start; 8465 section_size_type offset = 0; 8466 for (offset = 0; offset + BPI < span_length; offset += BPI) 8467 { 8468 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset); 8469 Insntype insn1 = ip[0]; 8470 Insntype insn2 = ip[1]; 8471 if (is_erratum_835769_sequence(insn1, insn2)) 8472 { 8473 Insntype erratum_insn = insn2; 8474 // "span_start + offset" is the offset for insn1. So for insn2, it is 8475 // "span_start + offset + BPI". 8476 section_size_type erratum_insn_offset = span_start + offset + BPI; 8477 Address erratum_address = output_address + offset + BPI; 8478 gold_info(_("Erratum 835769 found and fixed at \"%s\", " 8479 "section %d, offset 0x%08x."), 8480 relobj->name().c_str(), shndx, 8481 (unsigned int)(span_start + offset)); 8482 8483 this->create_erratum_stub(relobj, shndx, 8484 erratum_insn_offset, erratum_address, 8485 erratum_insn, ST_E_835769); 8486 offset += BPI; // Skip mac insn. 8487 } 8488 } 8489 } // End of "Target_aarch64::scan_erratum_835769_span". 8490 8491 8492 // Scan erratum for section SHNDX range 8493 // [output_address + span_start, output_address + span_end). 8494 8495 template<int size, bool big_endian> 8496 void 8497 Target_aarch64<size, big_endian>::scan_erratum_843419_span( 8498 AArch64_relobj<size, big_endian>* relobj, 8499 unsigned int shndx, 8500 const section_size_type span_start, 8501 const section_size_type span_end, 8502 unsigned char* input_view, 8503 Address output_address) 8504 { 8505 typedef typename Insn_utilities::Insntype Insntype; 8506 8507 // Adjust output_address and view to the start of span. 8508 output_address += span_start; 8509 input_view += span_start; 8510 8511 if ((output_address & 0x03) != 0) 8512 return; 8513 8514 section_size_type offset = 0; 8515 section_size_type span_length = span_end - span_start; 8516 // The first instruction must be ending at 0xFF8 or 0xFFC. 8517 unsigned int page_offset = output_address & 0xFFF; 8518 // Make sure starting position, that is "output_address+offset", 8519 // starts at page position 0xff8 or 0xffc. 8520 if (page_offset < 0xff8) 8521 offset = 0xff8 - page_offset; 8522 while (offset + 3 * Insn_utilities::BYTES_PER_INSN <= span_length) 8523 { 8524 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset); 8525 Insntype insn1 = ip[0]; 8526 if (Insn_utilities::is_adrp(insn1)) 8527 { 8528 Insntype insn2 = ip[1]; 8529 Insntype insn3 = ip[2]; 8530 Insntype erratum_insn; 8531 unsigned insn_offset; 8532 bool do_report = false; 8533 if (is_erratum_843419_sequence(insn1, insn2, insn3)) 8534 { 8535 do_report = true; 8536 erratum_insn = insn3; 8537 insn_offset = 2 * Insn_utilities::BYTES_PER_INSN; 8538 } 8539 else if (offset + 4 * Insn_utilities::BYTES_PER_INSN <= span_length) 8540 { 8541 // Optionally we can have an insn between ins2 and ins3 8542 Insntype insn_opt = ip[2]; 8543 // And insn_opt must not be a branch. 
8544 if (!Insn_utilities::aarch64_b(insn_opt) 8545 && !Insn_utilities::aarch64_bl(insn_opt) 8546 && !Insn_utilities::aarch64_blr(insn_opt) 8547 && !Insn_utilities::aarch64_br(insn_opt)) 8548 { 8549 // And insn_opt must not write to dest reg in insn1. However 8550 // we do a conservative scan, which means we may fix/report 8551 // more than necessary, but it doesn't hurt. 8552 8553 Insntype insn4 = ip[3]; 8554 if (is_erratum_843419_sequence(insn1, insn2, insn4)) 8555 { 8556 do_report = true; 8557 erratum_insn = insn4; 8558 insn_offset = 3 * Insn_utilities::BYTES_PER_INSN; 8559 } 8560 } 8561 } 8562 if (do_report) 8563 { 8564 unsigned int erratum_insn_offset = 8565 span_start + offset + insn_offset; 8566 Address erratum_address = 8567 output_address + offset + insn_offset; 8568 create_erratum_stub(relobj, shndx, 8569 erratum_insn_offset, erratum_address, 8570 erratum_insn, ST_E_843419, 8571 span_start + offset); 8572 } 8573 } 8574 8575 // Advance to next candidate instruction. We only consider instruction 8576 // sequences starting at a page offset of 0xff8 or 0xffc. 8577 page_offset = (output_address + offset) & 0xfff; 8578 if (page_offset == 0xff8) 8579 offset += 4; 8580 else // (page_offset == 0xffc), we move to next page's 0xff8. 8581 offset += 0xffc; 8582 } 8583 } // End of "Target_aarch64::scan_erratum_843419_span". 8584 8585 8586 // The selector for aarch64 object files. 8587 8588 template<int size, bool big_endian> 8589 class Target_selector_aarch64 : public Target_selector 8590 { 8591 public: 8592 Target_selector_aarch64(); 8593 8594 virtual Target* 8595 do_instantiate_target() 8596 { return new Target_aarch64<size, big_endian>(); } 8597 }; 8598 8599 template<> 8600 Target_selector_aarch64<32, true>::Target_selector_aarch64() 8601 : Target_selector(elfcpp::EM_AARCH64, 32, true, 8602 "elf32-bigaarch64", "aarch64_elf32_be_vec") 8603 { } 8604 8605 template<> 8606 Target_selector_aarch64<32, false>::Target_selector_aarch64() 8607 : Target_selector(elfcpp::EM_AARCH64, 32, false, 8608 "elf32-littleaarch64", "aarch64_elf32_le_vec") 8609 { } 8610 8611 template<> 8612 Target_selector_aarch64<64, true>::Target_selector_aarch64() 8613 : Target_selector(elfcpp::EM_AARCH64, 64, true, 8614 "elf64-bigaarch64", "aarch64_elf64_be_vec") 8615 { } 8616 8617 template<> 8618 Target_selector_aarch64<64, false>::Target_selector_aarch64() 8619 : Target_selector(elfcpp::EM_AARCH64, 64, false, 8620 "elf64-littleaarch64", "aarch64_elf64_le_vec") 8621 { } 8622 8623 Target_selector_aarch64<32, true> target_selector_aarch64elf32b; 8624 Target_selector_aarch64<32, false> target_selector_aarch64elf32; 8625 Target_selector_aarch64<64, true> target_selector_aarch64elfb; 8626 Target_selector_aarch64<64, false> target_selector_aarch64elf; 8627 8628 } // End anonymous namespace. 8629
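// The 843419 scan above only treats an adrp as a candidate when it sits in
// one of the last two instruction slots of a 4KiB page (page offset 0xff8 or
// 0xffc), and then advances by 4 bytes (0xff8 -> 0xffc) or by 0xffc bytes
// (0xffc -> the next page's 0xff8).  A minimal standalone sketch of that
// candidate test and stride; the names below are hypothetical helpers for
// illustration, not gold interfaces:

#include <cstdint>

namespace aarch64_erratum_843419_example
{

// Whether an adrp placed at OUTPUT_ADDRESS occupies one of the two
// vulnerable slots at the end of a 4KiB page.
inline bool
is_candidate_adrp_slot(uint64_t output_address)
{
  uint64_t page_offset = output_address & 0xfff;
  return page_offset == 0xff8 || page_offset == 0xffc;
}

// The next candidate offset after OFFSET, mirroring the stride used by
// scan_erratum_843419_span: 0xff8 steps 4 bytes to 0xffc, 0xffc steps 0xffc
// bytes to the next page's 0xff8.
inline uint64_t
next_candidate_offset(uint64_t output_address, uint64_t offset)
{
  uint64_t page_offset = (output_address + offset) & 0xfff;
  return offset + (page_offset == 0xff8 ? 4 : 0xffc);
}

} // End namespace aarch64_erratum_843419_example.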