/* 32-bit ELF support for ARM
   Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
   2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */

#include "sysdep.h"
#include <limits.h>

#include "bfd.h"
#include "bfd_stdint.h"
#include "libiberty.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf-nacl.h"
#include "elf-vxworks.h"
#include "elf/arm.h"

/* Return the relocation section associated with NAME.  HTAB is the
   bfd's elf32_arm_link_hash_table.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)

/* Return the size of a relocation entry.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))

/* Return the function to swap relocations in.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)

/* Return the function to swap relocations out.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)

#define elf_info_to_howto      0
#define elf_info_to_howto_rel  elf32_arm_info_to_howto

#define ARM_ELF_ABI_VERSION     0
#define ARM_ELF_OS_ABI_VERSION  ELFOSABI_ARM

/* The Adjusted Place, as defined by AAELF.  */
#define Pa(X)  ((X) & 0xfffffffc)

static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
					    struct bfd_link_info *link_info,
					    asection *sec,
					    bfd_byte *contents);

/* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
   R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
   in that slot.  */

static reloc_howto_type elf32_arm_howto_table_1[] =
{
  /* No relocation.
*/ 81 HOWTO (R_ARM_NONE, /* type */ 82 0, /* rightshift */ 83 0, /* size (0 = byte, 1 = short, 2 = long) */ 84 0, /* bitsize */ 85 FALSE, /* pc_relative */ 86 0, /* bitpos */ 87 complain_overflow_dont,/* complain_on_overflow */ 88 bfd_elf_generic_reloc, /* special_function */ 89 "R_ARM_NONE", /* name */ 90 FALSE, /* partial_inplace */ 91 0, /* src_mask */ 92 0, /* dst_mask */ 93 FALSE), /* pcrel_offset */ 94 95 HOWTO (R_ARM_PC24, /* type */ 96 2, /* rightshift */ 97 2, /* size (0 = byte, 1 = short, 2 = long) */ 98 24, /* bitsize */ 99 TRUE, /* pc_relative */ 100 0, /* bitpos */ 101 complain_overflow_signed,/* complain_on_overflow */ 102 bfd_elf_generic_reloc, /* special_function */ 103 "R_ARM_PC24", /* name */ 104 FALSE, /* partial_inplace */ 105 0x00ffffff, /* src_mask */ 106 0x00ffffff, /* dst_mask */ 107 TRUE), /* pcrel_offset */ 108 109 /* 32 bit absolute */ 110 HOWTO (R_ARM_ABS32, /* type */ 111 0, /* rightshift */ 112 2, /* size (0 = byte, 1 = short, 2 = long) */ 113 32, /* bitsize */ 114 FALSE, /* pc_relative */ 115 0, /* bitpos */ 116 complain_overflow_bitfield,/* complain_on_overflow */ 117 bfd_elf_generic_reloc, /* special_function */ 118 "R_ARM_ABS32", /* name */ 119 FALSE, /* partial_inplace */ 120 0xffffffff, /* src_mask */ 121 0xffffffff, /* dst_mask */ 122 FALSE), /* pcrel_offset */ 123 124 /* standard 32bit pc-relative reloc */ 125 HOWTO (R_ARM_REL32, /* type */ 126 0, /* rightshift */ 127 2, /* size (0 = byte, 1 = short, 2 = long) */ 128 32, /* bitsize */ 129 TRUE, /* pc_relative */ 130 0, /* bitpos */ 131 complain_overflow_bitfield,/* complain_on_overflow */ 132 bfd_elf_generic_reloc, /* special_function */ 133 "R_ARM_REL32", /* name */ 134 FALSE, /* partial_inplace */ 135 0xffffffff, /* src_mask */ 136 0xffffffff, /* dst_mask */ 137 TRUE), /* pcrel_offset */ 138 139 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */ 140 HOWTO (R_ARM_LDR_PC_G0, /* type */ 141 0, /* rightshift */ 142 0, /* size (0 = byte, 1 = short, 2 = long) */ 143 32, /* bitsize */ 144 TRUE, /* pc_relative */ 145 0, /* bitpos */ 146 complain_overflow_dont,/* complain_on_overflow */ 147 bfd_elf_generic_reloc, /* special_function */ 148 "R_ARM_LDR_PC_G0", /* name */ 149 FALSE, /* partial_inplace */ 150 0xffffffff, /* src_mask */ 151 0xffffffff, /* dst_mask */ 152 TRUE), /* pcrel_offset */ 153 154 /* 16 bit absolute */ 155 HOWTO (R_ARM_ABS16, /* type */ 156 0, /* rightshift */ 157 1, /* size (0 = byte, 1 = short, 2 = long) */ 158 16, /* bitsize */ 159 FALSE, /* pc_relative */ 160 0, /* bitpos */ 161 complain_overflow_bitfield,/* complain_on_overflow */ 162 bfd_elf_generic_reloc, /* special_function */ 163 "R_ARM_ABS16", /* name */ 164 FALSE, /* partial_inplace */ 165 0x0000ffff, /* src_mask */ 166 0x0000ffff, /* dst_mask */ 167 FALSE), /* pcrel_offset */ 168 169 /* 12 bit absolute */ 170 HOWTO (R_ARM_ABS12, /* type */ 171 0, /* rightshift */ 172 2, /* size (0 = byte, 1 = short, 2 = long) */ 173 12, /* bitsize */ 174 FALSE, /* pc_relative */ 175 0, /* bitpos */ 176 complain_overflow_bitfield,/* complain_on_overflow */ 177 bfd_elf_generic_reloc, /* special_function */ 178 "R_ARM_ABS12", /* name */ 179 FALSE, /* partial_inplace */ 180 0x00000fff, /* src_mask */ 181 0x00000fff, /* dst_mask */ 182 FALSE), /* pcrel_offset */ 183 184 HOWTO (R_ARM_THM_ABS5, /* type */ 185 6, /* rightshift */ 186 1, /* size (0 = byte, 1 = short, 2 = long) */ 187 5, /* bitsize */ 188 FALSE, /* pc_relative */ 189 0, /* bitpos */ 190 complain_overflow_bitfield,/* complain_on_overflow */ 191 bfd_elf_generic_reloc, /* special_function */ 
192 "R_ARM_THM_ABS5", /* name */ 193 FALSE, /* partial_inplace */ 194 0x000007e0, /* src_mask */ 195 0x000007e0, /* dst_mask */ 196 FALSE), /* pcrel_offset */ 197 198 /* 8 bit absolute */ 199 HOWTO (R_ARM_ABS8, /* type */ 200 0, /* rightshift */ 201 0, /* size (0 = byte, 1 = short, 2 = long) */ 202 8, /* bitsize */ 203 FALSE, /* pc_relative */ 204 0, /* bitpos */ 205 complain_overflow_bitfield,/* complain_on_overflow */ 206 bfd_elf_generic_reloc, /* special_function */ 207 "R_ARM_ABS8", /* name */ 208 FALSE, /* partial_inplace */ 209 0x000000ff, /* src_mask */ 210 0x000000ff, /* dst_mask */ 211 FALSE), /* pcrel_offset */ 212 213 HOWTO (R_ARM_SBREL32, /* type */ 214 0, /* rightshift */ 215 2, /* size (0 = byte, 1 = short, 2 = long) */ 216 32, /* bitsize */ 217 FALSE, /* pc_relative */ 218 0, /* bitpos */ 219 complain_overflow_dont,/* complain_on_overflow */ 220 bfd_elf_generic_reloc, /* special_function */ 221 "R_ARM_SBREL32", /* name */ 222 FALSE, /* partial_inplace */ 223 0xffffffff, /* src_mask */ 224 0xffffffff, /* dst_mask */ 225 FALSE), /* pcrel_offset */ 226 227 HOWTO (R_ARM_THM_CALL, /* type */ 228 1, /* rightshift */ 229 2, /* size (0 = byte, 1 = short, 2 = long) */ 230 24, /* bitsize */ 231 TRUE, /* pc_relative */ 232 0, /* bitpos */ 233 complain_overflow_signed,/* complain_on_overflow */ 234 bfd_elf_generic_reloc, /* special_function */ 235 "R_ARM_THM_CALL", /* name */ 236 FALSE, /* partial_inplace */ 237 0x07ff2fff, /* src_mask */ 238 0x07ff2fff, /* dst_mask */ 239 TRUE), /* pcrel_offset */ 240 241 HOWTO (R_ARM_THM_PC8, /* type */ 242 1, /* rightshift */ 243 1, /* size (0 = byte, 1 = short, 2 = long) */ 244 8, /* bitsize */ 245 TRUE, /* pc_relative */ 246 0, /* bitpos */ 247 complain_overflow_signed,/* complain_on_overflow */ 248 bfd_elf_generic_reloc, /* special_function */ 249 "R_ARM_THM_PC8", /* name */ 250 FALSE, /* partial_inplace */ 251 0x000000ff, /* src_mask */ 252 0x000000ff, /* dst_mask */ 253 TRUE), /* pcrel_offset */ 254 255 HOWTO (R_ARM_BREL_ADJ, /* type */ 256 1, /* rightshift */ 257 1, /* size (0 = byte, 1 = short, 2 = long) */ 258 32, /* bitsize */ 259 FALSE, /* pc_relative */ 260 0, /* bitpos */ 261 complain_overflow_signed,/* complain_on_overflow */ 262 bfd_elf_generic_reloc, /* special_function */ 263 "R_ARM_BREL_ADJ", /* name */ 264 FALSE, /* partial_inplace */ 265 0xffffffff, /* src_mask */ 266 0xffffffff, /* dst_mask */ 267 FALSE), /* pcrel_offset */ 268 269 HOWTO (R_ARM_TLS_DESC, /* type */ 270 0, /* rightshift */ 271 2, /* size (0 = byte, 1 = short, 2 = long) */ 272 32, /* bitsize */ 273 FALSE, /* pc_relative */ 274 0, /* bitpos */ 275 complain_overflow_bitfield,/* complain_on_overflow */ 276 bfd_elf_generic_reloc, /* special_function */ 277 "R_ARM_TLS_DESC", /* name */ 278 FALSE, /* partial_inplace */ 279 0xffffffff, /* src_mask */ 280 0xffffffff, /* dst_mask */ 281 FALSE), /* pcrel_offset */ 282 283 HOWTO (R_ARM_THM_SWI8, /* type */ 284 0, /* rightshift */ 285 0, /* size (0 = byte, 1 = short, 2 = long) */ 286 0, /* bitsize */ 287 FALSE, /* pc_relative */ 288 0, /* bitpos */ 289 complain_overflow_signed,/* complain_on_overflow */ 290 bfd_elf_generic_reloc, /* special_function */ 291 "R_ARM_SWI8", /* name */ 292 FALSE, /* partial_inplace */ 293 0x00000000, /* src_mask */ 294 0x00000000, /* dst_mask */ 295 FALSE), /* pcrel_offset */ 296 297 /* BLX instruction for the ARM. 
*/ 298 HOWTO (R_ARM_XPC25, /* type */ 299 2, /* rightshift */ 300 2, /* size (0 = byte, 1 = short, 2 = long) */ 301 24, /* bitsize */ 302 TRUE, /* pc_relative */ 303 0, /* bitpos */ 304 complain_overflow_signed,/* complain_on_overflow */ 305 bfd_elf_generic_reloc, /* special_function */ 306 "R_ARM_XPC25", /* name */ 307 FALSE, /* partial_inplace */ 308 0x00ffffff, /* src_mask */ 309 0x00ffffff, /* dst_mask */ 310 TRUE), /* pcrel_offset */ 311 312 /* BLX instruction for the Thumb. */ 313 HOWTO (R_ARM_THM_XPC22, /* type */ 314 2, /* rightshift */ 315 2, /* size (0 = byte, 1 = short, 2 = long) */ 316 24, /* bitsize */ 317 TRUE, /* pc_relative */ 318 0, /* bitpos */ 319 complain_overflow_signed,/* complain_on_overflow */ 320 bfd_elf_generic_reloc, /* special_function */ 321 "R_ARM_THM_XPC22", /* name */ 322 FALSE, /* partial_inplace */ 323 0x07ff2fff, /* src_mask */ 324 0x07ff2fff, /* dst_mask */ 325 TRUE), /* pcrel_offset */ 326 327 /* Dynamic TLS relocations. */ 328 329 HOWTO (R_ARM_TLS_DTPMOD32, /* type */ 330 0, /* rightshift */ 331 2, /* size (0 = byte, 1 = short, 2 = long) */ 332 32, /* bitsize */ 333 FALSE, /* pc_relative */ 334 0, /* bitpos */ 335 complain_overflow_bitfield,/* complain_on_overflow */ 336 bfd_elf_generic_reloc, /* special_function */ 337 "R_ARM_TLS_DTPMOD32", /* name */ 338 TRUE, /* partial_inplace */ 339 0xffffffff, /* src_mask */ 340 0xffffffff, /* dst_mask */ 341 FALSE), /* pcrel_offset */ 342 343 HOWTO (R_ARM_TLS_DTPOFF32, /* type */ 344 0, /* rightshift */ 345 2, /* size (0 = byte, 1 = short, 2 = long) */ 346 32, /* bitsize */ 347 FALSE, /* pc_relative */ 348 0, /* bitpos */ 349 complain_overflow_bitfield,/* complain_on_overflow */ 350 bfd_elf_generic_reloc, /* special_function */ 351 "R_ARM_TLS_DTPOFF32", /* name */ 352 TRUE, /* partial_inplace */ 353 0xffffffff, /* src_mask */ 354 0xffffffff, /* dst_mask */ 355 FALSE), /* pcrel_offset */ 356 357 HOWTO (R_ARM_TLS_TPOFF32, /* type */ 358 0, /* rightshift */ 359 2, /* size (0 = byte, 1 = short, 2 = long) */ 360 32, /* bitsize */ 361 FALSE, /* pc_relative */ 362 0, /* bitpos */ 363 complain_overflow_bitfield,/* complain_on_overflow */ 364 bfd_elf_generic_reloc, /* special_function */ 365 "R_ARM_TLS_TPOFF32", /* name */ 366 TRUE, /* partial_inplace */ 367 0xffffffff, /* src_mask */ 368 0xffffffff, /* dst_mask */ 369 FALSE), /* pcrel_offset */ 370 371 /* Relocs used in ARM Linux */ 372 373 HOWTO (R_ARM_COPY, /* type */ 374 0, /* rightshift */ 375 2, /* size (0 = byte, 1 = short, 2 = long) */ 376 32, /* bitsize */ 377 FALSE, /* pc_relative */ 378 0, /* bitpos */ 379 complain_overflow_bitfield,/* complain_on_overflow */ 380 bfd_elf_generic_reloc, /* special_function */ 381 "R_ARM_COPY", /* name */ 382 TRUE, /* partial_inplace */ 383 0xffffffff, /* src_mask */ 384 0xffffffff, /* dst_mask */ 385 FALSE), /* pcrel_offset */ 386 387 HOWTO (R_ARM_GLOB_DAT, /* type */ 388 0, /* rightshift */ 389 2, /* size (0 = byte, 1 = short, 2 = long) */ 390 32, /* bitsize */ 391 FALSE, /* pc_relative */ 392 0, /* bitpos */ 393 complain_overflow_bitfield,/* complain_on_overflow */ 394 bfd_elf_generic_reloc, /* special_function */ 395 "R_ARM_GLOB_DAT", /* name */ 396 TRUE, /* partial_inplace */ 397 0xffffffff, /* src_mask */ 398 0xffffffff, /* dst_mask */ 399 FALSE), /* pcrel_offset */ 400 401 HOWTO (R_ARM_JUMP_SLOT, /* type */ 402 0, /* rightshift */ 403 2, /* size (0 = byte, 1 = short, 2 = long) */ 404 32, /* bitsize */ 405 FALSE, /* pc_relative */ 406 0, /* bitpos */ 407 complain_overflow_bitfield,/* complain_on_overflow */ 408 
bfd_elf_generic_reloc, /* special_function */ 409 "R_ARM_JUMP_SLOT", /* name */ 410 TRUE, /* partial_inplace */ 411 0xffffffff, /* src_mask */ 412 0xffffffff, /* dst_mask */ 413 FALSE), /* pcrel_offset */ 414 415 HOWTO (R_ARM_RELATIVE, /* type */ 416 0, /* rightshift */ 417 2, /* size (0 = byte, 1 = short, 2 = long) */ 418 32, /* bitsize */ 419 FALSE, /* pc_relative */ 420 0, /* bitpos */ 421 complain_overflow_bitfield,/* complain_on_overflow */ 422 bfd_elf_generic_reloc, /* special_function */ 423 "R_ARM_RELATIVE", /* name */ 424 TRUE, /* partial_inplace */ 425 0xffffffff, /* src_mask */ 426 0xffffffff, /* dst_mask */ 427 FALSE), /* pcrel_offset */ 428 429 HOWTO (R_ARM_GOTOFF32, /* type */ 430 0, /* rightshift */ 431 2, /* size (0 = byte, 1 = short, 2 = long) */ 432 32, /* bitsize */ 433 FALSE, /* pc_relative */ 434 0, /* bitpos */ 435 complain_overflow_bitfield,/* complain_on_overflow */ 436 bfd_elf_generic_reloc, /* special_function */ 437 "R_ARM_GOTOFF32", /* name */ 438 TRUE, /* partial_inplace */ 439 0xffffffff, /* src_mask */ 440 0xffffffff, /* dst_mask */ 441 FALSE), /* pcrel_offset */ 442 443 HOWTO (R_ARM_GOTPC, /* type */ 444 0, /* rightshift */ 445 2, /* size (0 = byte, 1 = short, 2 = long) */ 446 32, /* bitsize */ 447 TRUE, /* pc_relative */ 448 0, /* bitpos */ 449 complain_overflow_bitfield,/* complain_on_overflow */ 450 bfd_elf_generic_reloc, /* special_function */ 451 "R_ARM_GOTPC", /* name */ 452 TRUE, /* partial_inplace */ 453 0xffffffff, /* src_mask */ 454 0xffffffff, /* dst_mask */ 455 TRUE), /* pcrel_offset */ 456 457 HOWTO (R_ARM_GOT32, /* type */ 458 0, /* rightshift */ 459 2, /* size (0 = byte, 1 = short, 2 = long) */ 460 32, /* bitsize */ 461 FALSE, /* pc_relative */ 462 0, /* bitpos */ 463 complain_overflow_bitfield,/* complain_on_overflow */ 464 bfd_elf_generic_reloc, /* special_function */ 465 "R_ARM_GOT32", /* name */ 466 TRUE, /* partial_inplace */ 467 0xffffffff, /* src_mask */ 468 0xffffffff, /* dst_mask */ 469 FALSE), /* pcrel_offset */ 470 471 HOWTO (R_ARM_PLT32, /* type */ 472 2, /* rightshift */ 473 2, /* size (0 = byte, 1 = short, 2 = long) */ 474 24, /* bitsize */ 475 TRUE, /* pc_relative */ 476 0, /* bitpos */ 477 complain_overflow_bitfield,/* complain_on_overflow */ 478 bfd_elf_generic_reloc, /* special_function */ 479 "R_ARM_PLT32", /* name */ 480 FALSE, /* partial_inplace */ 481 0x00ffffff, /* src_mask */ 482 0x00ffffff, /* dst_mask */ 483 TRUE), /* pcrel_offset */ 484 485 HOWTO (R_ARM_CALL, /* type */ 486 2, /* rightshift */ 487 2, /* size (0 = byte, 1 = short, 2 = long) */ 488 24, /* bitsize */ 489 TRUE, /* pc_relative */ 490 0, /* bitpos */ 491 complain_overflow_signed,/* complain_on_overflow */ 492 bfd_elf_generic_reloc, /* special_function */ 493 "R_ARM_CALL", /* name */ 494 FALSE, /* partial_inplace */ 495 0x00ffffff, /* src_mask */ 496 0x00ffffff, /* dst_mask */ 497 TRUE), /* pcrel_offset */ 498 499 HOWTO (R_ARM_JUMP24, /* type */ 500 2, /* rightshift */ 501 2, /* size (0 = byte, 1 = short, 2 = long) */ 502 24, /* bitsize */ 503 TRUE, /* pc_relative */ 504 0, /* bitpos */ 505 complain_overflow_signed,/* complain_on_overflow */ 506 bfd_elf_generic_reloc, /* special_function */ 507 "R_ARM_JUMP24", /* name */ 508 FALSE, /* partial_inplace */ 509 0x00ffffff, /* src_mask */ 510 0x00ffffff, /* dst_mask */ 511 TRUE), /* pcrel_offset */ 512 513 HOWTO (R_ARM_THM_JUMP24, /* type */ 514 1, /* rightshift */ 515 2, /* size (0 = byte, 1 = short, 2 = long) */ 516 24, /* bitsize */ 517 TRUE, /* pc_relative */ 518 0, /* bitpos */ 519 
complain_overflow_signed,/* complain_on_overflow */ 520 bfd_elf_generic_reloc, /* special_function */ 521 "R_ARM_THM_JUMP24", /* name */ 522 FALSE, /* partial_inplace */ 523 0x07ff2fff, /* src_mask */ 524 0x07ff2fff, /* dst_mask */ 525 TRUE), /* pcrel_offset */ 526 527 HOWTO (R_ARM_BASE_ABS, /* type */ 528 0, /* rightshift */ 529 2, /* size (0 = byte, 1 = short, 2 = long) */ 530 32, /* bitsize */ 531 FALSE, /* pc_relative */ 532 0, /* bitpos */ 533 complain_overflow_dont,/* complain_on_overflow */ 534 bfd_elf_generic_reloc, /* special_function */ 535 "R_ARM_BASE_ABS", /* name */ 536 FALSE, /* partial_inplace */ 537 0xffffffff, /* src_mask */ 538 0xffffffff, /* dst_mask */ 539 FALSE), /* pcrel_offset */ 540 541 HOWTO (R_ARM_ALU_PCREL7_0, /* type */ 542 0, /* rightshift */ 543 2, /* size (0 = byte, 1 = short, 2 = long) */ 544 12, /* bitsize */ 545 TRUE, /* pc_relative */ 546 0, /* bitpos */ 547 complain_overflow_dont,/* complain_on_overflow */ 548 bfd_elf_generic_reloc, /* special_function */ 549 "R_ARM_ALU_PCREL_7_0", /* name */ 550 FALSE, /* partial_inplace */ 551 0x00000fff, /* src_mask */ 552 0x00000fff, /* dst_mask */ 553 TRUE), /* pcrel_offset */ 554 555 HOWTO (R_ARM_ALU_PCREL15_8, /* type */ 556 0, /* rightshift */ 557 2, /* size (0 = byte, 1 = short, 2 = long) */ 558 12, /* bitsize */ 559 TRUE, /* pc_relative */ 560 8, /* bitpos */ 561 complain_overflow_dont,/* complain_on_overflow */ 562 bfd_elf_generic_reloc, /* special_function */ 563 "R_ARM_ALU_PCREL_15_8",/* name */ 564 FALSE, /* partial_inplace */ 565 0x00000fff, /* src_mask */ 566 0x00000fff, /* dst_mask */ 567 TRUE), /* pcrel_offset */ 568 569 HOWTO (R_ARM_ALU_PCREL23_15, /* type */ 570 0, /* rightshift */ 571 2, /* size (0 = byte, 1 = short, 2 = long) */ 572 12, /* bitsize */ 573 TRUE, /* pc_relative */ 574 16, /* bitpos */ 575 complain_overflow_dont,/* complain_on_overflow */ 576 bfd_elf_generic_reloc, /* special_function */ 577 "R_ARM_ALU_PCREL_23_15",/* name */ 578 FALSE, /* partial_inplace */ 579 0x00000fff, /* src_mask */ 580 0x00000fff, /* dst_mask */ 581 TRUE), /* pcrel_offset */ 582 583 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */ 584 0, /* rightshift */ 585 2, /* size (0 = byte, 1 = short, 2 = long) */ 586 12, /* bitsize */ 587 FALSE, /* pc_relative */ 588 0, /* bitpos */ 589 complain_overflow_dont,/* complain_on_overflow */ 590 bfd_elf_generic_reloc, /* special_function */ 591 "R_ARM_LDR_SBREL_11_0",/* name */ 592 FALSE, /* partial_inplace */ 593 0x00000fff, /* src_mask */ 594 0x00000fff, /* dst_mask */ 595 FALSE), /* pcrel_offset */ 596 597 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */ 598 0, /* rightshift */ 599 2, /* size (0 = byte, 1 = short, 2 = long) */ 600 8, /* bitsize */ 601 FALSE, /* pc_relative */ 602 12, /* bitpos */ 603 complain_overflow_dont,/* complain_on_overflow */ 604 bfd_elf_generic_reloc, /* special_function */ 605 "R_ARM_ALU_SBREL_19_12",/* name */ 606 FALSE, /* partial_inplace */ 607 0x000ff000, /* src_mask */ 608 0x000ff000, /* dst_mask */ 609 FALSE), /* pcrel_offset */ 610 611 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */ 612 0, /* rightshift */ 613 2, /* size (0 = byte, 1 = short, 2 = long) */ 614 8, /* bitsize */ 615 FALSE, /* pc_relative */ 616 20, /* bitpos */ 617 complain_overflow_dont,/* complain_on_overflow */ 618 bfd_elf_generic_reloc, /* special_function */ 619 "R_ARM_ALU_SBREL_27_20",/* name */ 620 FALSE, /* partial_inplace */ 621 0x0ff00000, /* src_mask */ 622 0x0ff00000, /* dst_mask */ 623 FALSE), /* pcrel_offset */ 624 625 HOWTO (R_ARM_TARGET1, /* type */ 626 0, /* rightshift */ 627 2, /* size 
(0 = byte, 1 = short, 2 = long) */ 628 32, /* bitsize */ 629 FALSE, /* pc_relative */ 630 0, /* bitpos */ 631 complain_overflow_dont,/* complain_on_overflow */ 632 bfd_elf_generic_reloc, /* special_function */ 633 "R_ARM_TARGET1", /* name */ 634 FALSE, /* partial_inplace */ 635 0xffffffff, /* src_mask */ 636 0xffffffff, /* dst_mask */ 637 FALSE), /* pcrel_offset */ 638 639 HOWTO (R_ARM_ROSEGREL32, /* type */ 640 0, /* rightshift */ 641 2, /* size (0 = byte, 1 = short, 2 = long) */ 642 32, /* bitsize */ 643 FALSE, /* pc_relative */ 644 0, /* bitpos */ 645 complain_overflow_dont,/* complain_on_overflow */ 646 bfd_elf_generic_reloc, /* special_function */ 647 "R_ARM_ROSEGREL32", /* name */ 648 FALSE, /* partial_inplace */ 649 0xffffffff, /* src_mask */ 650 0xffffffff, /* dst_mask */ 651 FALSE), /* pcrel_offset */ 652 653 HOWTO (R_ARM_V4BX, /* type */ 654 0, /* rightshift */ 655 2, /* size (0 = byte, 1 = short, 2 = long) */ 656 32, /* bitsize */ 657 FALSE, /* pc_relative */ 658 0, /* bitpos */ 659 complain_overflow_dont,/* complain_on_overflow */ 660 bfd_elf_generic_reloc, /* special_function */ 661 "R_ARM_V4BX", /* name */ 662 FALSE, /* partial_inplace */ 663 0xffffffff, /* src_mask */ 664 0xffffffff, /* dst_mask */ 665 FALSE), /* pcrel_offset */ 666 667 HOWTO (R_ARM_TARGET2, /* type */ 668 0, /* rightshift */ 669 2, /* size (0 = byte, 1 = short, 2 = long) */ 670 32, /* bitsize */ 671 FALSE, /* pc_relative */ 672 0, /* bitpos */ 673 complain_overflow_signed,/* complain_on_overflow */ 674 bfd_elf_generic_reloc, /* special_function */ 675 "R_ARM_TARGET2", /* name */ 676 FALSE, /* partial_inplace */ 677 0xffffffff, /* src_mask */ 678 0xffffffff, /* dst_mask */ 679 TRUE), /* pcrel_offset */ 680 681 HOWTO (R_ARM_PREL31, /* type */ 682 0, /* rightshift */ 683 2, /* size (0 = byte, 1 = short, 2 = long) */ 684 31, /* bitsize */ 685 TRUE, /* pc_relative */ 686 0, /* bitpos */ 687 complain_overflow_signed,/* complain_on_overflow */ 688 bfd_elf_generic_reloc, /* special_function */ 689 "R_ARM_PREL31", /* name */ 690 FALSE, /* partial_inplace */ 691 0x7fffffff, /* src_mask */ 692 0x7fffffff, /* dst_mask */ 693 TRUE), /* pcrel_offset */ 694 695 HOWTO (R_ARM_MOVW_ABS_NC, /* type */ 696 0, /* rightshift */ 697 2, /* size (0 = byte, 1 = short, 2 = long) */ 698 16, /* bitsize */ 699 FALSE, /* pc_relative */ 700 0, /* bitpos */ 701 complain_overflow_dont,/* complain_on_overflow */ 702 bfd_elf_generic_reloc, /* special_function */ 703 "R_ARM_MOVW_ABS_NC", /* name */ 704 FALSE, /* partial_inplace */ 705 0x000f0fff, /* src_mask */ 706 0x000f0fff, /* dst_mask */ 707 FALSE), /* pcrel_offset */ 708 709 HOWTO (R_ARM_MOVT_ABS, /* type */ 710 0, /* rightshift */ 711 2, /* size (0 = byte, 1 = short, 2 = long) */ 712 16, /* bitsize */ 713 FALSE, /* pc_relative */ 714 0, /* bitpos */ 715 complain_overflow_bitfield,/* complain_on_overflow */ 716 bfd_elf_generic_reloc, /* special_function */ 717 "R_ARM_MOVT_ABS", /* name */ 718 FALSE, /* partial_inplace */ 719 0x000f0fff, /* src_mask */ 720 0x000f0fff, /* dst_mask */ 721 FALSE), /* pcrel_offset */ 722 723 HOWTO (R_ARM_MOVW_PREL_NC, /* type */ 724 0, /* rightshift */ 725 2, /* size (0 = byte, 1 = short, 2 = long) */ 726 16, /* bitsize */ 727 TRUE, /* pc_relative */ 728 0, /* bitpos */ 729 complain_overflow_dont,/* complain_on_overflow */ 730 bfd_elf_generic_reloc, /* special_function */ 731 "R_ARM_MOVW_PREL_NC", /* name */ 732 FALSE, /* partial_inplace */ 733 0x000f0fff, /* src_mask */ 734 0x000f0fff, /* dst_mask */ 735 TRUE), /* pcrel_offset */ 736 737 HOWTO 
(R_ARM_MOVT_PREL, /* type */ 738 0, /* rightshift */ 739 2, /* size (0 = byte, 1 = short, 2 = long) */ 740 16, /* bitsize */ 741 TRUE, /* pc_relative */ 742 0, /* bitpos */ 743 complain_overflow_bitfield,/* complain_on_overflow */ 744 bfd_elf_generic_reloc, /* special_function */ 745 "R_ARM_MOVT_PREL", /* name */ 746 FALSE, /* partial_inplace */ 747 0x000f0fff, /* src_mask */ 748 0x000f0fff, /* dst_mask */ 749 TRUE), /* pcrel_offset */ 750 751 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */ 752 0, /* rightshift */ 753 2, /* size (0 = byte, 1 = short, 2 = long) */ 754 16, /* bitsize */ 755 FALSE, /* pc_relative */ 756 0, /* bitpos */ 757 complain_overflow_dont,/* complain_on_overflow */ 758 bfd_elf_generic_reloc, /* special_function */ 759 "R_ARM_THM_MOVW_ABS_NC",/* name */ 760 FALSE, /* partial_inplace */ 761 0x040f70ff, /* src_mask */ 762 0x040f70ff, /* dst_mask */ 763 FALSE), /* pcrel_offset */ 764 765 HOWTO (R_ARM_THM_MOVT_ABS, /* type */ 766 0, /* rightshift */ 767 2, /* size (0 = byte, 1 = short, 2 = long) */ 768 16, /* bitsize */ 769 FALSE, /* pc_relative */ 770 0, /* bitpos */ 771 complain_overflow_bitfield,/* complain_on_overflow */ 772 bfd_elf_generic_reloc, /* special_function */ 773 "R_ARM_THM_MOVT_ABS", /* name */ 774 FALSE, /* partial_inplace */ 775 0x040f70ff, /* src_mask */ 776 0x040f70ff, /* dst_mask */ 777 FALSE), /* pcrel_offset */ 778 779 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */ 780 0, /* rightshift */ 781 2, /* size (0 = byte, 1 = short, 2 = long) */ 782 16, /* bitsize */ 783 TRUE, /* pc_relative */ 784 0, /* bitpos */ 785 complain_overflow_dont,/* complain_on_overflow */ 786 bfd_elf_generic_reloc, /* special_function */ 787 "R_ARM_THM_MOVW_PREL_NC",/* name */ 788 FALSE, /* partial_inplace */ 789 0x040f70ff, /* src_mask */ 790 0x040f70ff, /* dst_mask */ 791 TRUE), /* pcrel_offset */ 792 793 HOWTO (R_ARM_THM_MOVT_PREL, /* type */ 794 0, /* rightshift */ 795 2, /* size (0 = byte, 1 = short, 2 = long) */ 796 16, /* bitsize */ 797 TRUE, /* pc_relative */ 798 0, /* bitpos */ 799 complain_overflow_bitfield,/* complain_on_overflow */ 800 bfd_elf_generic_reloc, /* special_function */ 801 "R_ARM_THM_MOVT_PREL", /* name */ 802 FALSE, /* partial_inplace */ 803 0x040f70ff, /* src_mask */ 804 0x040f70ff, /* dst_mask */ 805 TRUE), /* pcrel_offset */ 806 807 HOWTO (R_ARM_THM_JUMP19, /* type */ 808 1, /* rightshift */ 809 2, /* size (0 = byte, 1 = short, 2 = long) */ 810 19, /* bitsize */ 811 TRUE, /* pc_relative */ 812 0, /* bitpos */ 813 complain_overflow_signed,/* complain_on_overflow */ 814 bfd_elf_generic_reloc, /* special_function */ 815 "R_ARM_THM_JUMP19", /* name */ 816 FALSE, /* partial_inplace */ 817 0x043f2fff, /* src_mask */ 818 0x043f2fff, /* dst_mask */ 819 TRUE), /* pcrel_offset */ 820 821 HOWTO (R_ARM_THM_JUMP6, /* type */ 822 1, /* rightshift */ 823 1, /* size (0 = byte, 1 = short, 2 = long) */ 824 6, /* bitsize */ 825 TRUE, /* pc_relative */ 826 0, /* bitpos */ 827 complain_overflow_unsigned,/* complain_on_overflow */ 828 bfd_elf_generic_reloc, /* special_function */ 829 "R_ARM_THM_JUMP6", /* name */ 830 FALSE, /* partial_inplace */ 831 0x02f8, /* src_mask */ 832 0x02f8, /* dst_mask */ 833 TRUE), /* pcrel_offset */ 834 835 /* These are declared as 13-bit signed relocations because we can 836 address -4095 .. 4095(base) by altering ADDW to SUBW or vice 837 versa. 
*/ 838 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */ 839 0, /* rightshift */ 840 2, /* size (0 = byte, 1 = short, 2 = long) */ 841 13, /* bitsize */ 842 TRUE, /* pc_relative */ 843 0, /* bitpos */ 844 complain_overflow_dont,/* complain_on_overflow */ 845 bfd_elf_generic_reloc, /* special_function */ 846 "R_ARM_THM_ALU_PREL_11_0",/* name */ 847 FALSE, /* partial_inplace */ 848 0xffffffff, /* src_mask */ 849 0xffffffff, /* dst_mask */ 850 TRUE), /* pcrel_offset */ 851 852 HOWTO (R_ARM_THM_PC12, /* type */ 853 0, /* rightshift */ 854 2, /* size (0 = byte, 1 = short, 2 = long) */ 855 13, /* bitsize */ 856 TRUE, /* pc_relative */ 857 0, /* bitpos */ 858 complain_overflow_dont,/* complain_on_overflow */ 859 bfd_elf_generic_reloc, /* special_function */ 860 "R_ARM_THM_PC12", /* name */ 861 FALSE, /* partial_inplace */ 862 0xffffffff, /* src_mask */ 863 0xffffffff, /* dst_mask */ 864 TRUE), /* pcrel_offset */ 865 866 HOWTO (R_ARM_ABS32_NOI, /* type */ 867 0, /* rightshift */ 868 2, /* size (0 = byte, 1 = short, 2 = long) */ 869 32, /* bitsize */ 870 FALSE, /* pc_relative */ 871 0, /* bitpos */ 872 complain_overflow_dont,/* complain_on_overflow */ 873 bfd_elf_generic_reloc, /* special_function */ 874 "R_ARM_ABS32_NOI", /* name */ 875 FALSE, /* partial_inplace */ 876 0xffffffff, /* src_mask */ 877 0xffffffff, /* dst_mask */ 878 FALSE), /* pcrel_offset */ 879 880 HOWTO (R_ARM_REL32_NOI, /* type */ 881 0, /* rightshift */ 882 2, /* size (0 = byte, 1 = short, 2 = long) */ 883 32, /* bitsize */ 884 TRUE, /* pc_relative */ 885 0, /* bitpos */ 886 complain_overflow_dont,/* complain_on_overflow */ 887 bfd_elf_generic_reloc, /* special_function */ 888 "R_ARM_REL32_NOI", /* name */ 889 FALSE, /* partial_inplace */ 890 0xffffffff, /* src_mask */ 891 0xffffffff, /* dst_mask */ 892 FALSE), /* pcrel_offset */ 893 894 /* Group relocations. 
*/ 895 896 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */ 897 0, /* rightshift */ 898 2, /* size (0 = byte, 1 = short, 2 = long) */ 899 32, /* bitsize */ 900 TRUE, /* pc_relative */ 901 0, /* bitpos */ 902 complain_overflow_dont,/* complain_on_overflow */ 903 bfd_elf_generic_reloc, /* special_function */ 904 "R_ARM_ALU_PC_G0_NC", /* name */ 905 FALSE, /* partial_inplace */ 906 0xffffffff, /* src_mask */ 907 0xffffffff, /* dst_mask */ 908 TRUE), /* pcrel_offset */ 909 910 HOWTO (R_ARM_ALU_PC_G0, /* type */ 911 0, /* rightshift */ 912 2, /* size (0 = byte, 1 = short, 2 = long) */ 913 32, /* bitsize */ 914 TRUE, /* pc_relative */ 915 0, /* bitpos */ 916 complain_overflow_dont,/* complain_on_overflow */ 917 bfd_elf_generic_reloc, /* special_function */ 918 "R_ARM_ALU_PC_G0", /* name */ 919 FALSE, /* partial_inplace */ 920 0xffffffff, /* src_mask */ 921 0xffffffff, /* dst_mask */ 922 TRUE), /* pcrel_offset */ 923 924 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */ 925 0, /* rightshift */ 926 2, /* size (0 = byte, 1 = short, 2 = long) */ 927 32, /* bitsize */ 928 TRUE, /* pc_relative */ 929 0, /* bitpos */ 930 complain_overflow_dont,/* complain_on_overflow */ 931 bfd_elf_generic_reloc, /* special_function */ 932 "R_ARM_ALU_PC_G1_NC", /* name */ 933 FALSE, /* partial_inplace */ 934 0xffffffff, /* src_mask */ 935 0xffffffff, /* dst_mask */ 936 TRUE), /* pcrel_offset */ 937 938 HOWTO (R_ARM_ALU_PC_G1, /* type */ 939 0, /* rightshift */ 940 2, /* size (0 = byte, 1 = short, 2 = long) */ 941 32, /* bitsize */ 942 TRUE, /* pc_relative */ 943 0, /* bitpos */ 944 complain_overflow_dont,/* complain_on_overflow */ 945 bfd_elf_generic_reloc, /* special_function */ 946 "R_ARM_ALU_PC_G1", /* name */ 947 FALSE, /* partial_inplace */ 948 0xffffffff, /* src_mask */ 949 0xffffffff, /* dst_mask */ 950 TRUE), /* pcrel_offset */ 951 952 HOWTO (R_ARM_ALU_PC_G2, /* type */ 953 0, /* rightshift */ 954 2, /* size (0 = byte, 1 = short, 2 = long) */ 955 32, /* bitsize */ 956 TRUE, /* pc_relative */ 957 0, /* bitpos */ 958 complain_overflow_dont,/* complain_on_overflow */ 959 bfd_elf_generic_reloc, /* special_function */ 960 "R_ARM_ALU_PC_G2", /* name */ 961 FALSE, /* partial_inplace */ 962 0xffffffff, /* src_mask */ 963 0xffffffff, /* dst_mask */ 964 TRUE), /* pcrel_offset */ 965 966 HOWTO (R_ARM_LDR_PC_G1, /* type */ 967 0, /* rightshift */ 968 2, /* size (0 = byte, 1 = short, 2 = long) */ 969 32, /* bitsize */ 970 TRUE, /* pc_relative */ 971 0, /* bitpos */ 972 complain_overflow_dont,/* complain_on_overflow */ 973 bfd_elf_generic_reloc, /* special_function */ 974 "R_ARM_LDR_PC_G1", /* name */ 975 FALSE, /* partial_inplace */ 976 0xffffffff, /* src_mask */ 977 0xffffffff, /* dst_mask */ 978 TRUE), /* pcrel_offset */ 979 980 HOWTO (R_ARM_LDR_PC_G2, /* type */ 981 0, /* rightshift */ 982 2, /* size (0 = byte, 1 = short, 2 = long) */ 983 32, /* bitsize */ 984 TRUE, /* pc_relative */ 985 0, /* bitpos */ 986 complain_overflow_dont,/* complain_on_overflow */ 987 bfd_elf_generic_reloc, /* special_function */ 988 "R_ARM_LDR_PC_G2", /* name */ 989 FALSE, /* partial_inplace */ 990 0xffffffff, /* src_mask */ 991 0xffffffff, /* dst_mask */ 992 TRUE), /* pcrel_offset */ 993 994 HOWTO (R_ARM_LDRS_PC_G0, /* type */ 995 0, /* rightshift */ 996 2, /* size (0 = byte, 1 = short, 2 = long) */ 997 32, /* bitsize */ 998 TRUE, /* pc_relative */ 999 0, /* bitpos */ 1000 complain_overflow_dont,/* complain_on_overflow */ 1001 bfd_elf_generic_reloc, /* special_function */ 1002 "R_ARM_LDRS_PC_G0", /* name */ 1003 FALSE, /* partial_inplace */ 1004 0xffffffff, /* 
src_mask */ 1005 0xffffffff, /* dst_mask */ 1006 TRUE), /* pcrel_offset */ 1007 1008 HOWTO (R_ARM_LDRS_PC_G1, /* type */ 1009 0, /* rightshift */ 1010 2, /* size (0 = byte, 1 = short, 2 = long) */ 1011 32, /* bitsize */ 1012 TRUE, /* pc_relative */ 1013 0, /* bitpos */ 1014 complain_overflow_dont,/* complain_on_overflow */ 1015 bfd_elf_generic_reloc, /* special_function */ 1016 "R_ARM_LDRS_PC_G1", /* name */ 1017 FALSE, /* partial_inplace */ 1018 0xffffffff, /* src_mask */ 1019 0xffffffff, /* dst_mask */ 1020 TRUE), /* pcrel_offset */ 1021 1022 HOWTO (R_ARM_LDRS_PC_G2, /* type */ 1023 0, /* rightshift */ 1024 2, /* size (0 = byte, 1 = short, 2 = long) */ 1025 32, /* bitsize */ 1026 TRUE, /* pc_relative */ 1027 0, /* bitpos */ 1028 complain_overflow_dont,/* complain_on_overflow */ 1029 bfd_elf_generic_reloc, /* special_function */ 1030 "R_ARM_LDRS_PC_G2", /* name */ 1031 FALSE, /* partial_inplace */ 1032 0xffffffff, /* src_mask */ 1033 0xffffffff, /* dst_mask */ 1034 TRUE), /* pcrel_offset */ 1035 1036 HOWTO (R_ARM_LDC_PC_G0, /* type */ 1037 0, /* rightshift */ 1038 2, /* size (0 = byte, 1 = short, 2 = long) */ 1039 32, /* bitsize */ 1040 TRUE, /* pc_relative */ 1041 0, /* bitpos */ 1042 complain_overflow_dont,/* complain_on_overflow */ 1043 bfd_elf_generic_reloc, /* special_function */ 1044 "R_ARM_LDC_PC_G0", /* name */ 1045 FALSE, /* partial_inplace */ 1046 0xffffffff, /* src_mask */ 1047 0xffffffff, /* dst_mask */ 1048 TRUE), /* pcrel_offset */ 1049 1050 HOWTO (R_ARM_LDC_PC_G1, /* type */ 1051 0, /* rightshift */ 1052 2, /* size (0 = byte, 1 = short, 2 = long) */ 1053 32, /* bitsize */ 1054 TRUE, /* pc_relative */ 1055 0, /* bitpos */ 1056 complain_overflow_dont,/* complain_on_overflow */ 1057 bfd_elf_generic_reloc, /* special_function */ 1058 "R_ARM_LDC_PC_G1", /* name */ 1059 FALSE, /* partial_inplace */ 1060 0xffffffff, /* src_mask */ 1061 0xffffffff, /* dst_mask */ 1062 TRUE), /* pcrel_offset */ 1063 1064 HOWTO (R_ARM_LDC_PC_G2, /* type */ 1065 0, /* rightshift */ 1066 2, /* size (0 = byte, 1 = short, 2 = long) */ 1067 32, /* bitsize */ 1068 TRUE, /* pc_relative */ 1069 0, /* bitpos */ 1070 complain_overflow_dont,/* complain_on_overflow */ 1071 bfd_elf_generic_reloc, /* special_function */ 1072 "R_ARM_LDC_PC_G2", /* name */ 1073 FALSE, /* partial_inplace */ 1074 0xffffffff, /* src_mask */ 1075 0xffffffff, /* dst_mask */ 1076 TRUE), /* pcrel_offset */ 1077 1078 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */ 1079 0, /* rightshift */ 1080 2, /* size (0 = byte, 1 = short, 2 = long) */ 1081 32, /* bitsize */ 1082 TRUE, /* pc_relative */ 1083 0, /* bitpos */ 1084 complain_overflow_dont,/* complain_on_overflow */ 1085 bfd_elf_generic_reloc, /* special_function */ 1086 "R_ARM_ALU_SB_G0_NC", /* name */ 1087 FALSE, /* partial_inplace */ 1088 0xffffffff, /* src_mask */ 1089 0xffffffff, /* dst_mask */ 1090 TRUE), /* pcrel_offset */ 1091 1092 HOWTO (R_ARM_ALU_SB_G0, /* type */ 1093 0, /* rightshift */ 1094 2, /* size (0 = byte, 1 = short, 2 = long) */ 1095 32, /* bitsize */ 1096 TRUE, /* pc_relative */ 1097 0, /* bitpos */ 1098 complain_overflow_dont,/* complain_on_overflow */ 1099 bfd_elf_generic_reloc, /* special_function */ 1100 "R_ARM_ALU_SB_G0", /* name */ 1101 FALSE, /* partial_inplace */ 1102 0xffffffff, /* src_mask */ 1103 0xffffffff, /* dst_mask */ 1104 TRUE), /* pcrel_offset */ 1105 1106 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */ 1107 0, /* rightshift */ 1108 2, /* size (0 = byte, 1 = short, 2 = long) */ 1109 32, /* bitsize */ 1110 TRUE, /* pc_relative */ 1111 0, /* bitpos */ 1112 
complain_overflow_dont,/* complain_on_overflow */ 1113 bfd_elf_generic_reloc, /* special_function */ 1114 "R_ARM_ALU_SB_G1_NC", /* name */ 1115 FALSE, /* partial_inplace */ 1116 0xffffffff, /* src_mask */ 1117 0xffffffff, /* dst_mask */ 1118 TRUE), /* pcrel_offset */ 1119 1120 HOWTO (R_ARM_ALU_SB_G1, /* type */ 1121 0, /* rightshift */ 1122 2, /* size (0 = byte, 1 = short, 2 = long) */ 1123 32, /* bitsize */ 1124 TRUE, /* pc_relative */ 1125 0, /* bitpos */ 1126 complain_overflow_dont,/* complain_on_overflow */ 1127 bfd_elf_generic_reloc, /* special_function */ 1128 "R_ARM_ALU_SB_G1", /* name */ 1129 FALSE, /* partial_inplace */ 1130 0xffffffff, /* src_mask */ 1131 0xffffffff, /* dst_mask */ 1132 TRUE), /* pcrel_offset */ 1133 1134 HOWTO (R_ARM_ALU_SB_G2, /* type */ 1135 0, /* rightshift */ 1136 2, /* size (0 = byte, 1 = short, 2 = long) */ 1137 32, /* bitsize */ 1138 TRUE, /* pc_relative */ 1139 0, /* bitpos */ 1140 complain_overflow_dont,/* complain_on_overflow */ 1141 bfd_elf_generic_reloc, /* special_function */ 1142 "R_ARM_ALU_SB_G2", /* name */ 1143 FALSE, /* partial_inplace */ 1144 0xffffffff, /* src_mask */ 1145 0xffffffff, /* dst_mask */ 1146 TRUE), /* pcrel_offset */ 1147 1148 HOWTO (R_ARM_LDR_SB_G0, /* type */ 1149 0, /* rightshift */ 1150 2, /* size (0 = byte, 1 = short, 2 = long) */ 1151 32, /* bitsize */ 1152 TRUE, /* pc_relative */ 1153 0, /* bitpos */ 1154 complain_overflow_dont,/* complain_on_overflow */ 1155 bfd_elf_generic_reloc, /* special_function */ 1156 "R_ARM_LDR_SB_G0", /* name */ 1157 FALSE, /* partial_inplace */ 1158 0xffffffff, /* src_mask */ 1159 0xffffffff, /* dst_mask */ 1160 TRUE), /* pcrel_offset */ 1161 1162 HOWTO (R_ARM_LDR_SB_G1, /* type */ 1163 0, /* rightshift */ 1164 2, /* size (0 = byte, 1 = short, 2 = long) */ 1165 32, /* bitsize */ 1166 TRUE, /* pc_relative */ 1167 0, /* bitpos */ 1168 complain_overflow_dont,/* complain_on_overflow */ 1169 bfd_elf_generic_reloc, /* special_function */ 1170 "R_ARM_LDR_SB_G1", /* name */ 1171 FALSE, /* partial_inplace */ 1172 0xffffffff, /* src_mask */ 1173 0xffffffff, /* dst_mask */ 1174 TRUE), /* pcrel_offset */ 1175 1176 HOWTO (R_ARM_LDR_SB_G2, /* type */ 1177 0, /* rightshift */ 1178 2, /* size (0 = byte, 1 = short, 2 = long) */ 1179 32, /* bitsize */ 1180 TRUE, /* pc_relative */ 1181 0, /* bitpos */ 1182 complain_overflow_dont,/* complain_on_overflow */ 1183 bfd_elf_generic_reloc, /* special_function */ 1184 "R_ARM_LDR_SB_G2", /* name */ 1185 FALSE, /* partial_inplace */ 1186 0xffffffff, /* src_mask */ 1187 0xffffffff, /* dst_mask */ 1188 TRUE), /* pcrel_offset */ 1189 1190 HOWTO (R_ARM_LDRS_SB_G0, /* type */ 1191 0, /* rightshift */ 1192 2, /* size (0 = byte, 1 = short, 2 = long) */ 1193 32, /* bitsize */ 1194 TRUE, /* pc_relative */ 1195 0, /* bitpos */ 1196 complain_overflow_dont,/* complain_on_overflow */ 1197 bfd_elf_generic_reloc, /* special_function */ 1198 "R_ARM_LDRS_SB_G0", /* name */ 1199 FALSE, /* partial_inplace */ 1200 0xffffffff, /* src_mask */ 1201 0xffffffff, /* dst_mask */ 1202 TRUE), /* pcrel_offset */ 1203 1204 HOWTO (R_ARM_LDRS_SB_G1, /* type */ 1205 0, /* rightshift */ 1206 2, /* size (0 = byte, 1 = short, 2 = long) */ 1207 32, /* bitsize */ 1208 TRUE, /* pc_relative */ 1209 0, /* bitpos */ 1210 complain_overflow_dont,/* complain_on_overflow */ 1211 bfd_elf_generic_reloc, /* special_function */ 1212 "R_ARM_LDRS_SB_G1", /* name */ 1213 FALSE, /* partial_inplace */ 1214 0xffffffff, /* src_mask */ 1215 0xffffffff, /* dst_mask */ 1216 TRUE), /* pcrel_offset */ 1217 1218 HOWTO (R_ARM_LDRS_SB_G2, 
/* type */ 1219 0, /* rightshift */ 1220 2, /* size (0 = byte, 1 = short, 2 = long) */ 1221 32, /* bitsize */ 1222 TRUE, /* pc_relative */ 1223 0, /* bitpos */ 1224 complain_overflow_dont,/* complain_on_overflow */ 1225 bfd_elf_generic_reloc, /* special_function */ 1226 "R_ARM_LDRS_SB_G2", /* name */ 1227 FALSE, /* partial_inplace */ 1228 0xffffffff, /* src_mask */ 1229 0xffffffff, /* dst_mask */ 1230 TRUE), /* pcrel_offset */ 1231 1232 HOWTO (R_ARM_LDC_SB_G0, /* type */ 1233 0, /* rightshift */ 1234 2, /* size (0 = byte, 1 = short, 2 = long) */ 1235 32, /* bitsize */ 1236 TRUE, /* pc_relative */ 1237 0, /* bitpos */ 1238 complain_overflow_dont,/* complain_on_overflow */ 1239 bfd_elf_generic_reloc, /* special_function */ 1240 "R_ARM_LDC_SB_G0", /* name */ 1241 FALSE, /* partial_inplace */ 1242 0xffffffff, /* src_mask */ 1243 0xffffffff, /* dst_mask */ 1244 TRUE), /* pcrel_offset */ 1245 1246 HOWTO (R_ARM_LDC_SB_G1, /* type */ 1247 0, /* rightshift */ 1248 2, /* size (0 = byte, 1 = short, 2 = long) */ 1249 32, /* bitsize */ 1250 TRUE, /* pc_relative */ 1251 0, /* bitpos */ 1252 complain_overflow_dont,/* complain_on_overflow */ 1253 bfd_elf_generic_reloc, /* special_function */ 1254 "R_ARM_LDC_SB_G1", /* name */ 1255 FALSE, /* partial_inplace */ 1256 0xffffffff, /* src_mask */ 1257 0xffffffff, /* dst_mask */ 1258 TRUE), /* pcrel_offset */ 1259 1260 HOWTO (R_ARM_LDC_SB_G2, /* type */ 1261 0, /* rightshift */ 1262 2, /* size (0 = byte, 1 = short, 2 = long) */ 1263 32, /* bitsize */ 1264 TRUE, /* pc_relative */ 1265 0, /* bitpos */ 1266 complain_overflow_dont,/* complain_on_overflow */ 1267 bfd_elf_generic_reloc, /* special_function */ 1268 "R_ARM_LDC_SB_G2", /* name */ 1269 FALSE, /* partial_inplace */ 1270 0xffffffff, /* src_mask */ 1271 0xffffffff, /* dst_mask */ 1272 TRUE), /* pcrel_offset */ 1273 1274 /* End of group relocations. 
*/ 1275 1276 HOWTO (R_ARM_MOVW_BREL_NC, /* type */ 1277 0, /* rightshift */ 1278 2, /* size (0 = byte, 1 = short, 2 = long) */ 1279 16, /* bitsize */ 1280 FALSE, /* pc_relative */ 1281 0, /* bitpos */ 1282 complain_overflow_dont,/* complain_on_overflow */ 1283 bfd_elf_generic_reloc, /* special_function */ 1284 "R_ARM_MOVW_BREL_NC", /* name */ 1285 FALSE, /* partial_inplace */ 1286 0x0000ffff, /* src_mask */ 1287 0x0000ffff, /* dst_mask */ 1288 FALSE), /* pcrel_offset */ 1289 1290 HOWTO (R_ARM_MOVT_BREL, /* type */ 1291 0, /* rightshift */ 1292 2, /* size (0 = byte, 1 = short, 2 = long) */ 1293 16, /* bitsize */ 1294 FALSE, /* pc_relative */ 1295 0, /* bitpos */ 1296 complain_overflow_bitfield,/* complain_on_overflow */ 1297 bfd_elf_generic_reloc, /* special_function */ 1298 "R_ARM_MOVT_BREL", /* name */ 1299 FALSE, /* partial_inplace */ 1300 0x0000ffff, /* src_mask */ 1301 0x0000ffff, /* dst_mask */ 1302 FALSE), /* pcrel_offset */ 1303 1304 HOWTO (R_ARM_MOVW_BREL, /* type */ 1305 0, /* rightshift */ 1306 2, /* size (0 = byte, 1 = short, 2 = long) */ 1307 16, /* bitsize */ 1308 FALSE, /* pc_relative */ 1309 0, /* bitpos */ 1310 complain_overflow_dont,/* complain_on_overflow */ 1311 bfd_elf_generic_reloc, /* special_function */ 1312 "R_ARM_MOVW_BREL", /* name */ 1313 FALSE, /* partial_inplace */ 1314 0x0000ffff, /* src_mask */ 1315 0x0000ffff, /* dst_mask */ 1316 FALSE), /* pcrel_offset */ 1317 1318 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */ 1319 0, /* rightshift */ 1320 2, /* size (0 = byte, 1 = short, 2 = long) */ 1321 16, /* bitsize */ 1322 FALSE, /* pc_relative */ 1323 0, /* bitpos */ 1324 complain_overflow_dont,/* complain_on_overflow */ 1325 bfd_elf_generic_reloc, /* special_function */ 1326 "R_ARM_THM_MOVW_BREL_NC",/* name */ 1327 FALSE, /* partial_inplace */ 1328 0x040f70ff, /* src_mask */ 1329 0x040f70ff, /* dst_mask */ 1330 FALSE), /* pcrel_offset */ 1331 1332 HOWTO (R_ARM_THM_MOVT_BREL, /* type */ 1333 0, /* rightshift */ 1334 2, /* size (0 = byte, 1 = short, 2 = long) */ 1335 16, /* bitsize */ 1336 FALSE, /* pc_relative */ 1337 0, /* bitpos */ 1338 complain_overflow_bitfield,/* complain_on_overflow */ 1339 bfd_elf_generic_reloc, /* special_function */ 1340 "R_ARM_THM_MOVT_BREL", /* name */ 1341 FALSE, /* partial_inplace */ 1342 0x040f70ff, /* src_mask */ 1343 0x040f70ff, /* dst_mask */ 1344 FALSE), /* pcrel_offset */ 1345 1346 HOWTO (R_ARM_THM_MOVW_BREL, /* type */ 1347 0, /* rightshift */ 1348 2, /* size (0 = byte, 1 = short, 2 = long) */ 1349 16, /* bitsize */ 1350 FALSE, /* pc_relative */ 1351 0, /* bitpos */ 1352 complain_overflow_dont,/* complain_on_overflow */ 1353 bfd_elf_generic_reloc, /* special_function */ 1354 "R_ARM_THM_MOVW_BREL", /* name */ 1355 FALSE, /* partial_inplace */ 1356 0x040f70ff, /* src_mask */ 1357 0x040f70ff, /* dst_mask */ 1358 FALSE), /* pcrel_offset */ 1359 1360 HOWTO (R_ARM_TLS_GOTDESC, /* type */ 1361 0, /* rightshift */ 1362 2, /* size (0 = byte, 1 = short, 2 = long) */ 1363 32, /* bitsize */ 1364 FALSE, /* pc_relative */ 1365 0, /* bitpos */ 1366 complain_overflow_bitfield,/* complain_on_overflow */ 1367 NULL, /* special_function */ 1368 "R_ARM_TLS_GOTDESC", /* name */ 1369 TRUE, /* partial_inplace */ 1370 0xffffffff, /* src_mask */ 1371 0xffffffff, /* dst_mask */ 1372 FALSE), /* pcrel_offset */ 1373 1374 HOWTO (R_ARM_TLS_CALL, /* type */ 1375 0, /* rightshift */ 1376 2, /* size (0 = byte, 1 = short, 2 = long) */ 1377 24, /* bitsize */ 1378 FALSE, /* pc_relative */ 1379 0, /* bitpos */ 1380 complain_overflow_dont,/* complain_on_overflow */ 1381 
bfd_elf_generic_reloc, /* special_function */ 1382 "R_ARM_TLS_CALL", /* name */ 1383 FALSE, /* partial_inplace */ 1384 0x00ffffff, /* src_mask */ 1385 0x00ffffff, /* dst_mask */ 1386 FALSE), /* pcrel_offset */ 1387 1388 HOWTO (R_ARM_TLS_DESCSEQ, /* type */ 1389 0, /* rightshift */ 1390 2, /* size (0 = byte, 1 = short, 2 = long) */ 1391 0, /* bitsize */ 1392 FALSE, /* pc_relative */ 1393 0, /* bitpos */ 1394 complain_overflow_bitfield,/* complain_on_overflow */ 1395 bfd_elf_generic_reloc, /* special_function */ 1396 "R_ARM_TLS_DESCSEQ", /* name */ 1397 FALSE, /* partial_inplace */ 1398 0x00000000, /* src_mask */ 1399 0x00000000, /* dst_mask */ 1400 FALSE), /* pcrel_offset */ 1401 1402 HOWTO (R_ARM_THM_TLS_CALL, /* type */ 1403 0, /* rightshift */ 1404 2, /* size (0 = byte, 1 = short, 2 = long) */ 1405 24, /* bitsize */ 1406 FALSE, /* pc_relative */ 1407 0, /* bitpos */ 1408 complain_overflow_dont,/* complain_on_overflow */ 1409 bfd_elf_generic_reloc, /* special_function */ 1410 "R_ARM_THM_TLS_CALL", /* name */ 1411 FALSE, /* partial_inplace */ 1412 0x07ff07ff, /* src_mask */ 1413 0x07ff07ff, /* dst_mask */ 1414 FALSE), /* pcrel_offset */ 1415 1416 HOWTO (R_ARM_PLT32_ABS, /* type */ 1417 0, /* rightshift */ 1418 2, /* size (0 = byte, 1 = short, 2 = long) */ 1419 32, /* bitsize */ 1420 FALSE, /* pc_relative */ 1421 0, /* bitpos */ 1422 complain_overflow_dont,/* complain_on_overflow */ 1423 bfd_elf_generic_reloc, /* special_function */ 1424 "R_ARM_PLT32_ABS", /* name */ 1425 FALSE, /* partial_inplace */ 1426 0xffffffff, /* src_mask */ 1427 0xffffffff, /* dst_mask */ 1428 FALSE), /* pcrel_offset */ 1429 1430 HOWTO (R_ARM_GOT_ABS, /* type */ 1431 0, /* rightshift */ 1432 2, /* size (0 = byte, 1 = short, 2 = long) */ 1433 32, /* bitsize */ 1434 FALSE, /* pc_relative */ 1435 0, /* bitpos */ 1436 complain_overflow_dont,/* complain_on_overflow */ 1437 bfd_elf_generic_reloc, /* special_function */ 1438 "R_ARM_GOT_ABS", /* name */ 1439 FALSE, /* partial_inplace */ 1440 0xffffffff, /* src_mask */ 1441 0xffffffff, /* dst_mask */ 1442 FALSE), /* pcrel_offset */ 1443 1444 HOWTO (R_ARM_GOT_PREL, /* type */ 1445 0, /* rightshift */ 1446 2, /* size (0 = byte, 1 = short, 2 = long) */ 1447 32, /* bitsize */ 1448 TRUE, /* pc_relative */ 1449 0, /* bitpos */ 1450 complain_overflow_dont, /* complain_on_overflow */ 1451 bfd_elf_generic_reloc, /* special_function */ 1452 "R_ARM_GOT_PREL", /* name */ 1453 FALSE, /* partial_inplace */ 1454 0xffffffff, /* src_mask */ 1455 0xffffffff, /* dst_mask */ 1456 TRUE), /* pcrel_offset */ 1457 1458 HOWTO (R_ARM_GOT_BREL12, /* type */ 1459 0, /* rightshift */ 1460 2, /* size (0 = byte, 1 = short, 2 = long) */ 1461 12, /* bitsize */ 1462 FALSE, /* pc_relative */ 1463 0, /* bitpos */ 1464 complain_overflow_bitfield,/* complain_on_overflow */ 1465 bfd_elf_generic_reloc, /* special_function */ 1466 "R_ARM_GOT_BREL12", /* name */ 1467 FALSE, /* partial_inplace */ 1468 0x00000fff, /* src_mask */ 1469 0x00000fff, /* dst_mask */ 1470 FALSE), /* pcrel_offset */ 1471 1472 HOWTO (R_ARM_GOTOFF12, /* type */ 1473 0, /* rightshift */ 1474 2, /* size (0 = byte, 1 = short, 2 = long) */ 1475 12, /* bitsize */ 1476 FALSE, /* pc_relative */ 1477 0, /* bitpos */ 1478 complain_overflow_bitfield,/* complain_on_overflow */ 1479 bfd_elf_generic_reloc, /* special_function */ 1480 "R_ARM_GOTOFF12", /* name */ 1481 FALSE, /* partial_inplace */ 1482 0x00000fff, /* src_mask */ 1483 0x00000fff, /* dst_mask */ 1484 FALSE), /* pcrel_offset */ 1485 1486 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future 
GOT-load optimizations */ 1487 1488 /* GNU extension to record C++ vtable member usage */ 1489 HOWTO (R_ARM_GNU_VTENTRY, /* type */ 1490 0, /* rightshift */ 1491 2, /* size (0 = byte, 1 = short, 2 = long) */ 1492 0, /* bitsize */ 1493 FALSE, /* pc_relative */ 1494 0, /* bitpos */ 1495 complain_overflow_dont, /* complain_on_overflow */ 1496 _bfd_elf_rel_vtable_reloc_fn, /* special_function */ 1497 "R_ARM_GNU_VTENTRY", /* name */ 1498 FALSE, /* partial_inplace */ 1499 0, /* src_mask */ 1500 0, /* dst_mask */ 1501 FALSE), /* pcrel_offset */ 1502 1503 /* GNU extension to record C++ vtable hierarchy */ 1504 HOWTO (R_ARM_GNU_VTINHERIT, /* type */ 1505 0, /* rightshift */ 1506 2, /* size (0 = byte, 1 = short, 2 = long) */ 1507 0, /* bitsize */ 1508 FALSE, /* pc_relative */ 1509 0, /* bitpos */ 1510 complain_overflow_dont, /* complain_on_overflow */ 1511 NULL, /* special_function */ 1512 "R_ARM_GNU_VTINHERIT", /* name */ 1513 FALSE, /* partial_inplace */ 1514 0, /* src_mask */ 1515 0, /* dst_mask */ 1516 FALSE), /* pcrel_offset */ 1517 1518 HOWTO (R_ARM_THM_JUMP11, /* type */ 1519 1, /* rightshift */ 1520 1, /* size (0 = byte, 1 = short, 2 = long) */ 1521 11, /* bitsize */ 1522 TRUE, /* pc_relative */ 1523 0, /* bitpos */ 1524 complain_overflow_signed, /* complain_on_overflow */ 1525 bfd_elf_generic_reloc, /* special_function */ 1526 "R_ARM_THM_JUMP11", /* name */ 1527 FALSE, /* partial_inplace */ 1528 0x000007ff, /* src_mask */ 1529 0x000007ff, /* dst_mask */ 1530 TRUE), /* pcrel_offset */ 1531 1532 HOWTO (R_ARM_THM_JUMP8, /* type */ 1533 1, /* rightshift */ 1534 1, /* size (0 = byte, 1 = short, 2 = long) */ 1535 8, /* bitsize */ 1536 TRUE, /* pc_relative */ 1537 0, /* bitpos */ 1538 complain_overflow_signed, /* complain_on_overflow */ 1539 bfd_elf_generic_reloc, /* special_function */ 1540 "R_ARM_THM_JUMP8", /* name */ 1541 FALSE, /* partial_inplace */ 1542 0x000000ff, /* src_mask */ 1543 0x000000ff, /* dst_mask */ 1544 TRUE), /* pcrel_offset */ 1545 1546 /* TLS relocations */ 1547 HOWTO (R_ARM_TLS_GD32, /* type */ 1548 0, /* rightshift */ 1549 2, /* size (0 = byte, 1 = short, 2 = long) */ 1550 32, /* bitsize */ 1551 FALSE, /* pc_relative */ 1552 0, /* bitpos */ 1553 complain_overflow_bitfield,/* complain_on_overflow */ 1554 NULL, /* special_function */ 1555 "R_ARM_TLS_GD32", /* name */ 1556 TRUE, /* partial_inplace */ 1557 0xffffffff, /* src_mask */ 1558 0xffffffff, /* dst_mask */ 1559 FALSE), /* pcrel_offset */ 1560 1561 HOWTO (R_ARM_TLS_LDM32, /* type */ 1562 0, /* rightshift */ 1563 2, /* size (0 = byte, 1 = short, 2 = long) */ 1564 32, /* bitsize */ 1565 FALSE, /* pc_relative */ 1566 0, /* bitpos */ 1567 complain_overflow_bitfield,/* complain_on_overflow */ 1568 bfd_elf_generic_reloc, /* special_function */ 1569 "R_ARM_TLS_LDM32", /* name */ 1570 TRUE, /* partial_inplace */ 1571 0xffffffff, /* src_mask */ 1572 0xffffffff, /* dst_mask */ 1573 FALSE), /* pcrel_offset */ 1574 1575 HOWTO (R_ARM_TLS_LDO32, /* type */ 1576 0, /* rightshift */ 1577 2, /* size (0 = byte, 1 = short, 2 = long) */ 1578 32, /* bitsize */ 1579 FALSE, /* pc_relative */ 1580 0, /* bitpos */ 1581 complain_overflow_bitfield,/* complain_on_overflow */ 1582 bfd_elf_generic_reloc, /* special_function */ 1583 "R_ARM_TLS_LDO32", /* name */ 1584 TRUE, /* partial_inplace */ 1585 0xffffffff, /* src_mask */ 1586 0xffffffff, /* dst_mask */ 1587 FALSE), /* pcrel_offset */ 1588 1589 HOWTO (R_ARM_TLS_IE32, /* type */ 1590 0, /* rightshift */ 1591 2, /* size (0 = byte, 1 = short, 2 = long) */ 1592 32, /* bitsize */ 1593 FALSE, /* 
pc_relative */ 1594 0, /* bitpos */ 1595 complain_overflow_bitfield,/* complain_on_overflow */ 1596 NULL, /* special_function */ 1597 "R_ARM_TLS_IE32", /* name */ 1598 TRUE, /* partial_inplace */ 1599 0xffffffff, /* src_mask */ 1600 0xffffffff, /* dst_mask */ 1601 FALSE), /* pcrel_offset */ 1602 1603 HOWTO (R_ARM_TLS_LE32, /* type */ 1604 0, /* rightshift */ 1605 2, /* size (0 = byte, 1 = short, 2 = long) */ 1606 32, /* bitsize */ 1607 FALSE, /* pc_relative */ 1608 0, /* bitpos */ 1609 complain_overflow_bitfield,/* complain_on_overflow */ 1610 bfd_elf_generic_reloc, /* special_function */ 1611 "R_ARM_TLS_LE32", /* name */ 1612 TRUE, /* partial_inplace */ 1613 0xffffffff, /* src_mask */ 1614 0xffffffff, /* dst_mask */ 1615 FALSE), /* pcrel_offset */ 1616 1617 HOWTO (R_ARM_TLS_LDO12, /* type */ 1618 0, /* rightshift */ 1619 2, /* size (0 = byte, 1 = short, 2 = long) */ 1620 12, /* bitsize */ 1621 FALSE, /* pc_relative */ 1622 0, /* bitpos */ 1623 complain_overflow_bitfield,/* complain_on_overflow */ 1624 bfd_elf_generic_reloc, /* special_function */ 1625 "R_ARM_TLS_LDO12", /* name */ 1626 FALSE, /* partial_inplace */ 1627 0x00000fff, /* src_mask */ 1628 0x00000fff, /* dst_mask */ 1629 FALSE), /* pcrel_offset */ 1630 1631 HOWTO (R_ARM_TLS_LE12, /* type */ 1632 0, /* rightshift */ 1633 2, /* size (0 = byte, 1 = short, 2 = long) */ 1634 12, /* bitsize */ 1635 FALSE, /* pc_relative */ 1636 0, /* bitpos */ 1637 complain_overflow_bitfield,/* complain_on_overflow */ 1638 bfd_elf_generic_reloc, /* special_function */ 1639 "R_ARM_TLS_LE12", /* name */ 1640 FALSE, /* partial_inplace */ 1641 0x00000fff, /* src_mask */ 1642 0x00000fff, /* dst_mask */ 1643 FALSE), /* pcrel_offset */ 1644 1645 HOWTO (R_ARM_TLS_IE12GP, /* type */ 1646 0, /* rightshift */ 1647 2, /* size (0 = byte, 1 = short, 2 = long) */ 1648 12, /* bitsize */ 1649 FALSE, /* pc_relative */ 1650 0, /* bitpos */ 1651 complain_overflow_bitfield,/* complain_on_overflow */ 1652 bfd_elf_generic_reloc, /* special_function */ 1653 "R_ARM_TLS_IE12GP", /* name */ 1654 FALSE, /* partial_inplace */ 1655 0x00000fff, /* src_mask */ 1656 0x00000fff, /* dst_mask */ 1657 FALSE), /* pcrel_offset */ 1658 1659 /* 112-127 private relocations. */ 1660 EMPTY_HOWTO (112), 1661 EMPTY_HOWTO (113), 1662 EMPTY_HOWTO (114), 1663 EMPTY_HOWTO (115), 1664 EMPTY_HOWTO (116), 1665 EMPTY_HOWTO (117), 1666 EMPTY_HOWTO (118), 1667 EMPTY_HOWTO (119), 1668 EMPTY_HOWTO (120), 1669 EMPTY_HOWTO (121), 1670 EMPTY_HOWTO (122), 1671 EMPTY_HOWTO (123), 1672 EMPTY_HOWTO (124), 1673 EMPTY_HOWTO (125), 1674 EMPTY_HOWTO (126), 1675 EMPTY_HOWTO (127), 1676 1677 /* R_ARM_ME_TOO, obsolete. 
*/ 1678 EMPTY_HOWTO (128), 1679 1680 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */ 1681 0, /* rightshift */ 1682 1, /* size (0 = byte, 1 = short, 2 = long) */ 1683 0, /* bitsize */ 1684 FALSE, /* pc_relative */ 1685 0, /* bitpos */ 1686 complain_overflow_bitfield,/* complain_on_overflow */ 1687 bfd_elf_generic_reloc, /* special_function */ 1688 "R_ARM_THM_TLS_DESCSEQ",/* name */ 1689 FALSE, /* partial_inplace */ 1690 0x00000000, /* src_mask */ 1691 0x00000000, /* dst_mask */ 1692 FALSE), /* pcrel_offset */ 1693 }; 1694 1695 /* 160 onwards: */ 1696 static reloc_howto_type elf32_arm_howto_table_2[1] = 1697 { 1698 HOWTO (R_ARM_IRELATIVE, /* type */ 1699 0, /* rightshift */ 1700 2, /* size (0 = byte, 1 = short, 2 = long) */ 1701 32, /* bitsize */ 1702 FALSE, /* pc_relative */ 1703 0, /* bitpos */ 1704 complain_overflow_bitfield,/* complain_on_overflow */ 1705 bfd_elf_generic_reloc, /* special_function */ 1706 "R_ARM_IRELATIVE", /* name */ 1707 TRUE, /* partial_inplace */ 1708 0xffffffff, /* src_mask */ 1709 0xffffffff, /* dst_mask */ 1710 FALSE) /* pcrel_offset */ 1711 }; 1712 1713 /* 249-255 extended, currently unused, relocations: */ 1714 static reloc_howto_type elf32_arm_howto_table_3[4] = 1715 { 1716 HOWTO (R_ARM_RREL32, /* type */ 1717 0, /* rightshift */ 1718 0, /* size (0 = byte, 1 = short, 2 = long) */ 1719 0, /* bitsize */ 1720 FALSE, /* pc_relative */ 1721 0, /* bitpos */ 1722 complain_overflow_dont,/* complain_on_overflow */ 1723 bfd_elf_generic_reloc, /* special_function */ 1724 "R_ARM_RREL32", /* name */ 1725 FALSE, /* partial_inplace */ 1726 0, /* src_mask */ 1727 0, /* dst_mask */ 1728 FALSE), /* pcrel_offset */ 1729 1730 HOWTO (R_ARM_RABS32, /* type */ 1731 0, /* rightshift */ 1732 0, /* size (0 = byte, 1 = short, 2 = long) */ 1733 0, /* bitsize */ 1734 FALSE, /* pc_relative */ 1735 0, /* bitpos */ 1736 complain_overflow_dont,/* complain_on_overflow */ 1737 bfd_elf_generic_reloc, /* special_function */ 1738 "R_ARM_RABS32", /* name */ 1739 FALSE, /* partial_inplace */ 1740 0, /* src_mask */ 1741 0, /* dst_mask */ 1742 FALSE), /* pcrel_offset */ 1743 1744 HOWTO (R_ARM_RPC24, /* type */ 1745 0, /* rightshift */ 1746 0, /* size (0 = byte, 1 = short, 2 = long) */ 1747 0, /* bitsize */ 1748 FALSE, /* pc_relative */ 1749 0, /* bitpos */ 1750 complain_overflow_dont,/* complain_on_overflow */ 1751 bfd_elf_generic_reloc, /* special_function */ 1752 "R_ARM_RPC24", /* name */ 1753 FALSE, /* partial_inplace */ 1754 0, /* src_mask */ 1755 0, /* dst_mask */ 1756 FALSE), /* pcrel_offset */ 1757 1758 HOWTO (R_ARM_RBASE, /* type */ 1759 0, /* rightshift */ 1760 0, /* size (0 = byte, 1 = short, 2 = long) */ 1761 0, /* bitsize */ 1762 FALSE, /* pc_relative */ 1763 0, /* bitpos */ 1764 complain_overflow_dont,/* complain_on_overflow */ 1765 bfd_elf_generic_reloc, /* special_function */ 1766 "R_ARM_RBASE", /* name */ 1767 FALSE, /* partial_inplace */ 1768 0, /* src_mask */ 1769 0, /* dst_mask */ 1770 FALSE) /* pcrel_offset */ 1771 }; 1772 1773 static reloc_howto_type * 1774 elf32_arm_howto_from_type (unsigned int r_type) 1775 { 1776 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1)) 1777 return &elf32_arm_howto_table_1[r_type]; 1778 1779 if (r_type == R_ARM_IRELATIVE) 1780 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE]; 1781 1782 if (r_type >= R_ARM_RREL32 1783 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3)) 1784 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32]; 1785 1786 return NULL; 1787 } 1788 1789 static void 1790 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, 
arelent * bfd_reloc, 1791 Elf_Internal_Rela * elf_reloc) 1792 { 1793 unsigned int r_type; 1794 1795 r_type = ELF32_R_TYPE (elf_reloc->r_info); 1796 bfd_reloc->howto = elf32_arm_howto_from_type (r_type); 1797 } 1798 1799 struct elf32_arm_reloc_map 1800 { 1801 bfd_reloc_code_real_type bfd_reloc_val; 1802 unsigned char elf_reloc_val; 1803 }; 1804 1805 /* All entries in this list must also be present in elf32_arm_howto_table. */ 1806 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] = 1807 { 1808 {BFD_RELOC_NONE, R_ARM_NONE}, 1809 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24}, 1810 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL}, 1811 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24}, 1812 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25}, 1813 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22}, 1814 {BFD_RELOC_32, R_ARM_ABS32}, 1815 {BFD_RELOC_32_PCREL, R_ARM_REL32}, 1816 {BFD_RELOC_8, R_ARM_ABS8}, 1817 {BFD_RELOC_16, R_ARM_ABS16}, 1818 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12}, 1819 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5}, 1820 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24}, 1821 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL}, 1822 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11}, 1823 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19}, 1824 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8}, 1825 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6}, 1826 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT}, 1827 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT}, 1828 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE}, 1829 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32}, 1830 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC}, 1831 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL}, 1832 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32}, 1833 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32}, 1834 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1}, 1835 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32}, 1836 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32}, 1837 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31}, 1838 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2}, 1839 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32}, 1840 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC}, 1841 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL}, 1842 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL}, 1843 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ}, 1844 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ}, 1845 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC}, 1846 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32}, 1847 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32}, 1848 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32}, 1849 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32}, 1850 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32}, 1851 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32}, 1852 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32}, 1853 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32}, 1854 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE}, 1855 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT}, 1856 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY}, 1857 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC}, 1858 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS}, 1859 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC}, 1860 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL}, 1861 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC}, 1862 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS}, 1863 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC}, 1864 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL}, 1865 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC}, 1866 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0}, 1867 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC}, 1868 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1}, 1869 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2}, 1870 
{BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0}, 1871 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1}, 1872 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2}, 1873 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0}, 1874 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1}, 1875 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2}, 1876 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0}, 1877 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1}, 1878 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2}, 1879 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC}, 1880 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0}, 1881 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC}, 1882 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1}, 1883 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2}, 1884 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0}, 1885 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1}, 1886 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2}, 1887 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0}, 1888 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1}, 1889 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2}, 1890 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0}, 1891 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1}, 1892 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2}, 1893 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX} 1894 }; 1895 1896 static reloc_howto_type * 1897 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED, 1898 bfd_reloc_code_real_type code) 1899 { 1900 unsigned int i; 1901 1902 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++) 1903 if (elf32_arm_reloc_map[i].bfd_reloc_val == code) 1904 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val); 1905 1906 return NULL; 1907 } 1908 1909 static reloc_howto_type * 1910 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED, 1911 const char *r_name) 1912 { 1913 unsigned int i; 1914 1915 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++) 1916 if (elf32_arm_howto_table_1[i].name != NULL 1917 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0) 1918 return &elf32_arm_howto_table_1[i]; 1919 1920 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++) 1921 if (elf32_arm_howto_table_2[i].name != NULL 1922 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0) 1923 return &elf32_arm_howto_table_2[i]; 1924 1925 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++) 1926 if (elf32_arm_howto_table_3[i].name != NULL 1927 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0) 1928 return &elf32_arm_howto_table_3[i]; 1929 1930 return NULL; 1931 } 1932 1933 /* Support for core dump NOTE sections. */ 1934 1935 static bfd_boolean 1936 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note) 1937 { 1938 int offset; 1939 size_t size; 1940 1941 switch (note->descsz) 1942 { 1943 default: 1944 return FALSE; 1945 1946 case 148: /* Linux/ARM 32-bit. */ 1947 /* pr_cursig */ 1948 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12); 1949 1950 /* pr_pid */ 1951 elf_tdata (abfd)->core_lwpid = bfd_get_32 (abfd, note->descdata + 24); 1952 1953 /* pr_reg */ 1954 offset = 72; 1955 size = 72; 1956 1957 break; 1958 } 1959 1960 /* Make a ".reg/999" section. */ 1961 return _bfd_elfcore_make_pseudosection (abfd, ".reg", 1962 size, note->descpos + offset); 1963 } 1964 1965 static bfd_boolean 1966 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note) 1967 { 1968 switch (note->descsz) 1969 { 1970 default: 1971 return FALSE; 1972 1973 case 124: /* Linux/ARM elf_prpsinfo. 
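	   The 124-byte note lays out pr_pid at offset 12, pr_fname
	   (16 bytes) at offset 28 and pr_psargs (80 bytes) at offset 44,
	   which is what the reads below assume.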
*/ 1974 elf_tdata (abfd)->core_pid 1975 = bfd_get_32 (abfd, note->descdata + 12); 1976 elf_tdata (abfd)->core_program 1977 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16); 1978 elf_tdata (abfd)->core_command 1979 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80); 1980 } 1981 1982 /* Note that for some reason, a spurious space is tacked 1983 onto the end of the args in some (at least one anyway) 1984 implementations, so strip it off if it exists. */ 1985 { 1986 char *command = elf_tdata (abfd)->core_command; 1987 int n = strlen (command); 1988 1989 if (0 < n && command[n - 1] == ' ') 1990 command[n - 1] = '\0'; 1991 } 1992 1993 return TRUE; 1994 } 1995 1996 static char * 1997 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz, 1998 int note_type, ...) 1999 { 2000 switch (note_type) 2001 { 2002 default: 2003 return NULL; 2004 2005 case NT_PRPSINFO: 2006 { 2007 char data[124]; 2008 va_list ap; 2009 2010 va_start (ap, note_type); 2011 memset (data, 0, sizeof (data)); 2012 strncpy (data + 28, va_arg (ap, const char *), 16); 2013 strncpy (data + 44, va_arg (ap, const char *), 80); 2014 va_end (ap); 2015 2016 return elfcore_write_note (abfd, buf, bufsiz, 2017 "CORE", note_type, data, sizeof (data)); 2018 } 2019 2020 case NT_PRSTATUS: 2021 { 2022 char data[148]; 2023 va_list ap; 2024 long pid; 2025 int cursig; 2026 const void *greg; 2027 2028 va_start (ap, note_type); 2029 memset (data, 0, sizeof (data)); 2030 pid = va_arg (ap, long); 2031 bfd_put_32 (abfd, pid, data + 24); 2032 cursig = va_arg (ap, int); 2033 bfd_put_16 (abfd, cursig, data + 12); 2034 greg = va_arg (ap, const void *); 2035 memcpy (data + 72, greg, 72); 2036 va_end (ap); 2037 2038 return elfcore_write_note (abfd, buf, bufsiz, 2039 "CORE", note_type, data, sizeof (data)); 2040 } 2041 } 2042 } 2043 2044 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec 2045 #define TARGET_LITTLE_NAME "elf32-littlearm" 2046 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec 2047 #define TARGET_BIG_NAME "elf32-bigarm" 2048 2049 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus 2050 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo 2051 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note 2052 2053 typedef unsigned long int insn32; 2054 typedef unsigned short int insn16; 2055 2056 /* In lieu of proper flags, assume all EABIv4 or later objects are 2057 interworkable. */ 2058 #define INTERWORK_FLAG(abfd) \ 2059 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \ 2060 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \ 2061 || ((abfd)->flags & BFD_LINKER_CREATED)) 2062 2063 /* The linker script knows the section names for placement. 2064 The entry_names are used to do simple name mangling on the stubs. 2065 Given a function name, and its type, the stub can be found. The 2066 name can be changed. The only requirement is the %s be present. */ 2067 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t" 2068 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb" 2069 2070 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7" 2071 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm" 2072 2073 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer" 2074 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x" 2075 2076 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx" 2077 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d" 2078 2079 #define STUB_ENTRY_NAME "__%s_veneer" 2080 2081 /* The name of the dynamic interpreter. This is put in the .interp 2082 section. 
*/ 2083 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1" 2084 2085 static const unsigned long tls_trampoline [] = 2086 { 2087 0xe08e0000, /* add r0, lr, r0 */ 2088 0xe5901004, /* ldr r1, [r0,#4] */ 2089 0xe12fff11, /* bx r1 */ 2090 }; 2091 2092 static const unsigned long dl_tlsdesc_lazy_trampoline [] = 2093 { 2094 0xe52d2004, /* push {r2} */ 2095 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */ 2096 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */ 2097 0xe79f2002, /* 1: ldr r2, [pc, r2] */ 2098 0xe081100f, /* 2: add r1, pc */ 2099 0xe12fff12, /* bx r2 */ 2100 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8 2101 + dl_tlsdesc_lazy_resolver(GOT) */ 2102 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */ 2103 }; 2104 2105 #ifdef FOUR_WORD_PLT 2106 2107 /* The first entry in a procedure linkage table looks like 2108 this. It is set up so that any shared library function that is 2109 called before the relocation has been set up calls the dynamic 2110 linker first. */ 2111 static const bfd_vma elf32_arm_plt0_entry [] = 2112 { 2113 0xe52de004, /* str lr, [sp, #-4]! */ 2114 0xe59fe010, /* ldr lr, [pc, #16] */ 2115 0xe08fe00e, /* add lr, pc, lr */ 2116 0xe5bef008, /* ldr pc, [lr, #8]! */ 2117 }; 2118 2119 /* Subsequent entries in a procedure linkage table look like 2120 this. */ 2121 static const bfd_vma elf32_arm_plt_entry [] = 2122 { 2123 0xe28fc600, /* add ip, pc, #NN */ 2124 0xe28cca00, /* add ip, ip, #NN */ 2125 0xe5bcf000, /* ldr pc, [ip, #NN]! */ 2126 0x00000000, /* unused */ 2127 }; 2128 2129 #else 2130 2131 /* The first entry in a procedure linkage table looks like 2132 this. It is set up so that any shared library function that is 2133 called before the relocation has been set up calls the dynamic 2134 linker first. */ 2135 static const bfd_vma elf32_arm_plt0_entry [] = 2136 { 2137 0xe52de004, /* str lr, [sp, #-4]! */ 2138 0xe59fe004, /* ldr lr, [pc, #4] */ 2139 0xe08fe00e, /* add lr, pc, lr */ 2140 0xe5bef008, /* ldr pc, [lr, #8]! */ 2141 0x00000000, /* &GOT[0] - . */ 2142 }; 2143 2144 /* Subsequent entries in a procedure linkage table look like 2145 this. */ 2146 static const bfd_vma elf32_arm_plt_entry [] = 2147 { 2148 0xe28fc600, /* add ip, pc, #0xNN00000 */ 2149 0xe28cca00, /* add ip, ip, #0xNN000 */ 2150 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */ 2151 }; 2152 2153 #endif 2154 2155 /* The format of the first entry in the procedure linkage table 2156 for a VxWorks executable. */ 2157 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] = 2158 { 2159 0xe52dc008, /* str ip,[sp,#-8]! */ 2160 0xe59fc000, /* ldr ip,[pc] */ 2161 0xe59cf008, /* ldr pc,[ip,#8] */ 2162 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */ 2163 }; 2164 2165 /* The format of subsequent entries in a VxWorks executable. */ 2166 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] = 2167 { 2168 0xe59fc000, /* ldr ip,[pc] */ 2169 0xe59cf000, /* ldr pc,[ip] */ 2170 0x00000000, /* .long @got */ 2171 0xe59fc000, /* ldr ip,[pc] */ 2172 0xea000000, /* b _PLT */ 2173 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */ 2174 }; 2175 2176 /* The format of entries in a VxWorks shared library. */ 2177 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] = 2178 { 2179 0xe59fc000, /* ldr ip,[pc] */ 2180 0xe79cf009, /* ldr pc,[ip,r9] */ 2181 0x00000000, /* .long @got */ 2182 0xe59fc000, /* ldr ip,[pc] */ 2183 0xe599f008, /* ldr pc,[r9,#8] */ 2184 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */ 2185 }; 2186 2187 /* An initial stub used if the PLT entry is referenced from Thumb code. 
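   The stub is simply `bx pc; nop'.  In Thumb state the pc reads as the
   address of the `bx' plus 4, i.e. the start of the word-aligned ARM PLT
   code that immediately follows, and since bit 0 of that address is clear
   the `bx' also switches the core to ARM state.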
*/ 2188 #define PLT_THUMB_STUB_SIZE 4 2189 static const bfd_vma elf32_arm_plt_thumb_stub [] = 2190 { 2191 0x4778, /* bx pc */ 2192 0x46c0 /* nop */ 2193 }; 2194 2195 /* The entries in a PLT when using a DLL-based target with multiple 2196 address spaces. */ 2197 static const bfd_vma elf32_arm_symbian_plt_entry [] = 2198 { 2199 0xe51ff004, /* ldr pc, [pc, #-4] */ 2200 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */ 2201 }; 2202 2203 /* The first entry in a procedure linkage table looks like 2204 this. It is set up so that any shared library function that is 2205 called before the relocation has been set up calls the dynamic 2206 linker first. */ 2207 static const bfd_vma elf32_arm_nacl_plt0_entry [] = 2208 { 2209 /* First bundle: */ 2210 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */ 2211 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */ 2212 0xe08cc00f, /* add ip, ip, pc */ 2213 0xe52dc008, /* str ip, [sp, #-8]! */ 2214 /* Second bundle: */ 2215 0xe3ccc103, /* bic ip, ip, #0xc0000000 */ 2216 0xe59cc000, /* ldr ip, [ip] */ 2217 0xe3ccc13f, /* bic ip, ip, #0xc000000f */ 2218 0xe12fff1c, /* bx ip */ 2219 /* Third bundle: */ 2220 0xe320f000, /* nop */ 2221 0xe320f000, /* nop */ 2222 0xe320f000, /* nop */ 2223 /* .Lplt_tail: */ 2224 0xe50dc004, /* str ip, [sp, #-4] */ 2225 /* Fourth bundle: */ 2226 0xe3ccc103, /* bic ip, ip, #0xc0000000 */ 2227 0xe59cc000, /* ldr ip, [ip] */ 2228 0xe3ccc13f, /* bic ip, ip, #0xc000000f */ 2229 0xe12fff1c, /* bx ip */ 2230 }; 2231 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4) 2232 2233 /* Subsequent entries in a procedure linkage table look like this. */ 2234 static const bfd_vma elf32_arm_nacl_plt_entry [] = 2235 { 2236 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */ 2237 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */ 2238 0xe08cc00f, /* add ip, ip, pc */ 2239 0xea000000, /* b .Lplt_tail */ 2240 }; 2241 2242 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8) 2243 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8) 2244 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4) 2245 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4) 2246 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4) 2247 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4) 2248 2249 enum stub_insn_type 2250 { 2251 THUMB16_TYPE = 1, 2252 THUMB32_TYPE, 2253 ARM_TYPE, 2254 DATA_TYPE 2255 }; 2256 2257 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0} 2258 /* A bit of a hack. A Thumb conditional branch, in which the proper condition 2259 is inserted in arm_build_one_stub(). */ 2260 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1} 2261 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0} 2262 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)} 2263 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0} 2264 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)} 2265 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)} 2266 2267 typedef struct 2268 { 2269 bfd_vma data; 2270 enum stub_insn_type type; 2271 unsigned int r_type; 2272 int reloc_addend; 2273 } insn_sequence; 2274 2275 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx 2276 to reach the stub if necessary. */ 2277 static const insn_sequence elf32_arm_stub_long_branch_any_any[] = 2278 { 2279 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */ 2280 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ 2281 }; 2282 2283 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not 2284 available. 
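   The sequence loads the destination address from the literal word that
   follows (filled in via R_ARM_ABS32, with bit 0 set for a Thumb target)
   into ip and then uses `bx ip', which performs the ARM->Thumb state
   change that a plain branch cannot on v4t.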
*/ 2285 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] = 2286 { 2287 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */ 2288 ARM_INSN (0xe12fff1c), /* bx ip */ 2289 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ 2290 }; 2291 2292 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */ 2293 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] = 2294 { 2295 THUMB16_INSN (0xb401), /* push {r0} */ 2296 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */ 2297 THUMB16_INSN (0x4684), /* mov ip, r0 */ 2298 THUMB16_INSN (0xbc01), /* pop {r0} */ 2299 THUMB16_INSN (0x4760), /* bx ip */ 2300 THUMB16_INSN (0xbf00), /* nop */ 2301 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ 2302 }; 2303 2304 /* V4T Thumb -> Thumb long branch stub. Using the stack is not 2305 allowed. */ 2306 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] = 2307 { 2308 THUMB16_INSN (0x4778), /* bx pc */ 2309 THUMB16_INSN (0x46c0), /* nop */ 2310 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */ 2311 ARM_INSN (0xe12fff1c), /* bx ip */ 2312 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ 2313 }; 2314 2315 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not 2316 available. */ 2317 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] = 2318 { 2319 THUMB16_INSN (0x4778), /* bx pc */ 2320 THUMB16_INSN (0x46c0), /* nop */ 2321 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */ 2322 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ 2323 }; 2324 2325 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above 2326 one, when the destination is close enough. */ 2327 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] = 2328 { 2329 THUMB16_INSN (0x4778), /* bx pc */ 2330 THUMB16_INSN (0x46c0), /* nop */ 2331 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */ 2332 }; 2333 2334 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use 2335 blx to reach the stub if necessary. */ 2336 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] = 2337 { 2338 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */ 2339 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */ 2340 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */ 2341 }; 2342 2343 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use 2344 blx to reach the stub if necessary. We can not add into pc; 2345 it is not guaranteed to mode switch (different in ARMv6 and 2346 ARMv7). */ 2347 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] = 2348 { 2349 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */ 2350 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */ 2351 ARM_INSN (0xe12fff1c), /* bx ip */ 2352 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */ 2353 }; 2354 2355 /* V4T ARM -> ARM long branch stub, PIC. */ 2356 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] = 2357 { 2358 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */ 2359 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */ 2360 ARM_INSN (0xe12fff1c), /* bx ip */ 2361 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */ 2362 }; 2363 2364 /* V4T Thumb -> ARM long branch stub, PIC. 
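   As with the other PIC variants, no absolute address is embedded: the
   stub loads a pc-relative offset (the R_ARM_REL32 word) and adds the pc
   to it, so the veneer itself needs no run-time relocation.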
*/ 2365 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] = 2366 { 2367 THUMB16_INSN (0x4778), /* bx pc */ 2368 THUMB16_INSN (0x46c0), /* nop */ 2369 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */ 2370 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */ 2371 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */ 2372 }; 2373 2374 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile 2375 architectures. */ 2376 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] = 2377 { 2378 THUMB16_INSN (0xb401), /* push {r0} */ 2379 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */ 2380 THUMB16_INSN (0x46fc), /* mov ip, pc */ 2381 THUMB16_INSN (0x4484), /* add ip, r0 */ 2382 THUMB16_INSN (0xbc01), /* pop {r0} */ 2383 THUMB16_INSN (0x4760), /* bx ip */ 2384 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */ 2385 }; 2386 2387 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not 2388 allowed. */ 2389 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] = 2390 { 2391 THUMB16_INSN (0x4778), /* bx pc */ 2392 THUMB16_INSN (0x46c0), /* nop */ 2393 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */ 2394 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */ 2395 ARM_INSN (0xe12fff1c), /* bx ip */ 2396 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */ 2397 }; 2398 2399 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a 2400 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */ 2401 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] = 2402 { 2403 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */ 2404 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */ 2405 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */ 2406 }; 2407 2408 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a 2409 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */ 2410 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] = 2411 { 2412 THUMB16_INSN (0x4778), /* bx pc */ 2413 THUMB16_INSN (0x46c0), /* nop */ 2414 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */ 2415 ARM_INSN (0xe081f00f), /* add pc, r1, pc */ 2416 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */ 2417 }; 2418 2419 /* Cortex-A8 erratum-workaround stubs. */ 2420 2421 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we 2422 can't use a conditional branch to reach this stub). */ 2423 2424 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] = 2425 { 2426 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */ 2427 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */ 2428 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */ 2429 }; 2430 2431 /* Stub used for b.w and bl.w instructions. */ 2432 2433 static const insn_sequence elf32_arm_stub_a8_veneer_b[] = 2434 { 2435 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */ 2436 }; 2437 2438 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] = 2439 { 2440 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */ 2441 }; 2442 2443 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w 2444 instruction (which switches to ARM mode) to point to this stub. Jump to the 2445 real destination using an ARM-mode branch. */ 2446 2447 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] = 2448 { 2449 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. 
*/ 2450 }; 2451 2452 /* For each section group there can be a specially created linker section 2453 to hold the stubs for that group. The name of the stub section is based 2454 upon the name of another section within that group with the suffix below 2455 applied. 2456 2457 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to 2458 create what appeared to be a linker stub section when it actually 2459 contained user code/data. For example, consider this fragment: 2460 2461 const char * stubborn_problems[] = { "np" }; 2462 2463 If this is compiled with "-fPIC -fdata-sections" then gcc produces a 2464 section called: 2465 2466 .data.rel.local.stubborn_problems 2467 2468 This then causes problems in elf32_arm_build_stubs() as it triggers: 2469 2470 // Ignore non-stub sections. 2471 if (!strstr (stub_sec->name, STUB_SUFFIX)) 2472 continue; 2473 2474 And so the section would be ignored instead of being processed. Hence 2475 the change in definition of STUB_SUFFIX to a name that cannot be a valid 2476 C identifier. */ 2477 #define STUB_SUFFIX ".__stub" 2478 2479 /* One entry per long/short branch stub defined above. */ 2480 #define DEF_STUBS \ 2481 DEF_STUB(long_branch_any_any) \ 2482 DEF_STUB(long_branch_v4t_arm_thumb) \ 2483 DEF_STUB(long_branch_thumb_only) \ 2484 DEF_STUB(long_branch_v4t_thumb_thumb) \ 2485 DEF_STUB(long_branch_v4t_thumb_arm) \ 2486 DEF_STUB(short_branch_v4t_thumb_arm) \ 2487 DEF_STUB(long_branch_any_arm_pic) \ 2488 DEF_STUB(long_branch_any_thumb_pic) \ 2489 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \ 2490 DEF_STUB(long_branch_v4t_arm_thumb_pic) \ 2491 DEF_STUB(long_branch_v4t_thumb_arm_pic) \ 2492 DEF_STUB(long_branch_thumb_only_pic) \ 2493 DEF_STUB(long_branch_any_tls_pic) \ 2494 DEF_STUB(long_branch_v4t_thumb_tls_pic) \ 2495 DEF_STUB(a8_veneer_b_cond) \ 2496 DEF_STUB(a8_veneer_b) \ 2497 DEF_STUB(a8_veneer_bl) \ 2498 DEF_STUB(a8_veneer_blx) 2499 2500 #define DEF_STUB(x) arm_stub_##x, 2501 enum elf32_arm_stub_type 2502 { 2503 arm_stub_none, 2504 DEF_STUBS 2505 /* Note the first a8_veneer type. */ 2506 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond 2507 }; 2508 #undef DEF_STUB 2509 2510 typedef struct 2511 { 2512 const insn_sequence* template_sequence; 2513 int template_size; 2514 } stub_def; 2515 2516 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)}, 2517 static const stub_def stub_definitions[] = 2518 { 2519 {NULL, 0}, 2520 DEF_STUBS 2521 }; 2522 2523 struct elf32_arm_stub_hash_entry 2524 { 2525 /* Base hash table entry structure. */ 2526 struct bfd_hash_entry root; 2527 2528 /* The stub section. */ 2529 asection *stub_sec; 2530 2531 /* Offset within stub_sec of the beginning of this stub. */ 2532 bfd_vma stub_offset; 2533 2534 /* Given the symbol's value and its section we can determine its final 2535 value when building the stubs (so the stub knows where to jump). */ 2536 bfd_vma target_value; 2537 asection *target_section; 2538 2539 /* Offset to apply to relocation referencing target_value. */ 2540 bfd_vma target_addend; 2541 2542 /* The instruction which caused this stub to be generated (only valid for 2543 Cortex-A8 erratum workaround stubs at present). */ 2544 unsigned long orig_insn; 2545 2546 /* The stub type. */ 2547 enum elf32_arm_stub_type stub_type; 2548 /* Its encoding size in bytes. */ 2549 int stub_size; 2550 /* Its template. */ 2551 const insn_sequence *stub_template; 2552 /* The size of the template (number of entries).
*/ 2553 int stub_template_size; 2554 2555 /* The symbol table entry, if any, that this was derived from. */ 2556 struct elf32_arm_link_hash_entry *h; 2557 2558 /* Type of branch. */ 2559 enum arm_st_branch_type branch_type; 2560 2561 /* Where this stub is being called from, or, in the case of combined 2562 stub sections, the first input section in the group. */ 2563 asection *id_sec; 2564 2565 /* The name for the local symbol at the start of this stub. The 2566 stub name in the hash table has to be unique; this does not, so 2567 it can be friendlier. */ 2568 char *output_name; 2569 }; 2570 2571 /* Used to build a map of a section. This is required for mixed-endian 2572 code/data. */ 2573 2574 typedef struct elf32_elf_section_map 2575 { 2576 bfd_vma vma; 2577 char type; 2578 } 2579 elf32_arm_section_map; 2580 2581 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */ 2582 2583 typedef enum 2584 { 2585 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER, 2586 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER, 2587 VFP11_ERRATUM_ARM_VENEER, 2588 VFP11_ERRATUM_THUMB_VENEER 2589 } 2590 elf32_vfp11_erratum_type; 2591 2592 typedef struct elf32_vfp11_erratum_list 2593 { 2594 struct elf32_vfp11_erratum_list *next; 2595 bfd_vma vma; 2596 union 2597 { 2598 struct 2599 { 2600 struct elf32_vfp11_erratum_list *veneer; 2601 unsigned int vfp_insn; 2602 } b; 2603 struct 2604 { 2605 struct elf32_vfp11_erratum_list *branch; 2606 unsigned int id; 2607 } v; 2608 } u; 2609 elf32_vfp11_erratum_type type; 2610 } 2611 elf32_vfp11_erratum_list; 2612 2613 typedef enum 2614 { 2615 DELETE_EXIDX_ENTRY, 2616 INSERT_EXIDX_CANTUNWIND_AT_END 2617 } 2618 arm_unwind_edit_type; 2619 2620 /* A (sorted) list of edits to apply to an unwind table. */ 2621 typedef struct arm_unwind_table_edit 2622 { 2623 arm_unwind_edit_type type; 2624 /* Note: we sometimes want to insert an unwind entry corresponding to a 2625 section different from the one we're currently writing out, so record the 2626 (text) section this edit relates to here. */ 2627 asection *linked_section; 2628 unsigned int index; 2629 struct arm_unwind_table_edit *next; 2630 } 2631 arm_unwind_table_edit; 2632 2633 typedef struct _arm_elf_section_data 2634 { 2635 /* Information about mapping symbols. */ 2636 struct bfd_elf_section_data elf; 2637 unsigned int mapcount; 2638 unsigned int mapsize; 2639 elf32_arm_section_map *map; 2640 /* Information about CPU errata. */ 2641 unsigned int erratumcount; 2642 elf32_vfp11_erratum_list *erratumlist; 2643 /* Information about unwind tables. */ 2644 union 2645 { 2646 /* Unwind info attached to a text section. */ 2647 struct 2648 { 2649 asection *arm_exidx_sec; 2650 } text; 2651 2652 /* Unwind info attached to an .ARM.exidx section. */ 2653 struct 2654 { 2655 arm_unwind_table_edit *unwind_edit_list; 2656 arm_unwind_table_edit *unwind_edit_tail; 2657 } exidx; 2658 } u; 2659 } 2660 _arm_elf_section_data; 2661 2662 #define elf32_arm_section_data(sec) \ 2663 ((_arm_elf_section_data *) elf_section_data (sec)) 2664 2665 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum. 2666 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs), 2667 so may be created multiple times: we use an array of these entries whilst 2668 relaxing which we can refresh easily, then create stubs for each potentially 2669 erratum-triggering instruction once we've settled on a solution. 
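   (The array is kept in the link hash table as a8_erratum_fixes /
   num_a8_erratum_fixes and is used between elf32_arm_size_stubs() and
   elf32_arm_write_section().)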
*/ 2670 2671 struct a8_erratum_fix 2672 { 2673 bfd *input_bfd; 2674 asection *section; 2675 bfd_vma offset; 2676 bfd_vma addend; 2677 unsigned long orig_insn; 2678 char *stub_name; 2679 enum elf32_arm_stub_type stub_type; 2680 enum arm_st_branch_type branch_type; 2681 }; 2682 2683 /* A table of relocs applied to branches which might trigger Cortex-A8 2684 erratum. */ 2685 2686 struct a8_erratum_reloc 2687 { 2688 bfd_vma from; 2689 bfd_vma destination; 2690 struct elf32_arm_link_hash_entry *hash; 2691 const char *sym_name; 2692 unsigned int r_type; 2693 enum arm_st_branch_type branch_type; 2694 bfd_boolean non_a8_stub; 2695 }; 2696 2697 /* The size of the thread control block. */ 2698 #define TCB_SIZE 8 2699 2700 /* ARM-specific information about a PLT entry, over and above the usual 2701 gotplt_union. */ 2702 struct arm_plt_info 2703 { 2704 /* We reference count Thumb references to a PLT entry separately, 2705 so that we can emit the Thumb trampoline only if needed. */ 2706 bfd_signed_vma thumb_refcount; 2707 2708 /* Some references from Thumb code may be eliminated by BL->BLX 2709 conversion, so record them separately. */ 2710 bfd_signed_vma maybe_thumb_refcount; 2711 2712 /* How many of the recorded PLT accesses were from non-call relocations. 2713 This information is useful when deciding whether anything takes the 2714 address of an STT_GNU_IFUNC PLT. A value of 0 means that all 2715 non-call references to the function should resolve directly to the 2716 real runtime target. */ 2717 unsigned int noncall_refcount; 2718 2719 /* Since PLT entries have variable size if the Thumb prologue is 2720 used, we need to record the index into .got.plt instead of 2721 recomputing it from the PLT offset. */ 2722 bfd_signed_vma got_offset; 2723 }; 2724 2725 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */ 2726 struct arm_local_iplt_info 2727 { 2728 /* The information that is usually found in the generic ELF part of 2729 the hash table entry. */ 2730 union gotplt_union root; 2731 2732 /* The information that is usually found in the ARM-specific part of 2733 the hash table entry. */ 2734 struct arm_plt_info arm; 2735 2736 /* A list of all potential dynamic relocations against this symbol. */ 2737 struct elf_dyn_relocs *dyn_relocs; 2738 }; 2739 2740 struct elf_arm_obj_tdata 2741 { 2742 struct elf_obj_tdata root; 2743 2744 /* tls_type for each local got entry. */ 2745 char *local_got_tls_type; 2746 2747 /* GOTPLT entries for TLS descriptors. */ 2748 bfd_vma *local_tlsdesc_gotent; 2749 2750 /* Information for local symbols that need entries in .iplt. */ 2751 struct arm_local_iplt_info **local_iplt; 2752 2753 /* Zero to warn when linking objects with incompatible enum sizes. */ 2754 int no_enum_size_warning; 2755 2756 /* Zero to warn when linking objects with incompatible wchar_t sizes. 
*/ 2757 int no_wchar_size_warning; 2758 }; 2759 2760 #define elf_arm_tdata(bfd) \ 2761 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any) 2762 2763 #define elf32_arm_local_got_tls_type(bfd) \ 2764 (elf_arm_tdata (bfd)->local_got_tls_type) 2765 2766 #define elf32_arm_local_tlsdesc_gotent(bfd) \ 2767 (elf_arm_tdata (bfd)->local_tlsdesc_gotent) 2768 2769 #define elf32_arm_local_iplt(bfd) \ 2770 (elf_arm_tdata (bfd)->local_iplt) 2771 2772 #define is_arm_elf(bfd) \ 2773 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \ 2774 && elf_tdata (bfd) != NULL \ 2775 && elf_object_id (bfd) == ARM_ELF_DATA) 2776 2777 static bfd_boolean 2778 elf32_arm_mkobject (bfd *abfd) 2779 { 2780 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata), 2781 ARM_ELF_DATA); 2782 } 2783 2784 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent)) 2785 2786 /* Arm ELF linker hash entry. */ 2787 struct elf32_arm_link_hash_entry 2788 { 2789 struct elf_link_hash_entry root; 2790 2791 /* Track dynamic relocs copied for this symbol. */ 2792 struct elf_dyn_relocs *dyn_relocs; 2793 2794 /* ARM-specific PLT information. */ 2795 struct arm_plt_info plt; 2796 2797 #define GOT_UNKNOWN 0 2798 #define GOT_NORMAL 1 2799 #define GOT_TLS_GD 2 2800 #define GOT_TLS_IE 4 2801 #define GOT_TLS_GDESC 8 2802 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC)) 2803 unsigned int tls_type : 8; 2804 2805 /* True if the symbol's PLT entry is in .iplt rather than .plt. */ 2806 unsigned int is_iplt : 1; 2807 2808 unsigned int unused : 23; 2809 2810 /* Offset of the GOTPLT entry reserved for the TLS descriptor, 2811 starting at the end of the jump table. */ 2812 bfd_vma tlsdesc_got; 2813 2814 /* The symbol marking the real symbol location for exported thumb 2815 symbols with Arm stubs. */ 2816 struct elf_link_hash_entry *export_glue; 2817 2818 /* A pointer to the most recently used stub hash entry against this 2819 symbol. */ 2820 struct elf32_arm_stub_hash_entry *stub_cache; 2821 }; 2822 2823 /* Traverse an arm ELF linker hash table. */ 2824 #define elf32_arm_link_hash_traverse(table, func, info) \ 2825 (elf_link_hash_traverse \ 2826 (&(table)->root, \ 2827 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \ 2828 (info))) 2829 2830 /* Get the ARM elf linker hash table from a link_info structure. */ 2831 #define elf32_arm_hash_table(info) \ 2832 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \ 2833 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL) 2834 2835 #define arm_stub_hash_lookup(table, string, create, copy) \ 2836 ((struct elf32_arm_stub_hash_entry *) \ 2837 bfd_hash_lookup ((table), (string), (create), (copy))) 2838 2839 /* Array to keep track of which stub sections have been created, and 2840 information on stub grouping. */ 2841 struct map_stub 2842 { 2843 /* This is the section to which stubs in the group will be 2844 attached. */ 2845 asection *link_sec; 2846 /* The stub section. */ 2847 asection *stub_sec; 2848 }; 2849 2850 #define elf32_arm_compute_jump_table_size(htab) \ 2851 ((htab)->next_tls_desc_index * 4) 2852 2853 /* ARM ELF linker hash table. */ 2854 struct elf32_arm_link_hash_table 2855 { 2856 /* The main hash table. */ 2857 struct elf_link_hash_table root; 2858 2859 /* The size in bytes of the section containing the Thumb-to-ARM glue. */ 2860 bfd_size_type thumb_glue_size; 2861 2862 /* The size in bytes of the section containing the ARM-to-Thumb glue. 
*/ 2863 bfd_size_type arm_glue_size; 2864 2865 /* The size in bytes of the section containing the ARMv4 BX veneers. */ 2866 bfd_size_type bx_glue_size; 2867 2868 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when 2869 veneer has been populated. */ 2870 bfd_vma bx_glue_offset[15]; 2871 2872 /* The size in bytes of the section containing glue for VFP11 erratum 2873 veneers. */ 2874 bfd_size_type vfp11_erratum_glue_size; 2875 2876 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This 2877 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and 2878 elf32_arm_write_section(). */ 2879 struct a8_erratum_fix *a8_erratum_fixes; 2880 unsigned int num_a8_erratum_fixes; 2881 2882 /* An arbitrary input BFD chosen to hold the glue sections. */ 2883 bfd * bfd_of_glue_owner; 2884 2885 /* Nonzero to output a BE8 image. */ 2886 int byteswap_code; 2887 2888 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32. 2889 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */ 2890 int target1_is_rel; 2891 2892 /* The relocation to use for R_ARM_TARGET2 relocations. */ 2893 int target2_reloc; 2894 2895 /* 0 = Ignore R_ARM_V4BX. 2896 1 = Convert BX to MOV PC. 2897 2 = Generate v4 interworking stubs. */ 2898 int fix_v4bx; 2899 2900 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */ 2901 int fix_cortex_a8; 2902 2903 /* Whether we should fix the ARM1176 BLX immediate issue. */ 2904 int fix_arm1176; 2905 2906 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */ 2907 int use_blx; 2908 2909 /* What sort of code sequences we should look for which may trigger the 2910 VFP11 denorm erratum. */ 2911 bfd_arm_vfp11_fix vfp11_fix; 2912 2913 /* Global counter for the number of fixes we have emitted. */ 2914 int num_vfp11_fixes; 2915 2916 /* Nonzero to force PIC branch veneers. */ 2917 int pic_veneer; 2918 2919 /* The number of bytes in the initial entry in the PLT. */ 2920 bfd_size_type plt_header_size; 2921 2922 /* The number of bytes in the subsequent PLT entries. */ 2923 bfd_size_type plt_entry_size; 2924 2925 /* True if the target system is VxWorks. */ 2926 int vxworks_p; 2927 2928 /* True if the target system is Symbian OS. */ 2929 int symbian_p; 2930 2931 /* True if the target system is Native Client. */ 2932 int nacl_p; 2933 2934 /* True if the target uses REL relocations. */ 2935 int use_rel; 2936 2937 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */ 2938 bfd_vma next_tls_desc_index; 2939 2940 /* How many R_ARM_TLS_DESC relocations were generated so far. */ 2941 bfd_vma num_tls_desc; 2942 2943 /* Short-cuts to get to dynamic linker sections. */ 2944 asection *sdynbss; 2945 asection *srelbss; 2946 2947 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */ 2948 asection *srelplt2; 2949 2950 /* The offset into splt of the PLT entry for the TLS descriptor 2951 resolver. Special values are 0, if not necessary (or not found 2952 to be necessary yet), and -1 if needed but not determined 2953 yet. */ 2954 bfd_vma dt_tlsdesc_plt; 2955 2956 /* The offset into sgot of the GOT entry used by the PLT entry 2957 above. */ 2958 bfd_vma dt_tlsdesc_got; 2959 2960 /* Offset in .plt section of tls_arm_trampoline. */ 2961 bfd_vma tls_trampoline; 2962 2963 /* Data for R_ARM_TLS_LDM32 relocations. */ 2964 union 2965 { 2966 bfd_signed_vma refcount; 2967 bfd_vma offset; 2968 } tls_ldm_got; 2969 2970 /* Small local sym cache. */ 2971 struct sym_cache sym_cache; 2972 2973 /* For convenience in allocate_dynrelocs.
*/ 2974 bfd * obfd; 2975 2976 /* The amount of space used by the reserved portion of the sgotplt 2977 section, plus whatever space is used by the jump slots. */ 2978 bfd_vma sgotplt_jump_table_size; 2979 2980 /* The stub hash table. */ 2981 struct bfd_hash_table stub_hash_table; 2982 2983 /* Linker stub bfd. */ 2984 bfd *stub_bfd; 2985 2986 /* Linker call-backs. */ 2987 asection * (*add_stub_section) (const char *, asection *); 2988 void (*layout_sections_again) (void); 2989 2990 /* Array to keep track of which stub sections have been created, and 2991 information on stub grouping. */ 2992 struct map_stub *stub_group; 2993 2994 /* Number of elements in stub_group. */ 2995 int top_id; 2996 2997 /* Assorted information used by elf32_arm_size_stubs. */ 2998 unsigned int bfd_count; 2999 int top_index; 3000 asection **input_list; 3001 }; 3002 3003 /* Create an entry in an ARM ELF linker hash table. */ 3004 3005 static struct bfd_hash_entry * 3006 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry, 3007 struct bfd_hash_table * table, 3008 const char * string) 3009 { 3010 struct elf32_arm_link_hash_entry * ret = 3011 (struct elf32_arm_link_hash_entry *) entry; 3012 3013 /* Allocate the structure if it has not already been allocated by a 3014 subclass. */ 3015 if (ret == NULL) 3016 ret = (struct elf32_arm_link_hash_entry *) 3017 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry)); 3018 if (ret == NULL) 3019 return (struct bfd_hash_entry *) ret; 3020 3021 /* Call the allocation method of the superclass. */ 3022 ret = ((struct elf32_arm_link_hash_entry *) 3023 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret, 3024 table, string)); 3025 if (ret != NULL) 3026 { 3027 ret->dyn_relocs = NULL; 3028 ret->tls_type = GOT_UNKNOWN; 3029 ret->tlsdesc_got = (bfd_vma) -1; 3030 ret->plt.thumb_refcount = 0; 3031 ret->plt.maybe_thumb_refcount = 0; 3032 ret->plt.noncall_refcount = 0; 3033 ret->plt.got_offset = -1; 3034 ret->is_iplt = FALSE; 3035 ret->export_glue = NULL; 3036 3037 ret->stub_cache = NULL; 3038 } 3039 3040 return (struct bfd_hash_entry *) ret; 3041 } 3042 3043 /* Ensure that we have allocated bookkeeping structures for ABFD's local 3044 symbols. */ 3045 3046 static bfd_boolean 3047 elf32_arm_allocate_local_sym_info (bfd *abfd) 3048 { 3049 if (elf_local_got_refcounts (abfd) == NULL) 3050 { 3051 bfd_size_type num_syms; 3052 bfd_size_type size; 3053 char *data; 3054 3055 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info; 3056 size = num_syms * (sizeof (bfd_signed_vma) 3057 + sizeof (struct arm_local_iplt_info *) 3058 + sizeof (bfd_vma) 3059 + sizeof (char)); 3060 data = bfd_zalloc (abfd, size); 3061 if (data == NULL) 3062 return FALSE; 3063 3064 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data; 3065 data += num_syms * sizeof (bfd_signed_vma); 3066 3067 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data; 3068 data += num_syms * sizeof (struct arm_local_iplt_info *); 3069 3070 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data; 3071 data += num_syms * sizeof (bfd_vma); 3072 3073 elf32_arm_local_got_tls_type (abfd) = data; 3074 } 3075 return TRUE; 3076 } 3077 3078 /* Return the .iplt information for local symbol R_SYMNDX, which belongs 3079 to input bfd ABFD. Create the information if it doesn't already exist. 3080 Return null if an allocation fails. 
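   For example (a sketch only), a relocation scan that wants to record a
   PLT reference against a local ifunc symbol, with ABFD and R_SYMNDX as
   in its caller, might do something like:

     struct arm_local_iplt_info *iplt
       = elf32_arm_create_local_iplt (abfd, r_symndx);
     if (iplt == NULL)
       return FALSE;              // Allocation failure.
     iplt->root.refcount += 1;    // Count the .iplt reference.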
*/ 3081 3082 static struct arm_local_iplt_info * 3083 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx) 3084 { 3085 struct arm_local_iplt_info **ptr; 3086 3087 if (!elf32_arm_allocate_local_sym_info (abfd)) 3088 return NULL; 3089 3090 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info); 3091 ptr = &elf32_arm_local_iplt (abfd)[r_symndx]; 3092 if (*ptr == NULL) 3093 *ptr = bfd_zalloc (abfd, sizeof (**ptr)); 3094 return *ptr; 3095 } 3096 3097 /* Try to obtain PLT information for the symbol with index R_SYMNDX 3098 in ABFD's symbol table. If the symbol is global, H points to its 3099 hash table entry, otherwise H is null. 3100 3101 Return true if the symbol does have PLT information. When returning 3102 true, point *ROOT_PLT at the target-independent reference count/offset 3103 union and *ARM_PLT at the ARM-specific information. */ 3104 3105 static bfd_boolean 3106 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h, 3107 unsigned long r_symndx, union gotplt_union **root_plt, 3108 struct arm_plt_info **arm_plt) 3109 { 3110 struct arm_local_iplt_info *local_iplt; 3111 3112 if (h != NULL) 3113 { 3114 *root_plt = &h->root.plt; 3115 *arm_plt = &h->plt; 3116 return TRUE; 3117 } 3118 3119 if (elf32_arm_local_iplt (abfd) == NULL) 3120 return FALSE; 3121 3122 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx]; 3123 if (local_iplt == NULL) 3124 return FALSE; 3125 3126 *root_plt = &local_iplt->root; 3127 *arm_plt = &local_iplt->arm; 3128 return TRUE; 3129 } 3130 3131 /* Return true if the PLT described by ARM_PLT requires a Thumb stub 3132 before it. */ 3133 3134 static bfd_boolean 3135 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info, 3136 struct arm_plt_info *arm_plt) 3137 { 3138 struct elf32_arm_link_hash_table *htab; 3139 3140 htab = elf32_arm_hash_table (info); 3141 return (arm_plt->thumb_refcount != 0 3142 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)); 3143 } 3144 3145 /* Return a pointer to the head of the dynamic reloc list that should 3146 be used for local symbol ISYM, which is symbol number R_SYMNDX in 3147 ABFD's symbol table. Return null if an error occurs. */ 3148 3149 static struct elf_dyn_relocs ** 3150 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx, 3151 Elf_Internal_Sym *isym) 3152 { 3153 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC) 3154 { 3155 struct arm_local_iplt_info *local_iplt; 3156 3157 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx); 3158 if (local_iplt == NULL) 3159 return NULL; 3160 return &local_iplt->dyn_relocs; 3161 } 3162 else 3163 { 3164 /* Track dynamic relocs needed for local syms too. 3165 We really need local syms available to do this 3166 easily. Oh well. */ 3167 asection *s; 3168 void *vpp; 3169 3170 s = bfd_section_from_elf_index (abfd, isym->st_shndx); 3171 if (s == NULL) 3172 abort (); 3173 3174 vpp = &elf_section_data (s)->local_dynrel; 3175 return (struct elf_dyn_relocs **) vpp; 3176 } 3177 } 3178 3179 /* Initialize an entry in the stub hash table. */ 3180 3181 static struct bfd_hash_entry * 3182 stub_hash_newfunc (struct bfd_hash_entry *entry, 3183 struct bfd_hash_table *table, 3184 const char *string) 3185 { 3186 /* Allocate the structure if it has not already been allocated by a 3187 subclass. */ 3188 if (entry == NULL) 3189 { 3190 entry = (struct bfd_hash_entry *) 3191 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry)); 3192 if (entry == NULL) 3193 return entry; 3194 } 3195 3196 /* Call the allocation method of the superclass. 
*/ 3197 entry = bfd_hash_newfunc (entry, table, string); 3198 if (entry != NULL) 3199 { 3200 struct elf32_arm_stub_hash_entry *eh; 3201 3202 /* Initialize the local fields. */ 3203 eh = (struct elf32_arm_stub_hash_entry *) entry; 3204 eh->stub_sec = NULL; 3205 eh->stub_offset = 0; 3206 eh->target_value = 0; 3207 eh->target_section = NULL; 3208 eh->target_addend = 0; 3209 eh->orig_insn = 0; 3210 eh->stub_type = arm_stub_none; 3211 eh->stub_size = 0; 3212 eh->stub_template = NULL; 3213 eh->stub_template_size = 0; 3214 eh->h = NULL; 3215 eh->id_sec = NULL; 3216 eh->output_name = NULL; 3217 } 3218 3219 return entry; 3220 } 3221 3222 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up 3223 shortcuts to them in our hash table. */ 3224 3225 static bfd_boolean 3226 create_got_section (bfd *dynobj, struct bfd_link_info *info) 3227 { 3228 struct elf32_arm_link_hash_table *htab; 3229 3230 htab = elf32_arm_hash_table (info); 3231 if (htab == NULL) 3232 return FALSE; 3233 3234 /* BPABI objects never have a GOT, or associated sections. */ 3235 if (htab->symbian_p) 3236 return TRUE; 3237 3238 if (! _bfd_elf_create_got_section (dynobj, info)) 3239 return FALSE; 3240 3241 return TRUE; 3242 } 3243 3244 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */ 3245 3246 static bfd_boolean 3247 create_ifunc_sections (struct bfd_link_info *info) 3248 { 3249 struct elf32_arm_link_hash_table *htab; 3250 const struct elf_backend_data *bed; 3251 bfd *dynobj; 3252 asection *s; 3253 flagword flags; 3254 3255 htab = elf32_arm_hash_table (info); 3256 dynobj = htab->root.dynobj; 3257 bed = get_elf_backend_data (dynobj); 3258 flags = bed->dynamic_sec_flags; 3259 3260 if (htab->root.iplt == NULL) 3261 { 3262 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt", 3263 flags | SEC_READONLY | SEC_CODE); 3264 if (s == NULL 3265 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment)) 3266 return FALSE; 3267 htab->root.iplt = s; 3268 } 3269 3270 if (htab->root.irelplt == NULL) 3271 { 3272 s = bfd_make_section_anyway_with_flags (dynobj, 3273 RELOC_SECTION (htab, ".iplt"), 3274 flags | SEC_READONLY); 3275 if (s == NULL 3276 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align)) 3277 return FALSE; 3278 htab->root.irelplt = s; 3279 } 3280 3281 if (htab->root.igotplt == NULL) 3282 { 3283 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags); 3284 if (s == NULL 3285 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align)) 3286 return FALSE; 3287 htab->root.igotplt = s; 3288 } 3289 return TRUE; 3290 } 3291 3292 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and 3293 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our 3294 hash table. 
*/ 3295 3296 static bfd_boolean 3297 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info) 3298 { 3299 struct elf32_arm_link_hash_table *htab; 3300 3301 htab = elf32_arm_hash_table (info); 3302 if (htab == NULL) 3303 return FALSE; 3304 3305 if (!htab->root.sgot && !create_got_section (dynobj, info)) 3306 return FALSE; 3307 3308 if (!_bfd_elf_create_dynamic_sections (dynobj, info)) 3309 return FALSE; 3310 3311 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss"); 3312 if (!info->shared) 3313 htab->srelbss = bfd_get_linker_section (dynobj, 3314 RELOC_SECTION (htab, ".bss")); 3315 3316 if (htab->vxworks_p) 3317 { 3318 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2)) 3319 return FALSE; 3320 3321 if (info->shared) 3322 { 3323 htab->plt_header_size = 0; 3324 htab->plt_entry_size 3325 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry); 3326 } 3327 else 3328 { 3329 htab->plt_header_size 3330 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry); 3331 htab->plt_entry_size 3332 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry); 3333 } 3334 } 3335 3336 if (!htab->root.splt 3337 || !htab->root.srelplt 3338 || !htab->sdynbss 3339 || (!info->shared && !htab->srelbss)) 3340 abort (); 3341 3342 return TRUE; 3343 } 3344 3345 /* Copy the extra info we tack onto an elf_link_hash_entry. */ 3346 3347 static void 3348 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info, 3349 struct elf_link_hash_entry *dir, 3350 struct elf_link_hash_entry *ind) 3351 { 3352 struct elf32_arm_link_hash_entry *edir, *eind; 3353 3354 edir = (struct elf32_arm_link_hash_entry *) dir; 3355 eind = (struct elf32_arm_link_hash_entry *) ind; 3356 3357 if (eind->dyn_relocs != NULL) 3358 { 3359 if (edir->dyn_relocs != NULL) 3360 { 3361 struct elf_dyn_relocs **pp; 3362 struct elf_dyn_relocs *p; 3363 3364 /* Add reloc counts against the indirect sym to the direct sym 3365 list. Merge any entries against the same section. */ 3366 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; ) 3367 { 3368 struct elf_dyn_relocs *q; 3369 3370 for (q = edir->dyn_relocs; q != NULL; q = q->next) 3371 if (q->sec == p->sec) 3372 { 3373 q->pc_count += p->pc_count; 3374 q->count += p->count; 3375 *pp = p->next; 3376 break; 3377 } 3378 if (q == NULL) 3379 pp = &p->next; 3380 } 3381 *pp = edir->dyn_relocs; 3382 } 3383 3384 edir->dyn_relocs = eind->dyn_relocs; 3385 eind->dyn_relocs = NULL; 3386 } 3387 3388 if (ind->root.type == bfd_link_hash_indirect) 3389 { 3390 /* Copy over PLT info. */ 3391 edir->plt.thumb_refcount += eind->plt.thumb_refcount; 3392 eind->plt.thumb_refcount = 0; 3393 edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount; 3394 eind->plt.maybe_thumb_refcount = 0; 3395 edir->plt.noncall_refcount += eind->plt.noncall_refcount; 3396 eind->plt.noncall_refcount = 0; 3397 3398 /* We should only allocate a function to .iplt once the final 3399 symbol information is known. */ 3400 BFD_ASSERT (!eind->is_iplt); 3401 3402 if (dir->got.refcount <= 0) 3403 { 3404 edir->tls_type = eind->tls_type; 3405 eind->tls_type = GOT_UNKNOWN; 3406 } 3407 } 3408 3409 _bfd_elf_link_hash_copy_indirect (info, dir, ind); 3410 } 3411 3412 /* Create an ARM elf linker hash table. 
*/ 3413 3414 static struct bfd_link_hash_table * 3415 elf32_arm_link_hash_table_create (bfd *abfd) 3416 { 3417 struct elf32_arm_link_hash_table *ret; 3418 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table); 3419 3420 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt); 3421 if (ret == NULL) 3422 return NULL; 3423 3424 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd, 3425 elf32_arm_link_hash_newfunc, 3426 sizeof (struct elf32_arm_link_hash_entry), 3427 ARM_ELF_DATA)) 3428 { 3429 free (ret); 3430 return NULL; 3431 } 3432 3433 ret->sdynbss = NULL; 3434 ret->srelbss = NULL; 3435 ret->srelplt2 = NULL; 3436 ret->dt_tlsdesc_plt = 0; 3437 ret->dt_tlsdesc_got = 0; 3438 ret->tls_trampoline = 0; 3439 ret->next_tls_desc_index = 0; 3440 ret->num_tls_desc = 0; 3441 ret->thumb_glue_size = 0; 3442 ret->arm_glue_size = 0; 3443 ret->bx_glue_size = 0; 3444 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset)); 3445 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE; 3446 ret->vfp11_erratum_glue_size = 0; 3447 ret->num_vfp11_fixes = 0; 3448 ret->fix_cortex_a8 = 0; 3449 ret->fix_arm1176 = 0; 3450 ret->bfd_of_glue_owner = NULL; 3451 ret->byteswap_code = 0; 3452 ret->target1_is_rel = 0; 3453 ret->target2_reloc = R_ARM_NONE; 3454 #ifdef FOUR_WORD_PLT 3455 ret->plt_header_size = 16; 3456 ret->plt_entry_size = 16; 3457 #else 3458 ret->plt_header_size = 20; 3459 ret->plt_entry_size = 12; 3460 #endif 3461 ret->fix_v4bx = 0; 3462 ret->use_blx = 0; 3463 ret->vxworks_p = 0; 3464 ret->symbian_p = 0; 3465 ret->nacl_p = 0; 3466 ret->use_rel = 1; 3467 ret->sym_cache.abfd = NULL; 3468 ret->obfd = abfd; 3469 ret->tls_ldm_got.refcount = 0; 3470 ret->stub_bfd = NULL; 3471 ret->add_stub_section = NULL; 3472 ret->layout_sections_again = NULL; 3473 ret->stub_group = NULL; 3474 ret->top_id = 0; 3475 ret->bfd_count = 0; 3476 ret->top_index = 0; 3477 ret->input_list = NULL; 3478 3479 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc, 3480 sizeof (struct elf32_arm_stub_hash_entry))) 3481 { 3482 free (ret); 3483 return NULL; 3484 } 3485 3486 return &ret->root.root; 3487 } 3488 3489 /* Free the derived linker hash table. */ 3490 3491 static void 3492 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash) 3493 { 3494 struct elf32_arm_link_hash_table *ret 3495 = (struct elf32_arm_link_hash_table *) hash; 3496 3497 bfd_hash_table_free (&ret->stub_hash_table); 3498 _bfd_generic_link_hash_table_free (hash); 3499 } 3500 3501 /* Determine if we're dealing with a Thumb only architecture. */ 3502 3503 static bfd_boolean 3504 using_thumb_only (struct elf32_arm_link_hash_table *globals) 3505 { 3506 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, 3507 Tag_CPU_arch); 3508 int profile; 3509 3510 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M) 3511 return TRUE; 3512 3513 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M) 3514 return FALSE; 3515 3516 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, 3517 Tag_CPU_arch_profile); 3518 3519 return profile == 'M'; 3520 } 3521 3522 /* Determine if we're dealing with a Thumb-2 object. */ 3523 3524 static bfd_boolean 3525 using_thumb2 (struct elf32_arm_link_hash_table *globals) 3526 { 3527 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, 3528 Tag_CPU_arch); 3529 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7; 3530 } 3531 3532 /* Determine what kind of NOPs are available. 
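   On cores whose Tag_CPU_arch is v6T2, v6K, v7 or v7E-M the architectural
   ARM NOP (0xe320f000) is available, and all of those except v6K also have
   the Thumb-2 NOPs, 0xbf00 (16-bit) and 0xf3af8000 (32-bit), as the two
   helpers below check.  Older cores have to use `mov r0, r0' style padding
   instead, e.g. 0xe1a00000 for ARM and 0x46c0 for Thumb.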
*/ 3533 3534 static bfd_boolean 3535 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals) 3536 { 3537 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, 3538 Tag_CPU_arch); 3539 return arch == TAG_CPU_ARCH_V6T2 3540 || arch == TAG_CPU_ARCH_V6K 3541 || arch == TAG_CPU_ARCH_V7 3542 || arch == TAG_CPU_ARCH_V7E_M; 3543 } 3544 3545 static bfd_boolean 3546 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals) 3547 { 3548 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, 3549 Tag_CPU_arch); 3550 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7 3551 || arch == TAG_CPU_ARCH_V7E_M); 3552 } 3553 3554 static bfd_boolean 3555 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type) 3556 { 3557 switch (stub_type) 3558 { 3559 case arm_stub_long_branch_thumb_only: 3560 case arm_stub_long_branch_v4t_thumb_arm: 3561 case arm_stub_short_branch_v4t_thumb_arm: 3562 case arm_stub_long_branch_v4t_thumb_arm_pic: 3563 case arm_stub_long_branch_v4t_thumb_tls_pic: 3564 case arm_stub_long_branch_thumb_only_pic: 3565 return TRUE; 3566 case arm_stub_none: 3567 BFD_FAIL (); 3568 return FALSE; 3569 break; 3570 default: 3571 return FALSE; 3572 } 3573 } 3574 3575 /* Determine the type of stub needed, if any, for a call. */ 3576 3577 static enum elf32_arm_stub_type 3578 arm_type_of_stub (struct bfd_link_info *info, 3579 asection *input_sec, 3580 const Elf_Internal_Rela *rel, 3581 unsigned char st_type, 3582 enum arm_st_branch_type *actual_branch_type, 3583 struct elf32_arm_link_hash_entry *hash, 3584 bfd_vma destination, 3585 asection *sym_sec, 3586 bfd *input_bfd, 3587 const char *name) 3588 { 3589 bfd_vma location; 3590 bfd_signed_vma branch_offset; 3591 unsigned int r_type; 3592 struct elf32_arm_link_hash_table * globals; 3593 int thumb2; 3594 int thumb_only; 3595 enum elf32_arm_stub_type stub_type = arm_stub_none; 3596 int use_plt = 0; 3597 enum arm_st_branch_type branch_type = *actual_branch_type; 3598 union gotplt_union *root_plt; 3599 struct arm_plt_info *arm_plt; 3600 3601 if (branch_type == ST_BRANCH_LONG) 3602 return stub_type; 3603 3604 globals = elf32_arm_hash_table (info); 3605 if (globals == NULL) 3606 return stub_type; 3607 3608 thumb_only = using_thumb_only (globals); 3609 3610 thumb2 = using_thumb2 (globals); 3611 3612 /* Determine where the call point is. */ 3613 location = (input_sec->output_offset 3614 + input_sec->output_section->vma 3615 + rel->r_offset); 3616 3617 r_type = ELF32_R_TYPE (rel->r_info); 3618 3619 /* For TLS call relocs, it is the caller's responsibility to provide 3620 the address of the appropriate trampoline. */ 3621 if (r_type != R_ARM_TLS_CALL 3622 && r_type != R_ARM_THM_TLS_CALL 3623 && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info), 3624 &root_plt, &arm_plt) 3625 && root_plt->offset != (bfd_vma) -1) 3626 { 3627 asection *splt; 3628 3629 if (hash == NULL || hash->is_iplt) 3630 splt = globals->root.iplt; 3631 else 3632 splt = globals->root.splt; 3633 if (splt != NULL) 3634 { 3635 use_plt = 1; 3636 3637 /* Note when dealing with PLT entries: the main PLT stub is in 3638 ARM mode, so if the branch is in Thumb mode, another 3639 Thumb->ARM stub will be inserted later just before the ARM 3640 PLT stub. We don't take this extra distance into account 3641 here, because if a long branch stub is needed, we'll add a 3642 Thumb->Arm one and branch directly to the ARM PLT entry 3643 because it avoids spreading offset corrections in several 3644 places. 
*/ 3645 3646 destination = (splt->output_section->vma 3647 + splt->output_offset 3648 + root_plt->offset); 3649 st_type = STT_FUNC; 3650 branch_type = ST_BRANCH_TO_ARM; 3651 } 3652 } 3653 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */ 3654 BFD_ASSERT (st_type != STT_GNU_IFUNC); 3655 3656 branch_offset = (bfd_signed_vma)(destination - location); 3657 3658 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24 3659 || r_type == R_ARM_THM_TLS_CALL) 3660 { 3661 /* Handle cases where: 3662 - this call goes too far (different Thumb/Thumb2 max 3663 distance) 3664 - it's a Thumb->Arm call and blx is not available, or it's a 3665 Thumb->Arm branch (not bl). A stub is needed in this case, 3666 but only if this call is not through a PLT entry. Indeed, 3667 PLT stubs handle mode switching already. 3668 */ 3669 if ((!thumb2 3670 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET 3671 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET))) 3672 || (thumb2 3673 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET 3674 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET))) 3675 || (branch_type == ST_BRANCH_TO_ARM 3676 && (((r_type == R_ARM_THM_CALL 3677 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx) 3678 || (r_type == R_ARM_THM_JUMP24)) 3679 && !use_plt)) 3680 { 3681 if (branch_type == ST_BRANCH_TO_THUMB) 3682 { 3683 /* Thumb to thumb. */ 3684 if (!thumb_only) 3685 { 3686 stub_type = (info->shared | globals->pic_veneer) 3687 /* PIC stubs. */ 3688 ? ((globals->use_blx 3689 && (r_type == R_ARM_THM_CALL)) 3690 /* V5T and above. Stub starts with ARM code, so 3691 we must be able to switch mode before 3692 reaching it, which is only possible for 'bl' 3693 (ie R_ARM_THM_CALL relocation). */ 3694 ? arm_stub_long_branch_any_thumb_pic 3695 /* On V4T, use Thumb code only. */ 3696 : arm_stub_long_branch_v4t_thumb_thumb_pic) 3697 3698 /* non-PIC stubs. */ 3699 : ((globals->use_blx 3700 && (r_type == R_ARM_THM_CALL)) 3701 /* V5T and above. */ 3702 ? arm_stub_long_branch_any_any 3703 /* V4T. */ 3704 : arm_stub_long_branch_v4t_thumb_thumb); 3705 } 3706 else 3707 { 3708 stub_type = (info->shared | globals->pic_veneer) 3709 /* PIC stub. */ 3710 ? arm_stub_long_branch_thumb_only_pic 3711 /* non-PIC stub. */ 3712 : arm_stub_long_branch_thumb_only; 3713 } 3714 } 3715 else 3716 { 3717 /* Thumb to arm. */ 3718 if (sym_sec != NULL 3719 && sym_sec->owner != NULL 3720 && !INTERWORK_FLAG (sym_sec->owner)) 3721 { 3722 (*_bfd_error_handler) 3723 (_("%B(%s): warning: interworking not enabled.\n" 3724 " first occurrence: %B: Thumb call to ARM"), 3725 sym_sec->owner, input_bfd, name); 3726 } 3727 3728 stub_type = 3729 (info->shared | globals->pic_veneer) 3730 /* PIC stubs. */ 3731 ? (r_type == R_ARM_THM_TLS_CALL 3732 /* TLS PIC stubs */ 3733 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic 3734 : arm_stub_long_branch_v4t_thumb_tls_pic) 3735 : ((globals->use_blx && r_type == R_ARM_THM_CALL) 3736 /* V5T PIC and above. */ 3737 ? arm_stub_long_branch_any_arm_pic 3738 /* V4T PIC stub. */ 3739 : arm_stub_long_branch_v4t_thumb_arm_pic)) 3740 3741 /* non-PIC stubs. */ 3742 : ((globals->use_blx && r_type == R_ARM_THM_CALL) 3743 /* V5T and above. */ 3744 ? arm_stub_long_branch_any_any 3745 /* V4T. */ 3746 : arm_stub_long_branch_v4t_thumb_arm); 3747 3748 /* Handle v4t short branches. 
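   For example, a Thumb call at 0x00008000 whose ARM destination is at
   0x00208000 (about 2 MB away) passes the range check below, so the
   short-branch veneer replaces the long-branch stub selected above; a
   destination 5 MB away is outside the +/-4 MB Thumb branch range and
   keeps the long-branch form.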
*/ 3749 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm) 3750 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET) 3751 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET)) 3752 stub_type = arm_stub_short_branch_v4t_thumb_arm; 3753 } 3754 } 3755 } 3756 else if (r_type == R_ARM_CALL 3757 || r_type == R_ARM_JUMP24 3758 || r_type == R_ARM_PLT32 3759 || r_type == R_ARM_TLS_CALL) 3760 { 3761 if (branch_type == ST_BRANCH_TO_THUMB) 3762 { 3763 /* Arm to thumb. */ 3764 3765 if (sym_sec != NULL 3766 && sym_sec->owner != NULL 3767 && !INTERWORK_FLAG (sym_sec->owner)) 3768 { 3769 (*_bfd_error_handler) 3770 (_("%B(%s): warning: interworking not enabled.\n" 3771 " first occurrence: %B: ARM call to Thumb"), 3772 sym_sec->owner, input_bfd, name); 3773 } 3774 3775 /* We have an extra 2-bytes reach because of 3776 the mode change (bit 24 (H) of BLX encoding). */ 3777 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2) 3778 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET) 3779 || (r_type == R_ARM_CALL && !globals->use_blx) 3780 || (r_type == R_ARM_JUMP24) 3781 || (r_type == R_ARM_PLT32)) 3782 { 3783 stub_type = (info->shared | globals->pic_veneer) 3784 /* PIC stubs. */ 3785 ? ((globals->use_blx) 3786 /* V5T and above. */ 3787 ? arm_stub_long_branch_any_thumb_pic 3788 /* V4T stub. */ 3789 : arm_stub_long_branch_v4t_arm_thumb_pic) 3790 3791 /* non-PIC stubs. */ 3792 : ((globals->use_blx) 3793 /* V5T and above. */ 3794 ? arm_stub_long_branch_any_any 3795 /* V4T. */ 3796 : arm_stub_long_branch_v4t_arm_thumb); 3797 } 3798 } 3799 else 3800 { 3801 /* Arm to arm. */ 3802 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET 3803 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)) 3804 { 3805 stub_type = 3806 (info->shared | globals->pic_veneer) 3807 /* PIC stubs. */ 3808 ? (r_type == R_ARM_TLS_CALL 3809 /* TLS PIC Stub */ 3810 ? arm_stub_long_branch_any_tls_pic 3811 : arm_stub_long_branch_any_arm_pic) 3812 /* non-PIC stubs. */ 3813 : arm_stub_long_branch_any_any; 3814 } 3815 } 3816 } 3817 3818 /* If a stub is needed, record the actual destination type. */ 3819 if (stub_type != arm_stub_none) 3820 *actual_branch_type = branch_type; 3821 3822 return stub_type; 3823 } 3824 3825 /* Build a name for an entry in the stub hash table. */ 3826 3827 static char * 3828 elf32_arm_stub_name (const asection *input_section, 3829 const asection *sym_sec, 3830 const struct elf32_arm_link_hash_entry *hash, 3831 const Elf_Internal_Rela *rel, 3832 enum elf32_arm_stub_type stub_type) 3833 { 3834 char *stub_name; 3835 bfd_size_type len; 3836 3837 if (hash) 3838 { 3839 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1; 3840 stub_name = (char *) bfd_malloc (len); 3841 if (stub_name != NULL) 3842 sprintf (stub_name, "%08x_%s+%x_%d", 3843 input_section->id & 0xffffffff, 3844 hash->root.root.root.string, 3845 (int) rel->r_addend & 0xffffffff, 3846 (int) stub_type); 3847 } 3848 else 3849 { 3850 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1; 3851 stub_name = (char *) bfd_malloc (len); 3852 if (stub_name != NULL) 3853 sprintf (stub_name, "%08x_%x:%x+%x_%d", 3854 input_section->id & 0xffffffff, 3855 sym_sec->id & 0xffffffff, 3856 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL 3857 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL 3858 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff, 3859 (int) rel->r_addend & 0xffffffff, 3860 (int) stub_type); 3861 } 3862 3863 return stub_name; 3864 } 3865 3866 /* Look up an entry in the stub hash. Stub entries are cached because 3867 creating the stub name takes a bit of time. 
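   The names being cached are the ones built by elf32_arm_stub_name above.  */

/* A sketch of that naming scheme, for reference only (the helper below is
   hypothetical and not used by BFD).  With a section id of 0x2a, a global
   symbol "printf", a zero addend and a stub type whose enumeration value is
   5, the first format above yields "0000002a_printf+0_5"; the second format
   substitutes the symbol section id and symbol index for local symbols.  */

static char *
example_stub_name (unsigned int section_id, const char *sym_name,
                   unsigned int addend, int stub_type)
{
  /* Eight hex digits, '_', the symbol name, '+', up to eight hex digits,
     '_', up to two decimal digits and a NUL, mirroring the length
     computation above.  */
  size_t len = 8 + 1 + strlen (sym_name) + 1 + 8 + 1 + 2 + 1;
  char *name = (char *) malloc (len);

  if (name != NULL)
    sprintf (name, "%08x_%s+%x_%d", section_id, sym_name, addend, stub_type);
  return name;
}

/* The cached lookup itself: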
*/ 3868 3869 static struct elf32_arm_stub_hash_entry * 3870 elf32_arm_get_stub_entry (const asection *input_section, 3871 const asection *sym_sec, 3872 struct elf_link_hash_entry *hash, 3873 const Elf_Internal_Rela *rel, 3874 struct elf32_arm_link_hash_table *htab, 3875 enum elf32_arm_stub_type stub_type) 3876 { 3877 struct elf32_arm_stub_hash_entry *stub_entry; 3878 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash; 3879 const asection *id_sec; 3880 3881 if ((input_section->flags & SEC_CODE) == 0) 3882 return NULL; 3883 3884 /* If this input section is part of a group of sections sharing one 3885 stub section, then use the id of the first section in the group. 3886 Stub names need to include a section id, as there may well be 3887 more than one stub used to reach say, printf, and we need to 3888 distinguish between them. */ 3889 id_sec = htab->stub_group[input_section->id].link_sec; 3890 3891 if (h != NULL && h->stub_cache != NULL 3892 && h->stub_cache->h == h 3893 && h->stub_cache->id_sec == id_sec 3894 && h->stub_cache->stub_type == stub_type) 3895 { 3896 stub_entry = h->stub_cache; 3897 } 3898 else 3899 { 3900 char *stub_name; 3901 3902 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type); 3903 if (stub_name == NULL) 3904 return NULL; 3905 3906 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, 3907 stub_name, FALSE, FALSE); 3908 if (h != NULL) 3909 h->stub_cache = stub_entry; 3910 3911 free (stub_name); 3912 } 3913 3914 return stub_entry; 3915 } 3916 3917 /* Find or create a stub section. Returns a pointer to the stub section, and 3918 the section to which the stub section will be attached (in *LINK_SEC_P). 3919 LINK_SEC_P may be NULL. */ 3920 3921 static asection * 3922 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section, 3923 struct elf32_arm_link_hash_table *htab) 3924 { 3925 asection *link_sec; 3926 asection *stub_sec; 3927 3928 link_sec = htab->stub_group[section->id].link_sec; 3929 BFD_ASSERT (link_sec != NULL); 3930 stub_sec = htab->stub_group[section->id].stub_sec; 3931 3932 if (stub_sec == NULL) 3933 { 3934 stub_sec = htab->stub_group[link_sec->id].stub_sec; 3935 if (stub_sec == NULL) 3936 { 3937 size_t namelen; 3938 bfd_size_type len; 3939 char *s_name; 3940 3941 namelen = strlen (link_sec->name); 3942 len = namelen + sizeof (STUB_SUFFIX); 3943 s_name = (char *) bfd_alloc (htab->stub_bfd, len); 3944 if (s_name == NULL) 3945 return NULL; 3946 3947 memcpy (s_name, link_sec->name, namelen); 3948 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX)); 3949 stub_sec = (*htab->add_stub_section) (s_name, link_sec); 3950 if (stub_sec == NULL) 3951 return NULL; 3952 htab->stub_group[link_sec->id].stub_sec = stub_sec; 3953 } 3954 htab->stub_group[section->id].stub_sec = stub_sec; 3955 } 3956 3957 if (link_sec_p) 3958 *link_sec_p = link_sec; 3959 3960 return stub_sec; 3961 } 3962 3963 /* Add a new stub entry to the stub hash. Not all fields of the new 3964 stub entry are initialised. */ 3965 3966 static struct elf32_arm_stub_hash_entry * 3967 elf32_arm_add_stub (const char *stub_name, 3968 asection *section, 3969 struct elf32_arm_link_hash_table *htab) 3970 { 3971 asection *link_sec; 3972 asection *stub_sec; 3973 struct elf32_arm_stub_hash_entry *stub_entry; 3974 3975 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab); 3976 if (stub_sec == NULL) 3977 return NULL; 3978 3979 /* Enter this entry into the linker stub hash table. 
*/ 3980 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, 3981 TRUE, FALSE); 3982 if (stub_entry == NULL) 3983 { 3984 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"), 3985 section->owner, 3986 stub_name); 3987 return NULL; 3988 } 3989 3990 stub_entry->stub_sec = stub_sec; 3991 stub_entry->stub_offset = 0; 3992 stub_entry->id_sec = link_sec; 3993 3994 return stub_entry; 3995 } 3996 3997 /* Store an Arm insn into an output section not processed by 3998 elf32_arm_write_section. */ 3999 4000 static void 4001 put_arm_insn (struct elf32_arm_link_hash_table * htab, 4002 bfd * output_bfd, bfd_vma val, void * ptr) 4003 { 4004 if (htab->byteswap_code != bfd_little_endian (output_bfd)) 4005 bfd_putl32 (val, ptr); 4006 else 4007 bfd_putb32 (val, ptr); 4008 } 4009 4010 /* Store a 16-bit Thumb insn into an output section not processed by 4011 elf32_arm_write_section. */ 4012 4013 static void 4014 put_thumb_insn (struct elf32_arm_link_hash_table * htab, 4015 bfd * output_bfd, bfd_vma val, void * ptr) 4016 { 4017 if (htab->byteswap_code != bfd_little_endian (output_bfd)) 4018 bfd_putl16 (val, ptr); 4019 else 4020 bfd_putb16 (val, ptr); 4021 } 4022 4023 /* If it's possible to change R_TYPE to a more efficient access 4024 model, return the new reloc type. */ 4025 4026 static unsigned 4027 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type, 4028 struct elf_link_hash_entry *h) 4029 { 4030 int is_local = (h == NULL); 4031 4032 if (info->shared || (h && h->root.type == bfd_link_hash_undefweak)) 4033 return r_type; 4034 4035 /* We do not support relaxations for Old TLS models. */ 4036 switch (r_type) 4037 { 4038 case R_ARM_TLS_GOTDESC: 4039 case R_ARM_TLS_CALL: 4040 case R_ARM_THM_TLS_CALL: 4041 case R_ARM_TLS_DESCSEQ: 4042 case R_ARM_THM_TLS_DESCSEQ: 4043 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32; 4044 } 4045 4046 return r_type; 4047 } 4048 4049 static bfd_reloc_status_type elf32_arm_final_link_relocate 4050 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *, 4051 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *, 4052 const char *, unsigned char, enum arm_st_branch_type, 4053 struct elf_link_hash_entry *, bfd_boolean *, char **); 4054 4055 static unsigned int 4056 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type) 4057 { 4058 switch (stub_type) 4059 { 4060 case arm_stub_a8_veneer_b_cond: 4061 case arm_stub_a8_veneer_b: 4062 case arm_stub_a8_veneer_bl: 4063 return 2; 4064 4065 case arm_stub_long_branch_any_any: 4066 case arm_stub_long_branch_v4t_arm_thumb: 4067 case arm_stub_long_branch_thumb_only: 4068 case arm_stub_long_branch_v4t_thumb_thumb: 4069 case arm_stub_long_branch_v4t_thumb_arm: 4070 case arm_stub_short_branch_v4t_thumb_arm: 4071 case arm_stub_long_branch_any_arm_pic: 4072 case arm_stub_long_branch_any_thumb_pic: 4073 case arm_stub_long_branch_v4t_thumb_thumb_pic: 4074 case arm_stub_long_branch_v4t_arm_thumb_pic: 4075 case arm_stub_long_branch_v4t_thumb_arm_pic: 4076 case arm_stub_long_branch_thumb_only_pic: 4077 case arm_stub_long_branch_any_tls_pic: 4078 case arm_stub_long_branch_v4t_thumb_tls_pic: 4079 case arm_stub_a8_veneer_blx: 4080 return 4; 4081 4082 default: 4083 abort (); /* Should be unreachable. 
*/ 4084 } 4085 } 4086 4087 static bfd_boolean 4088 arm_build_one_stub (struct bfd_hash_entry *gen_entry, 4089 void * in_arg) 4090 { 4091 #define MAXRELOCS 2 4092 struct elf32_arm_stub_hash_entry *stub_entry; 4093 struct elf32_arm_link_hash_table *globals; 4094 struct bfd_link_info *info; 4095 asection *stub_sec; 4096 bfd *stub_bfd; 4097 bfd_byte *loc; 4098 bfd_vma sym_value; 4099 int template_size; 4100 int size; 4101 const insn_sequence *template_sequence; 4102 int i; 4103 int stub_reloc_idx[MAXRELOCS] = {-1, -1}; 4104 int stub_reloc_offset[MAXRELOCS] = {0, 0}; 4105 int nrelocs = 0; 4106 4107 /* Massage our args to the form they really have. */ 4108 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry; 4109 info = (struct bfd_link_info *) in_arg; 4110 4111 globals = elf32_arm_hash_table (info); 4112 if (globals == NULL) 4113 return FALSE; 4114 4115 stub_sec = stub_entry->stub_sec; 4116 4117 if ((globals->fix_cortex_a8 < 0) 4118 != (arm_stub_required_alignment (stub_entry->stub_type) == 2)) 4119 /* We have to do less-strictly-aligned fixes last. */ 4120 return TRUE; 4121 4122 /* Make a note of the offset within the stubs for this entry. */ 4123 stub_entry->stub_offset = stub_sec->size; 4124 loc = stub_sec->contents + stub_entry->stub_offset; 4125 4126 stub_bfd = stub_sec->owner; 4127 4128 /* This is the address of the stub destination. */ 4129 sym_value = (stub_entry->target_value 4130 + stub_entry->target_section->output_offset 4131 + stub_entry->target_section->output_section->vma); 4132 4133 template_sequence = stub_entry->stub_template; 4134 template_size = stub_entry->stub_template_size; 4135 4136 size = 0; 4137 for (i = 0; i < template_size; i++) 4138 { 4139 switch (template_sequence[i].type) 4140 { 4141 case THUMB16_TYPE: 4142 { 4143 bfd_vma data = (bfd_vma) template_sequence[i].data; 4144 if (template_sequence[i].reloc_addend != 0) 4145 { 4146 /* We've borrowed the reloc_addend field to mean we should 4147 insert a condition code into this (Thumb-1 branch) 4148 instruction. See THUMB16_BCOND_INSN. */ 4149 BFD_ASSERT ((data & 0xff00) == 0xd000); 4150 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8; 4151 } 4152 bfd_put_16 (stub_bfd, data, loc + size); 4153 size += 2; 4154 } 4155 break; 4156 4157 case THUMB32_TYPE: 4158 bfd_put_16 (stub_bfd, 4159 (template_sequence[i].data >> 16) & 0xffff, 4160 loc + size); 4161 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff, 4162 loc + size + 2); 4163 if (template_sequence[i].r_type != R_ARM_NONE) 4164 { 4165 stub_reloc_idx[nrelocs] = i; 4166 stub_reloc_offset[nrelocs++] = size; 4167 } 4168 size += 4; 4169 break; 4170 4171 case ARM_TYPE: 4172 bfd_put_32 (stub_bfd, template_sequence[i].data, 4173 loc + size); 4174 /* Handle cases where the target is encoded within the 4175 instruction. */ 4176 if (template_sequence[i].r_type == R_ARM_JUMP24) 4177 { 4178 stub_reloc_idx[nrelocs] = i; 4179 stub_reloc_offset[nrelocs++] = size; 4180 } 4181 size += 4; 4182 break; 4183 4184 case DATA_TYPE: 4185 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size); 4186 stub_reloc_idx[nrelocs] = i; 4187 stub_reloc_offset[nrelocs++] = size; 4188 size += 4; 4189 break; 4190 4191 default: 4192 BFD_FAIL (); 4193 return FALSE; 4194 } 4195 } 4196 4197 stub_sec->size += size; 4198 4199 /* Stub size has already been computed in arm_size_one_stub. Check 4200 consistency. */ 4201 BFD_ASSERT (size == stub_entry->stub_size); 4202 4203 /* Destination is Thumb. Force bit 0 to 1 to reflect this. 
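   For example, a stub whose target is a Thumb function at 0x00008000 uses
   0x00008001 as the value fed to the relocations below: a BX/BLX to an
   address with bit 0 set switches the processor to Thumb state, while the
   instruction fetch itself still uses the halfword-aligned address.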
*/ 4204 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB) 4205 sym_value |= 1; 4206 4207 /* Assume there is at least one and at most MAXRELOCS entries to relocate 4208 in each stub. */ 4209 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS); 4210 4211 for (i = 0; i < nrelocs; i++) 4212 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24 4213 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19 4214 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL 4215 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22) 4216 { 4217 Elf_Internal_Rela rel; 4218 bfd_boolean unresolved_reloc; 4219 char *error_message; 4220 enum arm_st_branch_type branch_type 4221 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22 4222 ? ST_BRANCH_TO_THUMB : ST_BRANCH_TO_ARM); 4223 bfd_vma points_to = sym_value + stub_entry->target_addend; 4224 4225 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i]; 4226 rel.r_info = ELF32_R_INFO (0, 4227 template_sequence[stub_reloc_idx[i]].r_type); 4228 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend; 4229 4230 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0) 4231 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[] 4232 template should refer back to the instruction after the original 4233 branch. */ 4234 points_to = sym_value; 4235 4236 /* There may be unintended consequences if this is not true. */ 4237 BFD_ASSERT (stub_entry->h == NULL); 4238 4239 /* Note: _bfd_final_link_relocate doesn't handle these relocations 4240 properly. We should probably use this function unconditionally, 4241 rather than only for certain relocations listed in the enclosing 4242 conditional, for the sake of consistency. */ 4243 elf32_arm_final_link_relocate (elf32_arm_howto_from_type 4244 (template_sequence[stub_reloc_idx[i]].r_type), 4245 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel, 4246 points_to, info, stub_entry->target_section, "", STT_FUNC, 4247 branch_type, (struct elf_link_hash_entry *) stub_entry->h, 4248 &unresolved_reloc, &error_message); 4249 } 4250 else 4251 { 4252 Elf_Internal_Rela rel; 4253 bfd_boolean unresolved_reloc; 4254 char *error_message; 4255 bfd_vma points_to = sym_value + stub_entry->target_addend 4256 + template_sequence[stub_reloc_idx[i]].reloc_addend; 4257 4258 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i]; 4259 rel.r_info = ELF32_R_INFO (0, 4260 template_sequence[stub_reloc_idx[i]].r_type); 4261 rel.r_addend = 0; 4262 4263 elf32_arm_final_link_relocate (elf32_arm_howto_from_type 4264 (template_sequence[stub_reloc_idx[i]].r_type), 4265 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel, 4266 points_to, info, stub_entry->target_section, "", STT_FUNC, 4267 stub_entry->branch_type, 4268 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc, 4269 &error_message); 4270 } 4271 4272 return TRUE; 4273 #undef MAXRELOCS 4274 } 4275 4276 /* Calculate the template, template size and instruction size for a stub. 4277 Return value is the instruction size. 
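   A worked example may help.  */

/* Illustrative only (the template below is invented, not one of the real
   stub templates): a template made of two 16-bit Thumb instructions, one
   32-bit Thumb instruction and one data word occupies 2 + 2 + 4 + 4 = 12
   bytes, which is what find_stub_size_and_template reports;
   arm_size_one_stub then rounds the stub section's allocation up to a
   multiple of 8, giving 16 bytes for this stub.  */

static unsigned int
example_stub_size (void)
{
  static const unsigned int piece_sizes[] = { 2, 2, 4, 4 };
  unsigned int i, size = 0;

  for (i = 0; i < sizeof (piece_sizes) / sizeof (piece_sizes[0]); i++)
    size += piece_sizes[i];

  return (size + 7) & ~7u;      /* 12 -> 16, as in arm_size_one_stub.  */
}

/* The general computation over a real template: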
*/ 4278 4279 static unsigned int 4280 find_stub_size_and_template (enum elf32_arm_stub_type stub_type, 4281 const insn_sequence **stub_template, 4282 int *stub_template_size) 4283 { 4284 const insn_sequence *template_sequence = NULL; 4285 int template_size = 0, i; 4286 unsigned int size; 4287 4288 template_sequence = stub_definitions[stub_type].template_sequence; 4289 if (stub_template) 4290 *stub_template = template_sequence; 4291 4292 template_size = stub_definitions[stub_type].template_size; 4293 if (stub_template_size) 4294 *stub_template_size = template_size; 4295 4296 size = 0; 4297 for (i = 0; i < template_size; i++) 4298 { 4299 switch (template_sequence[i].type) 4300 { 4301 case THUMB16_TYPE: 4302 size += 2; 4303 break; 4304 4305 case ARM_TYPE: 4306 case THUMB32_TYPE: 4307 case DATA_TYPE: 4308 size += 4; 4309 break; 4310 4311 default: 4312 BFD_FAIL (); 4313 return 0; 4314 } 4315 } 4316 4317 return size; 4318 } 4319 4320 /* As above, but don't actually build the stub. Just bump offset so 4321 we know stub section sizes. */ 4322 4323 static bfd_boolean 4324 arm_size_one_stub (struct bfd_hash_entry *gen_entry, 4325 void *in_arg ATTRIBUTE_UNUSED) 4326 { 4327 struct elf32_arm_stub_hash_entry *stub_entry; 4328 const insn_sequence *template_sequence; 4329 int template_size, size; 4330 4331 /* Massage our args to the form they really have. */ 4332 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry; 4333 4334 BFD_ASSERT((stub_entry->stub_type > arm_stub_none) 4335 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions)); 4336 4337 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence, 4338 &template_size); 4339 4340 stub_entry->stub_size = size; 4341 stub_entry->stub_template = template_sequence; 4342 stub_entry->stub_template_size = template_size; 4343 4344 size = (size + 7) & ~7; 4345 stub_entry->stub_sec->size += size; 4346 4347 return TRUE; 4348 } 4349 4350 /* External entry points for sizing and building linker stubs. */ 4351 4352 /* Set up various things so that we can make a list of input sections 4353 for each output section included in the link. Returns -1 on error, 4354 0 when no stubs will be needed, and 1 on success. */ 4355 4356 int 4357 elf32_arm_setup_section_lists (bfd *output_bfd, 4358 struct bfd_link_info *info) 4359 { 4360 bfd *input_bfd; 4361 unsigned int bfd_count; 4362 int top_id, top_index; 4363 asection *section; 4364 asection **input_list, **list; 4365 bfd_size_type amt; 4366 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); 4367 4368 if (htab == NULL) 4369 return 0; 4370 if (! is_elf_hash_table (htab)) 4371 return 0; 4372 4373 /* Count the number of input BFDs and find the top input section id. */ 4374 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0; 4375 input_bfd != NULL; 4376 input_bfd = input_bfd->link_next) 4377 { 4378 bfd_count += 1; 4379 for (section = input_bfd->sections; 4380 section != NULL; 4381 section = section->next) 4382 { 4383 if (top_id < section->id) 4384 top_id = section->id; 4385 } 4386 } 4387 htab->bfd_count = bfd_count; 4388 4389 amt = sizeof (struct map_stub) * (top_id + 1); 4390 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt); 4391 if (htab->stub_group == NULL) 4392 return -1; 4393 htab->top_id = top_id; 4394 4395 /* We can't use output_bfd->section_count here to find the top output 4396 section index as some sections may have been removed, and 4397 _bfd_strip_section_from_output doesn't renumber the indices. 
*/ 4398 for (section = output_bfd->sections, top_index = 0; 4399 section != NULL; 4400 section = section->next) 4401 { 4402 if (top_index < section->index) 4403 top_index = section->index; 4404 } 4405 4406 htab->top_index = top_index; 4407 amt = sizeof (asection *) * (top_index + 1); 4408 input_list = (asection **) bfd_malloc (amt); 4409 htab->input_list = input_list; 4410 if (input_list == NULL) 4411 return -1; 4412 4413 /* For sections we aren't interested in, mark their entries with a 4414 value we can check later. */ 4415 list = input_list + top_index; 4416 do 4417 *list = bfd_abs_section_ptr; 4418 while (list-- != input_list); 4419 4420 for (section = output_bfd->sections; 4421 section != NULL; 4422 section = section->next) 4423 { 4424 if ((section->flags & SEC_CODE) != 0) 4425 input_list[section->index] = NULL; 4426 } 4427 4428 return 1; 4429 } 4430 4431 /* The linker repeatedly calls this function for each input section, 4432 in the order that input sections are linked into output sections. 4433 Build lists of input sections to determine groupings between which 4434 we may insert linker stubs. */ 4435 4436 void 4437 elf32_arm_next_input_section (struct bfd_link_info *info, 4438 asection *isec) 4439 { 4440 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); 4441 4442 if (htab == NULL) 4443 return; 4444 4445 if (isec->output_section->index <= htab->top_index) 4446 { 4447 asection **list = htab->input_list + isec->output_section->index; 4448 4449 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0) 4450 { 4451 /* Steal the link_sec pointer for our list. */ 4452 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec) 4453 /* This happens to make the list in reverse order, 4454 which we reverse later. */ 4455 PREV_SEC (isec) = *list; 4456 *list = isec; 4457 } 4458 } 4459 } 4460 4461 /* See whether we can group stub sections together. Grouping stub 4462 sections may result in fewer stubs. More importantly, we need to 4463 put all .init* and .fini* stubs at the end of the .init or 4464 .fini output sections respectively, because glibc splits the 4465 _init and _fini functions into multiple parts. Putting a stub in 4466 the middle of a function is not a good idea. */ 4467 4468 static void 4469 group_sections (struct elf32_arm_link_hash_table *htab, 4470 bfd_size_type stub_group_size, 4471 bfd_boolean stubs_always_after_branch) 4472 { 4473 asection **list = htab->input_list; 4474 4475 do 4476 { 4477 asection *tail = *list; 4478 asection *head; 4479 4480 if (tail == bfd_abs_section_ptr) 4481 continue; 4482 4483 /* Reverse the list: we must avoid placing stubs at the 4484 beginning of the section because the beginning of the text 4485 section may be required for an interrupt vector in bare metal 4486 code. */ 4487 #define NEXT_SEC PREV_SEC 4488 head = NULL; 4489 while (tail != NULL) 4490 { 4491 /* Pop from tail. */ 4492 asection *item = tail; 4493 tail = PREV_SEC (item); 4494 4495 /* Push on head. */ 4496 NEXT_SEC (item) = head; 4497 head = item; 4498 } 4499 4500 while (head != NULL) 4501 { 4502 asection *curr; 4503 asection *next; 4504 bfd_vma stub_group_start = head->output_offset; 4505 bfd_vma end_of_next; 4506 4507 curr = head; 4508 while (NEXT_SEC (curr) != NULL) 4509 { 4510 next = NEXT_SEC (curr); 4511 end_of_next = next->output_offset + next->size; 4512 if (end_of_next - stub_group_start >= stub_group_size) 4513 /* End of NEXT is too far from start, so stop. */ 4514 break; 4515 /* Add NEXT to the group. 
*/ 4516 curr = next; 4517 } 4518 4519 /* OK, the size from the start to the start of CURR is less 4520 than stub_group_size and thus can be handled by one stub 4521 section. (Or the head section is itself larger than 4522 stub_group_size, in which case we may be toast.) 4523 We should really be keeping track of the total size of 4524 stubs added here, as stubs contribute to the final output 4525 section size. */ 4526 do 4527 { 4528 next = NEXT_SEC (head); 4529 /* Set up this stub group. */ 4530 htab->stub_group[head->id].link_sec = curr; 4531 } 4532 while (head != curr && (head = next) != NULL); 4533 4534 /* But wait, there's more! Input sections up to stub_group_size 4535 bytes after the stub section can be handled by it too. */ 4536 if (!stubs_always_after_branch) 4537 { 4538 stub_group_start = curr->output_offset + curr->size; 4539 4540 while (next != NULL) 4541 { 4542 end_of_next = next->output_offset + next->size; 4543 if (end_of_next - stub_group_start >= stub_group_size) 4544 /* End of NEXT is too far from stubs, so stop. */ 4545 break; 4546 /* Add NEXT to the stub group. */ 4547 head = next; 4548 next = NEXT_SEC (head); 4549 htab->stub_group[head->id].link_sec = curr; 4550 } 4551 } 4552 head = next; 4553 } 4554 } 4555 while (list++ != htab->input_list + htab->top_index); 4556 4557 free (htab->input_list); 4558 #undef PREV_SEC 4559 #undef NEXT_SEC 4560 } 4561 4562 /* Comparison function for sorting/searching relocations relating to Cortex-A8 4563 erratum fix. */ 4564 4565 static int 4566 a8_reloc_compare (const void *a, const void *b) 4567 { 4568 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a; 4569 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b; 4570 4571 if (ra->from < rb->from) 4572 return -1; 4573 else if (ra->from > rb->from) 4574 return 1; 4575 else 4576 return 0; 4577 } 4578 4579 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *, 4580 const char *, char **); 4581 4582 /* Helper function to scan code for sequences which might trigger the Cortex-A8 4583 branch/TLB erratum. Fill in the table described by A8_FIXES_P, 4584 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false 4585 otherwise. 
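   (Note the inverted convention: TRUE means an error occurred.)  */

/* One detail of the scan below that is easy to miss is how it decides
   whether a halfword starts a 32-bit Thumb instruction.  The predicate name
   here is hypothetical and the function is not part of BFD, but the test is
   the one the scan performs: an instruction is 32 bits long exactly when
   its first halfword lies in the range 0xe800-0xffff, i.e. the top three
   bits are set and bits 12-11 are not both clear.  */

static int
example_thumb_insn_is_32bit (unsigned int first_halfword)
{
  return (first_halfword & 0xe000) == 0xe000
         && (first_halfword & 0x1800) != 0x0000;
}

/* The scan itself: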
*/ 4586 4587 static bfd_boolean 4588 cortex_a8_erratum_scan (bfd *input_bfd, 4589 struct bfd_link_info *info, 4590 struct a8_erratum_fix **a8_fixes_p, 4591 unsigned int *num_a8_fixes_p, 4592 unsigned int *a8_fix_table_size_p, 4593 struct a8_erratum_reloc *a8_relocs, 4594 unsigned int num_a8_relocs, 4595 unsigned prev_num_a8_fixes, 4596 bfd_boolean *stub_changed_p) 4597 { 4598 asection *section; 4599 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); 4600 struct a8_erratum_fix *a8_fixes = *a8_fixes_p; 4601 unsigned int num_a8_fixes = *num_a8_fixes_p; 4602 unsigned int a8_fix_table_size = *a8_fix_table_size_p; 4603 4604 if (htab == NULL) 4605 return FALSE; 4606 4607 for (section = input_bfd->sections; 4608 section != NULL; 4609 section = section->next) 4610 { 4611 bfd_byte *contents = NULL; 4612 struct _arm_elf_section_data *sec_data; 4613 unsigned int span; 4614 bfd_vma base_vma; 4615 4616 if (elf_section_type (section) != SHT_PROGBITS 4617 || (elf_section_flags (section) & SHF_EXECINSTR) == 0 4618 || (section->flags & SEC_EXCLUDE) != 0 4619 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS) 4620 || (section->output_section == bfd_abs_section_ptr)) 4621 continue; 4622 4623 base_vma = section->output_section->vma + section->output_offset; 4624 4625 if (elf_section_data (section)->this_hdr.contents != NULL) 4626 contents = elf_section_data (section)->this_hdr.contents; 4627 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents)) 4628 return TRUE; 4629 4630 sec_data = elf32_arm_section_data (section); 4631 4632 for (span = 0; span < sec_data->mapcount; span++) 4633 { 4634 unsigned int span_start = sec_data->map[span].vma; 4635 unsigned int span_end = (span == sec_data->mapcount - 1) 4636 ? section->size : sec_data->map[span + 1].vma; 4637 unsigned int i; 4638 char span_type = sec_data->map[span].type; 4639 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE; 4640 4641 if (span_type != 't') 4642 continue; 4643 4644 /* Span is entirely within a single 4KB region: skip scanning. */ 4645 if (((base_vma + span_start) & ~0xfff) 4646 == ((base_vma + span_end) & ~0xfff)) 4647 continue; 4648 4649 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where: 4650 4651 * The opcode is BLX.W, BL.W, B.W, Bcc.W 4652 * The branch target is in the same 4KB region as the 4653 first half of the branch. 4654 * The instruction before the branch is a 32-bit 4655 length non-branch instruction. */ 4656 for (i = span_start; i < span_end;) 4657 { 4658 unsigned int insn = bfd_getl16 (&contents[i]); 4659 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE; 4660 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch; 4661 4662 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000) 4663 insn_32bit = TRUE; 4664 4665 if (insn_32bit) 4666 { 4667 /* Load the rest of the insn (in manual-friendly order). */ 4668 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]); 4669 4670 /* Encoding T4: B<c>.W. */ 4671 is_b = (insn & 0xf800d000) == 0xf0009000; 4672 /* Encoding T1: BL<c>.W. */ 4673 is_bl = (insn & 0xf800d000) == 0xf000d000; 4674 /* Encoding T2: BLX<c>.W. */ 4675 is_blx = (insn & 0xf800d000) == 0xf000c000; 4676 /* Encoding T3: B<c>.W (not permitted in IT block). */ 4677 is_bcc = (insn & 0xf800d000) == 0xf0008000 4678 && (insn & 0x07f00000) != 0x03800000; 4679 } 4680 4681 is_32bit_branch = is_b || is_bl || is_blx || is_bcc; 4682 4683 if (((base_vma + i) & 0xfff) == 0xffe 4684 && insn_32bit 4685 && is_32bit_branch 4686 && last_was_32bit 4687 && ! 
last_was_branch) 4688 { 4689 bfd_signed_vma offset = 0; 4690 bfd_boolean force_target_arm = FALSE; 4691 bfd_boolean force_target_thumb = FALSE; 4692 bfd_vma target; 4693 enum elf32_arm_stub_type stub_type = arm_stub_none; 4694 struct a8_erratum_reloc key, *found; 4695 bfd_boolean use_plt = FALSE; 4696 4697 key.from = base_vma + i; 4698 found = (struct a8_erratum_reloc *) 4699 bsearch (&key, a8_relocs, num_a8_relocs, 4700 sizeof (struct a8_erratum_reloc), 4701 &a8_reloc_compare); 4702 4703 if (found) 4704 { 4705 char *error_message = NULL; 4706 struct elf_link_hash_entry *entry; 4707 4708 /* We don't care about the error returned from this 4709 function, only if there is glue or not. */ 4710 entry = find_thumb_glue (info, found->sym_name, 4711 &error_message); 4712 4713 if (entry) 4714 found->non_a8_stub = TRUE; 4715 4716 /* Keep a simpler condition, for the sake of clarity. */ 4717 if (htab->root.splt != NULL && found->hash != NULL 4718 && found->hash->root.plt.offset != (bfd_vma) -1) 4719 use_plt = TRUE; 4720 4721 if (found->r_type == R_ARM_THM_CALL) 4722 { 4723 if (found->branch_type == ST_BRANCH_TO_ARM 4724 || use_plt) 4725 force_target_arm = TRUE; 4726 else 4727 force_target_thumb = TRUE; 4728 } 4729 } 4730 4731 /* Check if we have an offending branch instruction. */ 4732 4733 if (found && found->non_a8_stub) 4734 /* We've already made a stub for this instruction, e.g. 4735 it's a long branch or a Thumb->ARM stub. Assume that 4736 stub will suffice to work around the A8 erratum (see 4737 setting of always_after_branch above). */ 4738 ; 4739 else if (is_bcc) 4740 { 4741 offset = (insn & 0x7ff) << 1; 4742 offset |= (insn & 0x3f0000) >> 4; 4743 offset |= (insn & 0x2000) ? 0x40000 : 0; 4744 offset |= (insn & 0x800) ? 0x80000 : 0; 4745 offset |= (insn & 0x4000000) ? 0x100000 : 0; 4746 if (offset & 0x100000) 4747 offset |= ~ ((bfd_signed_vma) 0xfffff); 4748 stub_type = arm_stub_a8_veneer_b_cond; 4749 } 4750 else if (is_b || is_bl || is_blx) 4751 { 4752 int s = (insn & 0x4000000) != 0; 4753 int j1 = (insn & 0x2000) != 0; 4754 int j2 = (insn & 0x800) != 0; 4755 int i1 = !(j1 ^ s); 4756 int i2 = !(j2 ^ s); 4757 4758 offset = (insn & 0x7ff) << 1; 4759 offset |= (insn & 0x3ff0000) >> 4; 4760 offset |= i2 << 22; 4761 offset |= i1 << 23; 4762 offset |= s << 24; 4763 if (offset & 0x1000000) 4764 offset |= ~ ((bfd_signed_vma) 0xffffff); 4765 4766 if (is_blx) 4767 offset &= ~ ((bfd_signed_vma) 3); 4768 4769 stub_type = is_blx ? arm_stub_a8_veneer_blx : 4770 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b; 4771 } 4772 4773 if (stub_type != arm_stub_none) 4774 { 4775 bfd_vma pc_for_insn = base_vma + i + 4; 4776 4777 /* The original instruction is a BL, but the target is 4778 an ARM instruction. If we were not making a stub, 4779 the BL would have been converted to a BLX. Use the 4780 BLX stub instead in that case. */ 4781 if (htab->use_blx && force_target_arm 4782 && stub_type == arm_stub_a8_veneer_bl) 4783 { 4784 stub_type = arm_stub_a8_veneer_blx; 4785 is_blx = TRUE; 4786 is_bl = FALSE; 4787 } 4788 /* Conversely, if the original instruction was 4789 BLX but the target is Thumb mode, use the BL 4790 stub. */ 4791 else if (force_target_thumb 4792 && stub_type == arm_stub_a8_veneer_blx) 4793 { 4794 stub_type = arm_stub_a8_veneer_bl; 4795 is_blx = FALSE; 4796 is_bl = TRUE; 4797 } 4798 4799 if (is_blx) 4800 pc_for_insn &= ~ ((bfd_vma) 3); 4801 4802 /* If we found a relocation, use the proper destination, 4803 not the offset in the (unrelocated) instruction. 
4804 Note this is always done if we switched the stub type 4805 above. */ 4806 if (found) 4807 offset = 4808 (bfd_signed_vma) (found->destination - pc_for_insn); 4809 4810 /* If the stub will use a Thumb-mode branch to a 4811 PLT target, redirect it to the preceding Thumb 4812 entry point. */ 4813 if (stub_type != arm_stub_a8_veneer_blx && use_plt) 4814 offset -= PLT_THUMB_STUB_SIZE; 4815 4816 target = pc_for_insn + offset; 4817 4818 /* The BLX stub is ARM-mode code. Adjust the offset to 4819 take the different PC value (+8 instead of +4) into 4820 account. */ 4821 if (stub_type == arm_stub_a8_veneer_blx) 4822 offset += 4; 4823 4824 if (((base_vma + i) & ~0xfff) == (target & ~0xfff)) 4825 { 4826 char *stub_name = NULL; 4827 4828 if (num_a8_fixes == a8_fix_table_size) 4829 { 4830 a8_fix_table_size *= 2; 4831 a8_fixes = (struct a8_erratum_fix *) 4832 bfd_realloc (a8_fixes, 4833 sizeof (struct a8_erratum_fix) 4834 * a8_fix_table_size); 4835 } 4836 4837 if (num_a8_fixes < prev_num_a8_fixes) 4838 { 4839 /* If we're doing a subsequent scan, 4840 check if we've found the same fix as 4841 before, and try and reuse the stub 4842 name. */ 4843 stub_name = a8_fixes[num_a8_fixes].stub_name; 4844 if ((a8_fixes[num_a8_fixes].section != section) 4845 || (a8_fixes[num_a8_fixes].offset != i)) 4846 { 4847 free (stub_name); 4848 stub_name = NULL; 4849 *stub_changed_p = TRUE; 4850 } 4851 } 4852 4853 if (!stub_name) 4854 { 4855 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1); 4856 if (stub_name != NULL) 4857 sprintf (stub_name, "%x:%x", section->id, i); 4858 } 4859 4860 a8_fixes[num_a8_fixes].input_bfd = input_bfd; 4861 a8_fixes[num_a8_fixes].section = section; 4862 a8_fixes[num_a8_fixes].offset = i; 4863 a8_fixes[num_a8_fixes].addend = offset; 4864 a8_fixes[num_a8_fixes].orig_insn = insn; 4865 a8_fixes[num_a8_fixes].stub_name = stub_name; 4866 a8_fixes[num_a8_fixes].stub_type = stub_type; 4867 a8_fixes[num_a8_fixes].branch_type = 4868 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB; 4869 4870 num_a8_fixes++; 4871 } 4872 } 4873 } 4874 4875 i += insn_32bit ? 4 : 2; 4876 last_was_32bit = insn_32bit; 4877 last_was_branch = is_32bit_branch; 4878 } 4879 } 4880 4881 if (elf_section_data (section)->this_hdr.contents == NULL) 4882 free (contents); 4883 } 4884 4885 *a8_fixes_p = a8_fixes; 4886 *num_a8_fixes_p = num_a8_fixes; 4887 *a8_fix_table_size_p = a8_fix_table_size; 4888 4889 return FALSE; 4890 } 4891 4892 /* Determine and set the size of the stub section for a final link. 4893 4894 The basic idea here is to examine all the relocations looking for 4895 PC-relative calls to a target that is unreachable with a "bl" 4896 instruction. 
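   A sketch of what "unreachable" means in practice may be useful.  */

/* Illustrative only, not part of BFD: ARM BL/B can reach roughly +/-32 MB,
   Thumb-2 BL/B.W roughly +/-16 MB, and original (Thumb-1) BL roughly
   +/-4 MB.  The helper below shows the shape of the range test made by
   arm_type_of_stub; the name and the explicit limit parameters are
   hypothetical, the real code compares against the
   *_MAX_FWD/BWD_BRANCH_OFFSET macros.  */

static int
example_branch_needs_stub (bfd_vma from, bfd_vma to,
                           bfd_signed_vma max_fwd, bfd_signed_vma max_bwd)
{
  bfd_signed_vma offset = (bfd_signed_vma) (to - from);

  return offset > max_fwd || offset < max_bwd;
}

/* The worst case, Thumb-1 code, is also where the default group size used
   below comes from: its +/-4 MB (4194304 byte) reach less roughly 24 KB of
   slack for the stubs themselves gives 4170000, leaving room for about 2025
   twelve-byte stubs per group.

   The sizing pass itself: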
*/ 4897 4898 bfd_boolean 4899 elf32_arm_size_stubs (bfd *output_bfd, 4900 bfd *stub_bfd, 4901 struct bfd_link_info *info, 4902 bfd_signed_vma group_size, 4903 asection * (*add_stub_section) (const char *, asection *), 4904 void (*layout_sections_again) (void)) 4905 { 4906 bfd_size_type stub_group_size; 4907 bfd_boolean stubs_always_after_branch; 4908 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); 4909 struct a8_erratum_fix *a8_fixes = NULL; 4910 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10; 4911 struct a8_erratum_reloc *a8_relocs = NULL; 4912 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i; 4913 4914 if (htab == NULL) 4915 return FALSE; 4916 4917 if (htab->fix_cortex_a8) 4918 { 4919 a8_fixes = (struct a8_erratum_fix *) 4920 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size); 4921 a8_relocs = (struct a8_erratum_reloc *) 4922 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size); 4923 } 4924 4925 /* Propagate mach to stub bfd, because it may not have been 4926 finalized when we created stub_bfd. */ 4927 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd), 4928 bfd_get_mach (output_bfd)); 4929 4930 /* Stash our params away. */ 4931 htab->stub_bfd = stub_bfd; 4932 htab->add_stub_section = add_stub_section; 4933 htab->layout_sections_again = layout_sections_again; 4934 stubs_always_after_branch = group_size < 0; 4935 4936 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page 4937 as the first half of a 32-bit branch straddling two 4K pages. This is a 4938 crude way of enforcing that. */ 4939 if (htab->fix_cortex_a8) 4940 stubs_always_after_branch = 1; 4941 4942 if (group_size < 0) 4943 stub_group_size = -group_size; 4944 else 4945 stub_group_size = group_size; 4946 4947 if (stub_group_size == 1) 4948 { 4949 /* Default values. */ 4950 /* Thumb branch range is +-4MB has to be used as the default 4951 maximum size (a given section can contain both ARM and Thumb 4952 code, so the worst case has to be taken into account). 4953 4954 This value is 24K less than that, which allows for 2025 4955 12-byte stubs. If we exceed that, then we will fail to link. 4956 The user will have to relink with an explicit group size 4957 option. */ 4958 stub_group_size = 4170000; 4959 } 4960 4961 group_sections (htab, stub_group_size, stubs_always_after_branch); 4962 4963 /* If we're applying the cortex A8 fix, we need to determine the 4964 program header size now, because we cannot change it later -- 4965 that could alter section placements. Notice the A8 erratum fix 4966 ends up requiring the section addresses to remain unchanged 4967 modulo the page size. That's something we cannot represent 4968 inside BFD, and we don't want to force the section alignment to 4969 be the page size. */ 4970 if (htab->fix_cortex_a8) 4971 (*htab->layout_sections_again) (); 4972 4973 while (1) 4974 { 4975 bfd *input_bfd; 4976 unsigned int bfd_indx; 4977 asection *stub_sec; 4978 bfd_boolean stub_changed = FALSE; 4979 unsigned prev_num_a8_fixes = num_a8_fixes; 4980 4981 num_a8_fixes = 0; 4982 for (input_bfd = info->input_bfds, bfd_indx = 0; 4983 input_bfd != NULL; 4984 input_bfd = input_bfd->link_next, bfd_indx++) 4985 { 4986 Elf_Internal_Shdr *symtab_hdr; 4987 asection *section; 4988 Elf_Internal_Sym *local_syms = NULL; 4989 4990 if (!is_arm_elf (input_bfd)) 4991 continue; 4992 4993 num_a8_relocs = 0; 4994 4995 /* We'll need the symbol table in a second. 
*/ 4996 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr; 4997 if (symtab_hdr->sh_info == 0) 4998 continue; 4999 5000 /* Walk over each section attached to the input bfd. */ 5001 for (section = input_bfd->sections; 5002 section != NULL; 5003 section = section->next) 5004 { 5005 Elf_Internal_Rela *internal_relocs, *irelaend, *irela; 5006 5007 /* If there aren't any relocs, then there's nothing more 5008 to do. */ 5009 if ((section->flags & SEC_RELOC) == 0 5010 || section->reloc_count == 0 5011 || (section->flags & SEC_CODE) == 0) 5012 continue; 5013 5014 /* If this section is a link-once section that will be 5015 discarded, then don't create any stubs. */ 5016 if (section->output_section == NULL 5017 || section->output_section->owner != output_bfd) 5018 continue; 5019 5020 /* Get the relocs. */ 5021 internal_relocs 5022 = _bfd_elf_link_read_relocs (input_bfd, section, NULL, 5023 NULL, info->keep_memory); 5024 if (internal_relocs == NULL) 5025 goto error_ret_free_local; 5026 5027 /* Now examine each relocation. */ 5028 irela = internal_relocs; 5029 irelaend = irela + section->reloc_count; 5030 for (; irela < irelaend; irela++) 5031 { 5032 unsigned int r_type, r_indx; 5033 enum elf32_arm_stub_type stub_type; 5034 struct elf32_arm_stub_hash_entry *stub_entry; 5035 asection *sym_sec; 5036 bfd_vma sym_value; 5037 bfd_vma destination; 5038 struct elf32_arm_link_hash_entry *hash; 5039 const char *sym_name; 5040 char *stub_name; 5041 const asection *id_sec; 5042 unsigned char st_type; 5043 enum arm_st_branch_type branch_type; 5044 bfd_boolean created_stub = FALSE; 5045 5046 r_type = ELF32_R_TYPE (irela->r_info); 5047 r_indx = ELF32_R_SYM (irela->r_info); 5048 5049 if (r_type >= (unsigned int) R_ARM_max) 5050 { 5051 bfd_set_error (bfd_error_bad_value); 5052 error_ret_free_internal: 5053 if (elf_section_data (section)->relocs == NULL) 5054 free (internal_relocs); 5055 goto error_ret_free_local; 5056 } 5057 5058 hash = NULL; 5059 if (r_indx >= symtab_hdr->sh_info) 5060 hash = elf32_arm_hash_entry 5061 (elf_sym_hashes (input_bfd) 5062 [r_indx - symtab_hdr->sh_info]); 5063 5064 /* Only look for stubs on branch instructions, or 5065 non-relaxed TLSCALL */ 5066 if ((r_type != (unsigned int) R_ARM_CALL) 5067 && (r_type != (unsigned int) R_ARM_THM_CALL) 5068 && (r_type != (unsigned int) R_ARM_JUMP24) 5069 && (r_type != (unsigned int) R_ARM_THM_JUMP19) 5070 && (r_type != (unsigned int) R_ARM_THM_XPC22) 5071 && (r_type != (unsigned int) R_ARM_THM_JUMP24) 5072 && (r_type != (unsigned int) R_ARM_PLT32) 5073 && !((r_type == (unsigned int) R_ARM_TLS_CALL 5074 || r_type == (unsigned int) R_ARM_THM_TLS_CALL) 5075 && r_type == elf32_arm_tls_transition 5076 (info, r_type, &hash->root) 5077 && ((hash ? hash->tls_type 5078 : (elf32_arm_local_got_tls_type 5079 (input_bfd)[r_indx])) 5080 & GOT_TLS_GDESC) != 0)) 5081 continue; 5082 5083 /* Now determine the call target, its name, value, 5084 section. */ 5085 sym_sec = NULL; 5086 sym_value = 0; 5087 destination = 0; 5088 sym_name = NULL; 5089 5090 if (r_type == (unsigned int) R_ARM_TLS_CALL 5091 || r_type == (unsigned int) R_ARM_THM_TLS_CALL) 5092 { 5093 /* A non-relaxed TLS call. The target is the 5094 plt-resident trampoline and nothing to do 5095 with the symbol. */ 5096 BFD_ASSERT (htab->tls_trampoline > 0); 5097 sym_sec = htab->root.splt; 5098 sym_value = htab->tls_trampoline; 5099 hash = 0; 5100 st_type = STT_FUNC; 5101 branch_type = ST_BRANCH_TO_ARM; 5102 } 5103 else if (!hash) 5104 { 5105 /* It's a local symbol. 
*/ 5106 Elf_Internal_Sym *sym; 5107 5108 if (local_syms == NULL) 5109 { 5110 local_syms 5111 = (Elf_Internal_Sym *) symtab_hdr->contents; 5112 if (local_syms == NULL) 5113 local_syms 5114 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr, 5115 symtab_hdr->sh_info, 0, 5116 NULL, NULL, NULL); 5117 if (local_syms == NULL) 5118 goto error_ret_free_internal; 5119 } 5120 5121 sym = local_syms + r_indx; 5122 if (sym->st_shndx == SHN_UNDEF) 5123 sym_sec = bfd_und_section_ptr; 5124 else if (sym->st_shndx == SHN_ABS) 5125 sym_sec = bfd_abs_section_ptr; 5126 else if (sym->st_shndx == SHN_COMMON) 5127 sym_sec = bfd_com_section_ptr; 5128 else 5129 sym_sec = 5130 bfd_section_from_elf_index (input_bfd, sym->st_shndx); 5131 5132 if (!sym_sec) 5133 /* This is an undefined symbol. It can never 5134 be resolved. */ 5135 continue; 5136 5137 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION) 5138 sym_value = sym->st_value; 5139 destination = (sym_value + irela->r_addend 5140 + sym_sec->output_offset 5141 + sym_sec->output_section->vma); 5142 st_type = ELF_ST_TYPE (sym->st_info); 5143 branch_type = ARM_SYM_BRANCH_TYPE (sym); 5144 sym_name 5145 = bfd_elf_string_from_elf_section (input_bfd, 5146 symtab_hdr->sh_link, 5147 sym->st_name); 5148 } 5149 else 5150 { 5151 /* It's an external symbol. */ 5152 while (hash->root.root.type == bfd_link_hash_indirect 5153 || hash->root.root.type == bfd_link_hash_warning) 5154 hash = ((struct elf32_arm_link_hash_entry *) 5155 hash->root.root.u.i.link); 5156 5157 if (hash->root.root.type == bfd_link_hash_defined 5158 || hash->root.root.type == bfd_link_hash_defweak) 5159 { 5160 sym_sec = hash->root.root.u.def.section; 5161 sym_value = hash->root.root.u.def.value; 5162 5163 struct elf32_arm_link_hash_table *globals = 5164 elf32_arm_hash_table (info); 5165 5166 /* For a destination in a shared library, 5167 use the PLT stub as target address to 5168 decide whether a branch stub is 5169 needed. */ 5170 if (globals != NULL 5171 && globals->root.splt != NULL 5172 && hash != NULL 5173 && hash->root.plt.offset != (bfd_vma) -1) 5174 { 5175 sym_sec = globals->root.splt; 5176 sym_value = hash->root.plt.offset; 5177 if (sym_sec->output_section != NULL) 5178 destination = (sym_value 5179 + sym_sec->output_offset 5180 + sym_sec->output_section->vma); 5181 } 5182 else if (sym_sec->output_section != NULL) 5183 destination = (sym_value + irela->r_addend 5184 + sym_sec->output_offset 5185 + sym_sec->output_section->vma); 5186 } 5187 else if ((hash->root.root.type == bfd_link_hash_undefined) 5188 || (hash->root.root.type == bfd_link_hash_undefweak)) 5189 { 5190 /* For a shared library, use the PLT stub as 5191 target address to decide whether a long 5192 branch stub is needed. 5193 For absolute code, they cannot be handled. */ 5194 struct elf32_arm_link_hash_table *globals = 5195 elf32_arm_hash_table (info); 5196 5197 if (globals != NULL 5198 && globals->root.splt != NULL 5199 && hash != NULL 5200 && hash->root.plt.offset != (bfd_vma) -1) 5201 { 5202 sym_sec = globals->root.splt; 5203 sym_value = hash->root.plt.offset; 5204 if (sym_sec->output_section != NULL) 5205 destination = (sym_value 5206 + sym_sec->output_offset 5207 + sym_sec->output_section->vma); 5208 } 5209 else 5210 continue; 5211 } 5212 else 5213 { 5214 bfd_set_error (bfd_error_bad_value); 5215 goto error_ret_free_internal; 5216 } 5217 st_type = hash->root.type; 5218 branch_type = hash->root.target_internal; 5219 sym_name = hash->root.root.root.string; 5220 } 5221 5222 do 5223 { 5224 /* Determine what (if any) linker stub is needed. 
*/ 5225 stub_type = arm_type_of_stub (info, section, irela, 5226 st_type, &branch_type, 5227 hash, destination, sym_sec, 5228 input_bfd, sym_name); 5229 if (stub_type == arm_stub_none) 5230 break; 5231 5232 /* Support for grouping stub sections. */ 5233 id_sec = htab->stub_group[section->id].link_sec; 5234 5235 /* Get the name of this stub. */ 5236 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, 5237 irela, stub_type); 5238 if (!stub_name) 5239 goto error_ret_free_internal; 5240 5241 /* We've either created a stub for this reloc already, 5242 or we are about to. */ 5243 created_stub = TRUE; 5244 5245 stub_entry = arm_stub_hash_lookup 5246 (&htab->stub_hash_table, stub_name, 5247 FALSE, FALSE); 5248 if (stub_entry != NULL) 5249 { 5250 /* The proper stub has already been created. */ 5251 free (stub_name); 5252 stub_entry->target_value = sym_value; 5253 break; 5254 } 5255 5256 stub_entry = elf32_arm_add_stub (stub_name, section, 5257 htab); 5258 if (stub_entry == NULL) 5259 { 5260 free (stub_name); 5261 goto error_ret_free_internal; 5262 } 5263 5264 stub_entry->target_value = sym_value; 5265 stub_entry->target_section = sym_sec; 5266 stub_entry->stub_type = stub_type; 5267 stub_entry->h = hash; 5268 stub_entry->branch_type = branch_type; 5269 5270 if (sym_name == NULL) 5271 sym_name = "unnamed"; 5272 stub_entry->output_name = (char *) 5273 bfd_alloc (htab->stub_bfd, 5274 sizeof (THUMB2ARM_GLUE_ENTRY_NAME) 5275 + strlen (sym_name)); 5276 if (stub_entry->output_name == NULL) 5277 { 5278 free (stub_name); 5279 goto error_ret_free_internal; 5280 } 5281 5282 /* For historical reasons, use the existing names for 5283 ARM-to-Thumb and Thumb-to-ARM stubs. */ 5284 if ((r_type == (unsigned int) R_ARM_THM_CALL 5285 || r_type == (unsigned int) R_ARM_THM_JUMP24) 5286 && branch_type == ST_BRANCH_TO_ARM) 5287 sprintf (stub_entry->output_name, 5288 THUMB2ARM_GLUE_ENTRY_NAME, sym_name); 5289 else if ((r_type == (unsigned int) R_ARM_CALL 5290 || r_type == (unsigned int) R_ARM_JUMP24) 5291 && branch_type == ST_BRANCH_TO_THUMB) 5292 sprintf (stub_entry->output_name, 5293 ARM2THUMB_GLUE_ENTRY_NAME, sym_name); 5294 else 5295 sprintf (stub_entry->output_name, STUB_ENTRY_NAME, 5296 sym_name); 5297 5298 stub_changed = TRUE; 5299 } 5300 while (0); 5301 5302 /* Look for relocations which might trigger Cortex-A8 5303 erratum. */ 5304 if (htab->fix_cortex_a8 5305 && (r_type == (unsigned int) R_ARM_THM_JUMP24 5306 || r_type == (unsigned int) R_ARM_THM_JUMP19 5307 || r_type == (unsigned int) R_ARM_THM_CALL 5308 || r_type == (unsigned int) R_ARM_THM_XPC22)) 5309 { 5310 bfd_vma from = section->output_section->vma 5311 + section->output_offset 5312 + irela->r_offset; 5313 5314 if ((from & 0xfff) == 0xffe) 5315 { 5316 /* Found a candidate. Note we haven't checked the 5317 destination is within 4K here: if we do so (and 5318 don't create an entry in a8_relocs) we can't tell 5319 that a branch should have been relocated when 5320 scanning later. 
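   For example, with 4 KB regions a 32-bit Thumb branch whose first halfword
   sits at 0x00010ffe has its second halfword at 0x00011000, so the
   instruction straddles two regions; that is exactly the layout the
   Cortex-A8 workaround has to consider, and only such branches are recorded
   in a8_relocs.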
*/ 5321 if (num_a8_relocs == a8_reloc_table_size) 5322 { 5323 a8_reloc_table_size *= 2; 5324 a8_relocs = (struct a8_erratum_reloc *) 5325 bfd_realloc (a8_relocs, 5326 sizeof (struct a8_erratum_reloc) 5327 * a8_reloc_table_size); 5328 } 5329 5330 a8_relocs[num_a8_relocs].from = from; 5331 a8_relocs[num_a8_relocs].destination = destination; 5332 a8_relocs[num_a8_relocs].r_type = r_type; 5333 a8_relocs[num_a8_relocs].branch_type = branch_type; 5334 a8_relocs[num_a8_relocs].sym_name = sym_name; 5335 a8_relocs[num_a8_relocs].non_a8_stub = created_stub; 5336 a8_relocs[num_a8_relocs].hash = hash; 5337 5338 num_a8_relocs++; 5339 } 5340 } 5341 } 5342 5343 /* We're done with the internal relocs, free them. */ 5344 if (elf_section_data (section)->relocs == NULL) 5345 free (internal_relocs); 5346 } 5347 5348 if (htab->fix_cortex_a8) 5349 { 5350 /* Sort relocs which might apply to Cortex-A8 erratum. */ 5351 qsort (a8_relocs, num_a8_relocs, 5352 sizeof (struct a8_erratum_reloc), 5353 &a8_reloc_compare); 5354 5355 /* Scan for branches which might trigger Cortex-A8 erratum. */ 5356 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes, 5357 &num_a8_fixes, &a8_fix_table_size, 5358 a8_relocs, num_a8_relocs, 5359 prev_num_a8_fixes, &stub_changed) 5360 != 0) 5361 goto error_ret_free_local; 5362 } 5363 } 5364 5365 if (prev_num_a8_fixes != num_a8_fixes) 5366 stub_changed = TRUE; 5367 5368 if (!stub_changed) 5369 break; 5370 5371 /* OK, we've added some stubs. Find out the new size of the 5372 stub sections. */ 5373 for (stub_sec = htab->stub_bfd->sections; 5374 stub_sec != NULL; 5375 stub_sec = stub_sec->next) 5376 { 5377 /* Ignore non-stub sections. */ 5378 if (!strstr (stub_sec->name, STUB_SUFFIX)) 5379 continue; 5380 5381 stub_sec->size = 0; 5382 } 5383 5384 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab); 5385 5386 /* Add Cortex-A8 erratum veneers to stub section sizes too. */ 5387 if (htab->fix_cortex_a8) 5388 for (i = 0; i < num_a8_fixes; i++) 5389 { 5390 stub_sec = elf32_arm_create_or_find_stub_sec (NULL, 5391 a8_fixes[i].section, htab); 5392 5393 if (stub_sec == NULL) 5394 goto error_ret_free_local; 5395 5396 stub_sec->size 5397 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL, 5398 NULL); 5399 } 5400 5401 5402 /* Ask the linker to do its stuff. */ 5403 (*htab->layout_sections_again) (); 5404 } 5405 5406 /* Add stubs for Cortex-A8 erratum fixes now. 
*/ 5407 if (htab->fix_cortex_a8) 5408 { 5409 for (i = 0; i < num_a8_fixes; i++) 5410 { 5411 struct elf32_arm_stub_hash_entry *stub_entry; 5412 char *stub_name = a8_fixes[i].stub_name; 5413 asection *section = a8_fixes[i].section; 5414 unsigned int section_id = a8_fixes[i].section->id; 5415 asection *link_sec = htab->stub_group[section_id].link_sec; 5416 asection *stub_sec = htab->stub_group[section_id].stub_sec; 5417 const insn_sequence *template_sequence; 5418 int template_size, size = 0; 5419 5420 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, 5421 TRUE, FALSE); 5422 if (stub_entry == NULL) 5423 { 5424 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"), 5425 section->owner, 5426 stub_name); 5427 return FALSE; 5428 } 5429 5430 stub_entry->stub_sec = stub_sec; 5431 stub_entry->stub_offset = 0; 5432 stub_entry->id_sec = link_sec; 5433 stub_entry->stub_type = a8_fixes[i].stub_type; 5434 stub_entry->target_section = a8_fixes[i].section; 5435 stub_entry->target_value = a8_fixes[i].offset; 5436 stub_entry->target_addend = a8_fixes[i].addend; 5437 stub_entry->orig_insn = a8_fixes[i].orig_insn; 5438 stub_entry->branch_type = a8_fixes[i].branch_type; 5439 5440 size = find_stub_size_and_template (a8_fixes[i].stub_type, 5441 &template_sequence, 5442 &template_size); 5443 5444 stub_entry->stub_size = size; 5445 stub_entry->stub_template = template_sequence; 5446 stub_entry->stub_template_size = template_size; 5447 } 5448 5449 /* Stash the Cortex-A8 erratum fix array for use later in 5450 elf32_arm_write_section(). */ 5451 htab->a8_erratum_fixes = a8_fixes; 5452 htab->num_a8_erratum_fixes = num_a8_fixes; 5453 } 5454 else 5455 { 5456 htab->a8_erratum_fixes = NULL; 5457 htab->num_a8_erratum_fixes = 0; 5458 } 5459 return TRUE; 5460 5461 error_ret_free_local: 5462 return FALSE; 5463 } 5464 5465 /* Build all the stubs associated with the current output file. The 5466 stubs are kept in a hash table attached to the main linker hash 5467 table. We also set up the .plt entries for statically linked PIC 5468 functions here. This function is called via arm_elf_finish in the 5469 linker. */ 5470 5471 bfd_boolean 5472 elf32_arm_build_stubs (struct bfd_link_info *info) 5473 { 5474 asection *stub_sec; 5475 struct bfd_hash_table *table; 5476 struct elf32_arm_link_hash_table *htab; 5477 5478 htab = elf32_arm_hash_table (info); 5479 if (htab == NULL) 5480 return FALSE; 5481 5482 for (stub_sec = htab->stub_bfd->sections; 5483 stub_sec != NULL; 5484 stub_sec = stub_sec->next) 5485 { 5486 bfd_size_type size; 5487 5488 /* Ignore non-stub sections. */ 5489 if (!strstr (stub_sec->name, STUB_SUFFIX)) 5490 continue; 5491 5492 /* Allocate memory to hold the linker stubs. */ 5493 size = stub_sec->size; 5494 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size); 5495 if (stub_sec->contents == NULL && size != 0) 5496 return FALSE; 5497 stub_sec->size = 0; 5498 } 5499 5500 /* Build the stubs as directed by the stub hash table. */ 5501 table = &htab->stub_hash_table; 5502 bfd_hash_traverse (table, arm_build_one_stub, info); 5503 if (htab->fix_cortex_a8) 5504 { 5505 /* Place the cortex a8 stubs last. */ 5506 htab->fix_cortex_a8 = -1; 5507 bfd_hash_traverse (table, arm_build_one_stub, info); 5508 } 5509 5510 return TRUE; 5511 } 5512 5513 /* Locate the Thumb encoded calling stub for NAME. 
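   The lookup is purely by name: the glue entry pattern is expanded
   with NAME and the result is searched for in the global hash table,
   so only glue recorded earlier in the link can be found.  For
   illustration, with the printf-style definition of
   THUMB2ARM_GLUE_ENTRY_NAME (typically expanding to something like
   "__foo_from_thumb") the key for a function "foo" is built roughly
   like this:

     char *key = bfd_malloc (strlen ("foo")
                             + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
     sprintf (key, THUMB2ARM_GLUE_ENTRY_NAME, "foo");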
*/ 5514 5515 static struct elf_link_hash_entry * 5516 find_thumb_glue (struct bfd_link_info *link_info, 5517 const char *name, 5518 char **error_message) 5519 { 5520 char *tmp_name; 5521 struct elf_link_hash_entry *hash; 5522 struct elf32_arm_link_hash_table *hash_table; 5523 5524 /* We need a pointer to the armelf specific hash table. */ 5525 hash_table = elf32_arm_hash_table (link_info); 5526 if (hash_table == NULL) 5527 return NULL; 5528 5529 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name) 5530 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1); 5531 5532 BFD_ASSERT (tmp_name); 5533 5534 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name); 5535 5536 hash = elf_link_hash_lookup 5537 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE); 5538 5539 if (hash == NULL 5540 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"), 5541 tmp_name, name) == -1) 5542 *error_message = (char *) bfd_errmsg (bfd_error_system_call); 5543 5544 free (tmp_name); 5545 5546 return hash; 5547 } 5548 5549 /* Locate the ARM encoded calling stub for NAME. */ 5550 5551 static struct elf_link_hash_entry * 5552 find_arm_glue (struct bfd_link_info *link_info, 5553 const char *name, 5554 char **error_message) 5555 { 5556 char *tmp_name; 5557 struct elf_link_hash_entry *myh; 5558 struct elf32_arm_link_hash_table *hash_table; 5559 5560 /* We need a pointer to the elfarm specific hash table. */ 5561 hash_table = elf32_arm_hash_table (link_info); 5562 if (hash_table == NULL) 5563 return NULL; 5564 5565 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name) 5566 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1); 5567 5568 BFD_ASSERT (tmp_name); 5569 5570 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name); 5571 5572 myh = elf_link_hash_lookup 5573 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE); 5574 5575 if (myh == NULL 5576 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"), 5577 tmp_name, name) == -1) 5578 *error_message = (char *) bfd_errmsg (bfd_error_system_call); 5579 5580 free (tmp_name); 5581 5582 return myh; 5583 } 5584 5585 /* ARM->Thumb glue (static images): 5586 5587 .arm 5588 __func_from_arm: 5589 ldr r12, __func_addr 5590 bx r12 5591 __func_addr: 5592 .word func @ behave as if you saw a ARM_32 reloc. 5593 5594 (v5t static images) 5595 .arm 5596 __func_from_arm: 5597 ldr pc, __func_addr 5598 __func_addr: 5599 .word func @ behave as if you saw a ARM_32 reloc. 5600 5601 (relocatable images) 5602 .arm 5603 __func_from_arm: 5604 ldr r12, __func_offset 5605 add r12, r12, pc 5606 bx r12 5607 __func_offset: 5608 .word func - . */ 5609 5610 #define ARM2THUMB_STATIC_GLUE_SIZE 12 5611 static const insn32 a2t1_ldr_insn = 0xe59fc000; 5612 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c; 5613 static const insn32 a2t3_func_addr_insn = 0x00000001; 5614 5615 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8 5616 static const insn32 a2t1v5_ldr_insn = 0xe51ff004; 5617 static const insn32 a2t2v5_func_addr_insn = 0x00000001; 5618 5619 #define ARM2THUMB_PIC_GLUE_SIZE 16 5620 static const insn32 a2t1p_ldr_insn = 0xe59fc004; 5621 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f; 5622 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c; 5623 5624 /* Thumb->ARM: Thumb->(non-interworking aware) ARM 5625 5626 .thumb .thumb 5627 .align 2 .align 2 5628 __func_from_thumb: __func_from_thumb: 5629 bx pc push {r6, lr} 5630 nop ldr r6, __func_addr 5631 .arm mov lr, pc 5632 b func bx r6 5633 .arm 5634 ;; back_to_thumb 5635 ldmia r13! 
{r6, lr} 5636 bx lr 5637 __func_addr: 5638 .word func */ 5639 5640 #define THUMB2ARM_GLUE_SIZE 8 5641 static const insn16 t2a1_bx_pc_insn = 0x4778; 5642 static const insn16 t2a2_noop_insn = 0x46c0; 5643 static const insn32 t2a3_b_insn = 0xea000000; 5644 5645 #define VFP11_ERRATUM_VENEER_SIZE 8 5646 5647 #define ARM_BX_VENEER_SIZE 12 5648 static const insn32 armbx1_tst_insn = 0xe3100001; 5649 static const insn32 armbx2_moveq_insn = 0x01a0f000; 5650 static const insn32 armbx3_bx_insn = 0xe12fff10; 5651 5652 #ifndef ELFARM_NABI_C_INCLUDED 5653 static void 5654 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name) 5655 { 5656 asection * s; 5657 bfd_byte * contents; 5658 5659 if (size == 0) 5660 { 5661 /* Do not include empty glue sections in the output. */ 5662 if (abfd != NULL) 5663 { 5664 s = bfd_get_linker_section (abfd, name); 5665 if (s != NULL) 5666 s->flags |= SEC_EXCLUDE; 5667 } 5668 return; 5669 } 5670 5671 BFD_ASSERT (abfd != NULL); 5672 5673 s = bfd_get_linker_section (abfd, name); 5674 BFD_ASSERT (s != NULL); 5675 5676 contents = (bfd_byte *) bfd_alloc (abfd, size); 5677 5678 BFD_ASSERT (s->size == size); 5679 s->contents = contents; 5680 } 5681 5682 bfd_boolean 5683 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info) 5684 { 5685 struct elf32_arm_link_hash_table * globals; 5686 5687 globals = elf32_arm_hash_table (info); 5688 BFD_ASSERT (globals != NULL); 5689 5690 arm_allocate_glue_section_space (globals->bfd_of_glue_owner, 5691 globals->arm_glue_size, 5692 ARM2THUMB_GLUE_SECTION_NAME); 5693 5694 arm_allocate_glue_section_space (globals->bfd_of_glue_owner, 5695 globals->thumb_glue_size, 5696 THUMB2ARM_GLUE_SECTION_NAME); 5697 5698 arm_allocate_glue_section_space (globals->bfd_of_glue_owner, 5699 globals->vfp11_erratum_glue_size, 5700 VFP11_ERRATUM_VENEER_SECTION_NAME); 5701 5702 arm_allocate_glue_section_space (globals->bfd_of_glue_owner, 5703 globals->bx_glue_size, 5704 ARM_BX_GLUE_SECTION_NAME); 5705 5706 return TRUE; 5707 } 5708 5709 /* Allocate space and symbols for calling a Thumb function from Arm mode. 5710 returns the symbol identifying the stub. */ 5711 5712 static struct elf_link_hash_entry * 5713 record_arm_to_thumb_glue (struct bfd_link_info * link_info, 5714 struct elf_link_hash_entry * h) 5715 { 5716 const char * name = h->root.root.string; 5717 asection * s; 5718 char * tmp_name; 5719 struct elf_link_hash_entry * myh; 5720 struct bfd_link_hash_entry * bh; 5721 struct elf32_arm_link_hash_table * globals; 5722 bfd_vma val; 5723 bfd_size_type size; 5724 5725 globals = elf32_arm_hash_table (link_info); 5726 BFD_ASSERT (globals != NULL); 5727 BFD_ASSERT (globals->bfd_of_glue_owner != NULL); 5728 5729 s = bfd_get_linker_section 5730 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME); 5731 5732 BFD_ASSERT (s != NULL); 5733 5734 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name) 5735 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1); 5736 5737 BFD_ASSERT (tmp_name); 5738 5739 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name); 5740 5741 myh = elf_link_hash_lookup 5742 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE); 5743 5744 if (myh != NULL) 5745 { 5746 /* We've already seen this guy. */ 5747 free (tmp_name); 5748 return myh; 5749 } 5750 5751 /* The only trick here is using hash_table->arm_glue_size as the value. 5752 Even though the section isn't allocated yet, this is where we will be 5753 putting it. The +1 on the value marks that the stub has not been 5754 output yet - not that it is a Thumb function. 
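   The marker bit is consumed when the glue is actually written:
   elf32_arm_create_thumb_stub tests the low bit of root.u.def.value,
   emits the instruction sequence the first time through and clears
   the bit with --my_offset, so later calls simply reuse the
   already-written stub.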
*/ 5755 bh = NULL; 5756 val = globals->arm_glue_size + 1; 5757 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner, 5758 tmp_name, BSF_GLOBAL, s, val, 5759 NULL, TRUE, FALSE, &bh); 5760 5761 myh = (struct elf_link_hash_entry *) bh; 5762 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC); 5763 myh->forced_local = 1; 5764 5765 free (tmp_name); 5766 5767 if (link_info->shared || globals->root.is_relocatable_executable 5768 || globals->pic_veneer) 5769 size = ARM2THUMB_PIC_GLUE_SIZE; 5770 else if (globals->use_blx) 5771 size = ARM2THUMB_V5_STATIC_GLUE_SIZE; 5772 else 5773 size = ARM2THUMB_STATIC_GLUE_SIZE; 5774 5775 s->size += size; 5776 globals->arm_glue_size += size; 5777 5778 return myh; 5779 } 5780 5781 /* Allocate space for ARMv4 BX veneers. */ 5782 5783 static void 5784 record_arm_bx_glue (struct bfd_link_info * link_info, int reg) 5785 { 5786 asection * s; 5787 struct elf32_arm_link_hash_table *globals; 5788 char *tmp_name; 5789 struct elf_link_hash_entry *myh; 5790 struct bfd_link_hash_entry *bh; 5791 bfd_vma val; 5792 5793 /* BX PC does not need a veneer. */ 5794 if (reg == 15) 5795 return; 5796 5797 globals = elf32_arm_hash_table (link_info); 5798 BFD_ASSERT (globals != NULL); 5799 BFD_ASSERT (globals->bfd_of_glue_owner != NULL); 5800 5801 /* Check if this veneer has already been allocated. */ 5802 if (globals->bx_glue_offset[reg]) 5803 return; 5804 5805 s = bfd_get_linker_section 5806 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME); 5807 5808 BFD_ASSERT (s != NULL); 5809 5810 /* Add symbol for veneer. */ 5811 tmp_name = (char *) 5812 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1); 5813 5814 BFD_ASSERT (tmp_name); 5815 5816 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg); 5817 5818 myh = elf_link_hash_lookup 5819 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE); 5820 5821 BFD_ASSERT (myh == NULL); 5822 5823 bh = NULL; 5824 val = globals->bx_glue_size; 5825 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner, 5826 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val, 5827 NULL, TRUE, FALSE, &bh); 5828 5829 myh = (struct elf_link_hash_entry *) bh; 5830 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC); 5831 myh->forced_local = 1; 5832 5833 s->size += ARM_BX_VENEER_SIZE; 5834 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2; 5835 globals->bx_glue_size += ARM_BX_VENEER_SIZE; 5836 } 5837 5838 5839 /* Add an entry to the code/data map for section SEC. */ 5840 5841 static void 5842 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma) 5843 { 5844 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec); 5845 unsigned int newidx; 5846 5847 if (sec_data->map == NULL) 5848 { 5849 sec_data->map = (elf32_arm_section_map *) 5850 bfd_malloc (sizeof (elf32_arm_section_map)); 5851 sec_data->mapcount = 0; 5852 sec_data->mapsize = 1; 5853 } 5854 5855 newidx = sec_data->mapcount++; 5856 5857 if (sec_data->mapcount > sec_data->mapsize) 5858 { 5859 sec_data->mapsize *= 2; 5860 sec_data->map = (elf32_arm_section_map *) 5861 bfd_realloc_or_free (sec_data->map, sec_data->mapsize 5862 * sizeof (elf32_arm_section_map)); 5863 } 5864 5865 if (sec_data->map) 5866 { 5867 sec_data->map[newidx].vma = vma; 5868 sec_data->map[newidx].type = type; 5869 } 5870 } 5871 5872 5873 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode 5874 veneers are handled for now. 
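   Two local symbols are created per veneer: one, named from
   VFP11_ERRATUM_VENEER_ENTRY_NAME and the running fix count, marks
   the veneer itself inside the glue section; a second one with an
   "_r" suffix is placed in the calling section just past the branch
   (OFFSET + 4) and is where the veneer returns to.  Both are found
   again by name in bfd_elf32_arm_vfp11_fix_veneer_locations once the
   final layout is known.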
*/ 5875 5876 static bfd_vma 5877 record_vfp11_erratum_veneer (struct bfd_link_info *link_info, 5878 elf32_vfp11_erratum_list *branch, 5879 bfd *branch_bfd, 5880 asection *branch_sec, 5881 unsigned int offset) 5882 { 5883 asection *s; 5884 struct elf32_arm_link_hash_table *hash_table; 5885 char *tmp_name; 5886 struct elf_link_hash_entry *myh; 5887 struct bfd_link_hash_entry *bh; 5888 bfd_vma val; 5889 struct _arm_elf_section_data *sec_data; 5890 elf32_vfp11_erratum_list *newerr; 5891 5892 hash_table = elf32_arm_hash_table (link_info); 5893 BFD_ASSERT (hash_table != NULL); 5894 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL); 5895 5896 s = bfd_get_linker_section 5897 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME); 5898 5899 sec_data = elf32_arm_section_data (s); 5900 5901 BFD_ASSERT (s != NULL); 5902 5903 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen 5904 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10); 5905 5906 BFD_ASSERT (tmp_name); 5907 5908 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME, 5909 hash_table->num_vfp11_fixes); 5910 5911 myh = elf_link_hash_lookup 5912 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE); 5913 5914 BFD_ASSERT (myh == NULL); 5915 5916 bh = NULL; 5917 val = hash_table->vfp11_erratum_glue_size; 5918 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner, 5919 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val, 5920 NULL, TRUE, FALSE, &bh); 5921 5922 myh = (struct elf_link_hash_entry *) bh; 5923 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC); 5924 myh->forced_local = 1; 5925 5926 /* Link veneer back to calling location. */ 5927 sec_data->erratumcount += 1; 5928 newerr = (elf32_vfp11_erratum_list *) 5929 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list)); 5930 5931 newerr->type = VFP11_ERRATUM_ARM_VENEER; 5932 newerr->vma = -1; 5933 newerr->u.v.branch = branch; 5934 newerr->u.v.id = hash_table->num_vfp11_fixes; 5935 branch->u.b.veneer = newerr; 5936 5937 newerr->next = sec_data->erratumlist; 5938 sec_data->erratumlist = newerr; 5939 5940 /* A symbol for the return from the veneer. */ 5941 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r", 5942 hash_table->num_vfp11_fixes); 5943 5944 myh = elf_link_hash_lookup 5945 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE); 5946 5947 if (myh != NULL) 5948 abort (); 5949 5950 bh = NULL; 5951 val = offset + 4; 5952 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL, 5953 branch_sec, val, NULL, TRUE, FALSE, &bh); 5954 5955 myh = (struct elf_link_hash_entry *) bh; 5956 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC); 5957 myh->forced_local = 1; 5958 5959 free (tmp_name); 5960 5961 /* Generate a mapping symbol for the veneer section, and explicitly add an 5962 entry for that symbol to the code/data map for the section. */ 5963 if (hash_table->vfp11_erratum_glue_size == 0) 5964 { 5965 bh = NULL; 5966 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it 5967 ever requires this erratum fix. */ 5968 _bfd_generic_link_add_one_symbol (link_info, 5969 hash_table->bfd_of_glue_owner, "$a", 5970 BSF_LOCAL, s, 0, NULL, 5971 TRUE, FALSE, &bh); 5972 5973 myh = (struct elf_link_hash_entry *) bh; 5974 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE); 5975 myh->forced_local = 1; 5976 5977 /* The elf32_arm_init_maps function only cares about symbols from input 5978 BFDs. We must make a note of this generated mapping symbol 5979 ourselves so that code byteswapping works properly in 5980 elf32_arm_write_section. 
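   The 'a' entry added below is the in-memory equivalent of a "$a"
   mapping symbol at offset 0 of the veneer section: it records that
   the section starts with ARM code, which is what
   elf32_arm_write_section needs in order to byte-swap the veneers
   correctly when producing BE8 images.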
*/ 5981 elf32_arm_section_map_add (s, 'a', 0); 5982 } 5983 5984 s->size += VFP11_ERRATUM_VENEER_SIZE; 5985 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE; 5986 hash_table->num_vfp11_fixes++; 5987 5988 /* The offset of the veneer. */ 5989 return val; 5990 } 5991 5992 #define ARM_GLUE_SECTION_FLAGS \ 5993 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \ 5994 | SEC_READONLY | SEC_LINKER_CREATED) 5995 5996 /* Create a fake section for use by the ARM backend of the linker. */ 5997 5998 static bfd_boolean 5999 arm_make_glue_section (bfd * abfd, const char * name) 6000 { 6001 asection * sec; 6002 6003 sec = bfd_get_linker_section (abfd, name); 6004 if (sec != NULL) 6005 /* Already made. */ 6006 return TRUE; 6007 6008 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS); 6009 6010 if (sec == NULL 6011 || !bfd_set_section_alignment (abfd, sec, 2)) 6012 return FALSE; 6013 6014 /* Set the gc mark to prevent the section from being removed by garbage 6015 collection, despite the fact that no relocs refer to this section. */ 6016 sec->gc_mark = 1; 6017 6018 return TRUE; 6019 } 6020 6021 /* Add the glue sections to ABFD. This function is called from the 6022 linker scripts in ld/emultempl/{armelf}.em. */ 6023 6024 bfd_boolean 6025 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd, 6026 struct bfd_link_info *info) 6027 { 6028 /* If we are only performing a partial 6029 link do not bother adding the glue. */ 6030 if (info->relocatable) 6031 return TRUE; 6032 6033 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME) 6034 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME) 6035 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME) 6036 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME); 6037 } 6038 6039 /* Select a BFD to be used to hold the sections used by the glue code. 6040 This function is called from the linker scripts in ld/emultempl/ 6041 {armelf/pe}.em. */ 6042 6043 bfd_boolean 6044 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info) 6045 { 6046 struct elf32_arm_link_hash_table *globals; 6047 6048 /* If we are only performing a partial link 6049 do not bother getting a bfd to hold the glue. */ 6050 if (info->relocatable) 6051 return TRUE; 6052 6053 /* Make sure we don't attach the glue sections to a dynamic object. */ 6054 BFD_ASSERT (!(abfd->flags & DYNAMIC)); 6055 6056 globals = elf32_arm_hash_table (info); 6057 BFD_ASSERT (globals != NULL); 6058 6059 if (globals->bfd_of_glue_owner != NULL) 6060 return TRUE; 6061 6062 /* Save the bfd for later use. 
*/ 6063 globals->bfd_of_glue_owner = abfd; 6064 6065 return TRUE; 6066 } 6067 6068 static void 6069 check_use_blx (struct elf32_arm_link_hash_table *globals) 6070 { 6071 int cpu_arch; 6072 6073 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, 6074 Tag_CPU_arch); 6075 6076 if (globals->fix_arm1176) 6077 { 6078 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K) 6079 globals->use_blx = 1; 6080 } 6081 else 6082 { 6083 if (cpu_arch > TAG_CPU_ARCH_V4T) 6084 globals->use_blx = 1; 6085 } 6086 } 6087 6088 bfd_boolean 6089 bfd_elf32_arm_process_before_allocation (bfd *abfd, 6090 struct bfd_link_info *link_info) 6091 { 6092 Elf_Internal_Shdr *symtab_hdr; 6093 Elf_Internal_Rela *internal_relocs = NULL; 6094 Elf_Internal_Rela *irel, *irelend; 6095 bfd_byte *contents = NULL; 6096 6097 asection *sec; 6098 struct elf32_arm_link_hash_table *globals; 6099 6100 /* If we are only performing a partial link do not bother 6101 to construct any glue. */ 6102 if (link_info->relocatable) 6103 return TRUE; 6104 6105 /* Here we have a bfd that is to be included on the link. We have a 6106 hook to do reloc rummaging, before section sizes are nailed down. */ 6107 globals = elf32_arm_hash_table (link_info); 6108 BFD_ASSERT (globals != NULL); 6109 6110 check_use_blx (globals); 6111 6112 if (globals->byteswap_code && !bfd_big_endian (abfd)) 6113 { 6114 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."), 6115 abfd); 6116 return FALSE; 6117 } 6118 6119 /* PR 5398: If we have not decided to include any loadable sections in 6120 the output then we will not have a glue owner bfd. This is OK, it 6121 just means that there is nothing else for us to do here. */ 6122 if (globals->bfd_of_glue_owner == NULL) 6123 return TRUE; 6124 6125 /* Rummage around all the relocs and map the glue vectors. */ 6126 sec = abfd->sections; 6127 6128 if (sec == NULL) 6129 return TRUE; 6130 6131 for (; sec != NULL; sec = sec->next) 6132 { 6133 if (sec->reloc_count == 0) 6134 continue; 6135 6136 if ((sec->flags & SEC_EXCLUDE) != 0) 6137 continue; 6138 6139 symtab_hdr = & elf_symtab_hdr (abfd); 6140 6141 /* Load the relocs. */ 6142 internal_relocs 6143 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE); 6144 6145 if (internal_relocs == NULL) 6146 goto error_return; 6147 6148 irelend = internal_relocs + sec->reloc_count; 6149 for (irel = internal_relocs; irel < irelend; irel++) 6150 { 6151 long r_type; 6152 unsigned long r_index; 6153 6154 struct elf_link_hash_entry *h; 6155 6156 r_type = ELF32_R_TYPE (irel->r_info); 6157 r_index = ELF32_R_SYM (irel->r_info); 6158 6159 /* These are the only relocation types we care about. */ 6160 if ( r_type != R_ARM_PC24 6161 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2)) 6162 continue; 6163 6164 /* Get the section contents if we haven't done so already. */ 6165 if (contents == NULL) 6166 { 6167 /* Get cached copy if it exists. */ 6168 if (elf_section_data (sec)->this_hdr.contents != NULL) 6169 contents = elf_section_data (sec)->this_hdr.contents; 6170 else 6171 { 6172 /* Go get them off disk. */ 6173 if (! bfd_malloc_and_get_section (abfd, sec, &contents)) 6174 goto error_return; 6175 } 6176 } 6177 6178 if (r_type == R_ARM_V4BX) 6179 { 6180 int reg; 6181 6182 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf; 6183 record_arm_bx_glue (link_info, reg); 6184 continue; 6185 } 6186 6187 /* If the relocation is not against a symbol it cannot concern us. */ 6188 h = NULL; 6189 6190 /* We don't care about local symbols. 
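   In an ELF symbol table, sh_info holds the index of the first
   non-local symbol, so any r_index below it refers to a local
   symbol.  The hashes in elf_sym_hashes (abfd) only cover the
   globals, which is why r_index is rebased by sh_info before being
   used as an index below.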
*/ 6191 if (r_index < symtab_hdr->sh_info) 6192 continue; 6193 6194 /* This is an external symbol. */ 6195 r_index -= symtab_hdr->sh_info; 6196 h = (struct elf_link_hash_entry *) 6197 elf_sym_hashes (abfd)[r_index]; 6198 6199 /* If the relocation is against a static symbol it must be within 6200 the current section and so cannot be a cross ARM/Thumb relocation. */ 6201 if (h == NULL) 6202 continue; 6203 6204 /* If the call will go through a PLT entry then we do not need 6205 glue. */ 6206 if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1) 6207 continue; 6208 6209 switch (r_type) 6210 { 6211 case R_ARM_PC24: 6212 /* This one is a call from arm code. We need to look up 6213 the target of the call. If it is a thumb target, we 6214 insert glue. */ 6215 if (h->target_internal == ST_BRANCH_TO_THUMB) 6216 record_arm_to_thumb_glue (link_info, h); 6217 break; 6218 6219 default: 6220 abort (); 6221 } 6222 } 6223 6224 if (contents != NULL 6225 && elf_section_data (sec)->this_hdr.contents != contents) 6226 free (contents); 6227 contents = NULL; 6228 6229 if (internal_relocs != NULL 6230 && elf_section_data (sec)->relocs != internal_relocs) 6231 free (internal_relocs); 6232 internal_relocs = NULL; 6233 } 6234 6235 return TRUE; 6236 6237 error_return: 6238 if (contents != NULL 6239 && elf_section_data (sec)->this_hdr.contents != contents) 6240 free (contents); 6241 if (internal_relocs != NULL 6242 && elf_section_data (sec)->relocs != internal_relocs) 6243 free (internal_relocs); 6244 6245 return FALSE; 6246 } 6247 #endif 6248 6249 6250 /* Initialise maps of ARM/Thumb/data for input BFDs. */ 6251 6252 void 6253 bfd_elf32_arm_init_maps (bfd *abfd) 6254 { 6255 Elf_Internal_Sym *isymbuf; 6256 Elf_Internal_Shdr *hdr; 6257 unsigned int i, localsyms; 6258 6259 /* PR 7093: Make sure that we are dealing with an arm elf binary. */ 6260 if (! is_arm_elf (abfd)) 6261 return; 6262 6263 if ((abfd->flags & DYNAMIC) != 0) 6264 return; 6265 6266 hdr = & elf_symtab_hdr (abfd); 6267 localsyms = hdr->sh_info; 6268 6269 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field 6270 should contain the number of local symbols, which should come before any 6271 global symbols. Mapping symbols are always local. */ 6272 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, 6273 NULL); 6274 6275 /* No internal symbols read? Skip this BFD. */ 6276 if (isymbuf == NULL) 6277 return; 6278 6279 for (i = 0; i < localsyms; i++) 6280 { 6281 Elf_Internal_Sym *isym = &isymbuf[i]; 6282 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx); 6283 const char *name; 6284 6285 if (sec != NULL 6286 && ELF_ST_BIND (isym->st_info) == STB_LOCAL) 6287 { 6288 name = bfd_elf_string_from_elf_section (abfd, 6289 hdr->sh_link, isym->st_name); 6290 6291 if (bfd_is_arm_special_symbol_name (name, 6292 BFD_ARM_SPECIAL_SYM_TYPE_MAP)) 6293 elf32_arm_section_map_add (sec, name[1], isym->st_value); 6294 } 6295 } 6296 } 6297 6298 6299 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly 6300 say what they wanted. */ 6301 6302 void 6303 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info) 6304 { 6305 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); 6306 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd); 6307 6308 if (globals == NULL) 6309 return; 6310 6311 if (globals->fix_cortex_a8 == -1) 6312 { 6313 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. 
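   Tag_CPU_arch_profile is 'A' (0x41) for the Application profile; a
   value of 0 means the profile attribute is absent or not
   applicable, so the workaround is also enabled in that case rather
   than risk missing an affected image.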
*/ 6314 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7 6315 && (out_attr[Tag_CPU_arch_profile].i == 'A' 6316 || out_attr[Tag_CPU_arch_profile].i == 0)) 6317 globals->fix_cortex_a8 = 1; 6318 else 6319 globals->fix_cortex_a8 = 0; 6320 } 6321 } 6322 6323 6324 void 6325 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info) 6326 { 6327 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); 6328 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd); 6329 6330 if (globals == NULL) 6331 return; 6332 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */ 6333 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7) 6334 { 6335 switch (globals->vfp11_fix) 6336 { 6337 case BFD_ARM_VFP11_FIX_DEFAULT: 6338 case BFD_ARM_VFP11_FIX_NONE: 6339 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE; 6340 break; 6341 6342 default: 6343 /* Give a warning, but do as the user requests anyway. */ 6344 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum " 6345 "workaround is not necessary for target architecture"), obfd); 6346 } 6347 } 6348 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT) 6349 /* For earlier architectures, we might need the workaround, but do not 6350 enable it by default. If users is running with broken hardware, they 6351 must enable the erratum fix explicitly. */ 6352 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE; 6353 } 6354 6355 6356 enum bfd_arm_vfp11_pipe 6357 { 6358 VFP11_FMAC, 6359 VFP11_LS, 6360 VFP11_DS, 6361 VFP11_BAD 6362 }; 6363 6364 /* Return a VFP register number. This is encoded as RX:X for single-precision 6365 registers, or X:RX for double-precision registers, where RX is the group of 6366 four bits in the instruction encoding and X is the single extension bit. 6367 RX and X fields are specified using their lowest (starting) bit. The return 6368 value is: 6369 6370 0...31: single-precision registers s0...s31 6371 32...63: double-precision registers d0...d31. 6372 6373 Although X should be zero for VFP11 (encoding d0...d15 only), we might 6374 encounter VFP3 instructions, so we allow the full range for DP registers. */ 6375 6376 static unsigned int 6377 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx, 6378 unsigned int x) 6379 { 6380 if (is_double) 6381 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32; 6382 else 6383 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1); 6384 } 6385 6386 /* Set bits in *WMASK according to a register number REG as encoded by 6387 bfd_arm_vfp11_regno(). Ignore d16-d31. */ 6388 6389 static void 6390 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg) 6391 { 6392 if (reg < 32) 6393 *wmask |= 1 << reg; 6394 else if (reg < 48) 6395 *wmask |= 3 << ((reg - 32) * 2); 6396 } 6397 6398 /* Return TRUE if WMASK overwrites anything in REGS. */ 6399 6400 static bfd_boolean 6401 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs) 6402 { 6403 int i; 6404 6405 for (i = 0; i < numregs; i++) 6406 { 6407 unsigned int reg = regs[i]; 6408 6409 if (reg < 32 && (wmask & (1 << reg)) != 0) 6410 return TRUE; 6411 6412 reg -= 32; 6413 6414 if (reg >= 16) 6415 continue; 6416 6417 if ((wmask & (3 << (reg * 2))) != 0) 6418 return TRUE; 6419 } 6420 6421 return FALSE; 6422 } 6423 6424 /* In this function, we're interested in two things: finding input registers 6425 for VFP data-processing instructions, and finding the set of registers which 6426 arbitrary VFP instructions may write to. 
We use a 32-bit unsigned int to 6427 hold the written set, so FLDM etc. are easy to deal with (we're only 6428 interested in 32 SP registers or 16 dp registers, due to the VFP version 6429 implemented by the chip in question). DP registers are marked by setting 6430 both SP registers in the write mask). */ 6431 6432 static enum bfd_arm_vfp11_pipe 6433 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs, 6434 int *numregs) 6435 { 6436 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD; 6437 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0; 6438 6439 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */ 6440 { 6441 unsigned int pqrs; 6442 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22); 6443 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5); 6444 6445 pqrs = ((insn & 0x00800000) >> 20) 6446 | ((insn & 0x00300000) >> 19) 6447 | ((insn & 0x00000040) >> 6); 6448 6449 switch (pqrs) 6450 { 6451 case 0: /* fmac[sd]. */ 6452 case 1: /* fnmac[sd]. */ 6453 case 2: /* fmsc[sd]. */ 6454 case 3: /* fnmsc[sd]. */ 6455 vpipe = VFP11_FMAC; 6456 bfd_arm_vfp11_write_mask (destmask, fd); 6457 regs[0] = fd; 6458 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */ 6459 regs[2] = fm; 6460 *numregs = 3; 6461 break; 6462 6463 case 4: /* fmul[sd]. */ 6464 case 5: /* fnmul[sd]. */ 6465 case 6: /* fadd[sd]. */ 6466 case 7: /* fsub[sd]. */ 6467 vpipe = VFP11_FMAC; 6468 goto vfp_binop; 6469 6470 case 8: /* fdiv[sd]. */ 6471 vpipe = VFP11_DS; 6472 vfp_binop: 6473 bfd_arm_vfp11_write_mask (destmask, fd); 6474 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */ 6475 regs[1] = fm; 6476 *numregs = 2; 6477 break; 6478 6479 case 15: /* extended opcode. */ 6480 { 6481 unsigned int extn = ((insn >> 15) & 0x1e) 6482 | ((insn >> 7) & 1); 6483 6484 switch (extn) 6485 { 6486 case 0: /* fcpy[sd]. */ 6487 case 1: /* fabs[sd]. */ 6488 case 2: /* fneg[sd]. */ 6489 case 8: /* fcmp[sd]. */ 6490 case 9: /* fcmpe[sd]. */ 6491 case 10: /* fcmpz[sd]. */ 6492 case 11: /* fcmpez[sd]. */ 6493 case 16: /* fuito[sd]. */ 6494 case 17: /* fsito[sd]. */ 6495 case 24: /* ftoui[sd]. */ 6496 case 25: /* ftouiz[sd]. */ 6497 case 26: /* ftosi[sd]. */ 6498 case 27: /* ftosiz[sd]. */ 6499 /* These instructions will not bounce due to underflow. */ 6500 *numregs = 0; 6501 vpipe = VFP11_FMAC; 6502 break; 6503 6504 case 3: /* fsqrt[sd]. */ 6505 /* fsqrt cannot underflow, but it can (perhaps) overwrite 6506 registers to cause the erratum in previous instructions. */ 6507 bfd_arm_vfp11_write_mask (destmask, fd); 6508 vpipe = VFP11_DS; 6509 break; 6510 6511 case 15: /* fcvt{ds,sd}. */ 6512 { 6513 int rnum = 0; 6514 6515 bfd_arm_vfp11_write_mask (destmask, fd); 6516 6517 /* Only FCVTSD can underflow. */ 6518 if ((insn & 0x100) != 0) 6519 regs[rnum++] = fm; 6520 6521 *numregs = rnum; 6522 6523 vpipe = VFP11_FMAC; 6524 } 6525 break; 6526 6527 default: 6528 return VFP11_BAD; 6529 } 6530 } 6531 break; 6532 6533 default: 6534 return VFP11_BAD; 6535 } 6536 } 6537 /* Two-register transfer. */ 6538 else if ((insn & 0x0fe00ed0) == 0x0c400a10) 6539 { 6540 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5); 6541 6542 if ((insn & 0x100000) == 0) 6543 { 6544 if (is_double) 6545 bfd_arm_vfp11_write_mask (destmask, fm); 6546 else 6547 { 6548 bfd_arm_vfp11_write_mask (destmask, fm); 6549 bfd_arm_vfp11_write_mask (destmask, fm + 1); 6550 } 6551 } 6552 6553 vpipe = VFP11_LS; 6554 } 6555 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. 
*/ 6556 { 6557 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22); 6558 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1); 6559 6560 switch (puw) 6561 { 6562 case 0: /* Two-reg transfer. We should catch these above. */ 6563 abort (); 6564 6565 case 2: /* fldm[sdx]. */ 6566 case 3: 6567 case 5: 6568 { 6569 unsigned int i, offset = insn & 0xff; 6570 6571 if (is_double) 6572 offset >>= 1; 6573 6574 for (i = fd; i < fd + offset; i++) 6575 bfd_arm_vfp11_write_mask (destmask, i); 6576 } 6577 break; 6578 6579 case 4: /* fld[sd]. */ 6580 case 6: 6581 bfd_arm_vfp11_write_mask (destmask, fd); 6582 break; 6583 6584 default: 6585 return VFP11_BAD; 6586 } 6587 6588 vpipe = VFP11_LS; 6589 } 6590 /* Single-register transfer. Note L==0. */ 6591 else if ((insn & 0x0f100e10) == 0x0e000a10) 6592 { 6593 unsigned int opcode = (insn >> 21) & 7; 6594 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7); 6595 6596 switch (opcode) 6597 { 6598 case 0: /* fmsr/fmdlr. */ 6599 case 1: /* fmdhr. */ 6600 /* Mark fmdhr and fmdlr as writing to the whole of the DP 6601 destination register. I don't know if this is exactly right, 6602 but it is the conservative choice. */ 6603 bfd_arm_vfp11_write_mask (destmask, fn); 6604 break; 6605 6606 case 7: /* fmxr. */ 6607 break; 6608 } 6609 6610 vpipe = VFP11_LS; 6611 } 6612 6613 return vpipe; 6614 } 6615 6616 6617 static int elf32_arm_compare_mapping (const void * a, const void * b); 6618 6619 6620 /* Look for potentially-troublesome code sequences which might trigger the 6621 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet 6622 (available from ARM) for details of the erratum. A short version is 6623 described in ld.texinfo. */ 6624 6625 bfd_boolean 6626 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info) 6627 { 6628 asection *sec; 6629 bfd_byte *contents = NULL; 6630 int state = 0; 6631 int regs[3], numregs = 0; 6632 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); 6633 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR); 6634 6635 if (globals == NULL) 6636 return FALSE; 6637 6638 /* We use a simple FSM to match troublesome VFP11 instruction sequences. 6639 The states transition as follows: 6640 6641 0 -> 1 (vector) or 0 -> 2 (scalar) 6642 A VFP FMAC-pipeline instruction has been seen. Fill 6643 regs[0]..regs[numregs-1] with its input operands. Remember this 6644 instruction in 'first_fmac'. 6645 6646 1 -> 2 6647 Any instruction, except for a VFP instruction which overwrites 6648 regs[*]. 6649 6650 1 -> 3 [ -> 0 ] or 6651 2 -> 3 [ -> 0 ] 6652 A VFP instruction has been seen which overwrites any of regs[*]. 6653 We must make a veneer! Reset state to 0 before examining next 6654 instruction. 6655 6656 2 -> 0 6657 If we fail to match anything in state 2, reset to state 0 and reset 6658 the instruction pointer to the instruction after 'first_fmac'. 6659 6660 If the VFP11 vector mode is in use, there must be at least two unrelated 6661 instructions between anti-dependent VFP11 instructions to properly avoid 6662 triggering the erratum, hence the use of the extra state 1. */ 6663 6664 /* If we are only performing a partial link do not bother 6665 to construct any glue. */ 6666 if (link_info->relocatable) 6667 return TRUE; 6668 6669 /* Skip if this bfd does not correspond to an ELF image. */ 6670 if (! is_arm_elf (abfd)) 6671 return TRUE; 6672 6673 /* We should have chosen a fix type by the time we get here. 
*/ 6674 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT); 6675 6676 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE) 6677 return TRUE; 6678 6679 /* Skip this BFD if it corresponds to an executable or dynamic object. */ 6680 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0) 6681 return TRUE; 6682 6683 for (sec = abfd->sections; sec != NULL; sec = sec->next) 6684 { 6685 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0; 6686 struct _arm_elf_section_data *sec_data; 6687 6688 /* If we don't have executable progbits, we're not interested in this 6689 section. Also skip if section is to be excluded. */ 6690 if (elf_section_type (sec) != SHT_PROGBITS 6691 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0 6692 || (sec->flags & SEC_EXCLUDE) != 0 6693 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS 6694 || sec->output_section == bfd_abs_section_ptr 6695 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0) 6696 continue; 6697 6698 sec_data = elf32_arm_section_data (sec); 6699 6700 if (sec_data->mapcount == 0) 6701 continue; 6702 6703 if (elf_section_data (sec)->this_hdr.contents != NULL) 6704 contents = elf_section_data (sec)->this_hdr.contents; 6705 else if (! bfd_malloc_and_get_section (abfd, sec, &contents)) 6706 goto error_return; 6707 6708 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map), 6709 elf32_arm_compare_mapping); 6710 6711 for (span = 0; span < sec_data->mapcount; span++) 6712 { 6713 unsigned int span_start = sec_data->map[span].vma; 6714 unsigned int span_end = (span == sec_data->mapcount - 1) 6715 ? sec->size : sec_data->map[span + 1].vma; 6716 char span_type = sec_data->map[span].type; 6717 6718 /* FIXME: Only ARM mode is supported at present. We may need to 6719 support Thumb-2 mode also at some point. */ 6720 if (span_type != 'a') 6721 continue; 6722 6723 for (i = span_start; i < span_end;) 6724 { 6725 unsigned int next_i = i + 4; 6726 unsigned int insn = bfd_big_endian (abfd) 6727 ? (contents[i] << 24) 6728 | (contents[i + 1] << 16) 6729 | (contents[i + 2] << 8) 6730 | contents[i + 3] 6731 : (contents[i + 3] << 24) 6732 | (contents[i + 2] << 16) 6733 | (contents[i + 1] << 8) 6734 | contents[i]; 6735 unsigned int writemask = 0; 6736 enum bfd_arm_vfp11_pipe vpipe; 6737 6738 switch (state) 6739 { 6740 case 0: 6741 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs, 6742 &numregs); 6743 /* I'm assuming the VFP11 erratum can trigger with denorm 6744 operands on either the FMAC or the DS pipeline. This might 6745 lead to slightly overenthusiastic veneer insertion. */ 6746 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS) 6747 { 6748 state = use_vector ? 1 : 2; 6749 first_fmac = i; 6750 veneer_of_insn = insn; 6751 } 6752 break; 6753 6754 case 1: 6755 { 6756 int other_regs[3], other_numregs; 6757 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, 6758 other_regs, 6759 &other_numregs); 6760 if (vpipe != VFP11_BAD 6761 && bfd_arm_vfp11_antidependency (writemask, regs, 6762 numregs)) 6763 state = 3; 6764 else 6765 state = 2; 6766 } 6767 break; 6768 6769 case 2: 6770 { 6771 int other_regs[3], other_numregs; 6772 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, 6773 other_regs, 6774 &other_numregs); 6775 if (vpipe != VFP11_BAD 6776 && bfd_arm_vfp11_antidependency (writemask, regs, 6777 numregs)) 6778 state = 3; 6779 else 6780 { 6781 state = 0; 6782 next_i = first_fmac + 4; 6783 } 6784 } 6785 break; 6786 6787 case 3: 6788 abort (); /* Should be unreachable. 
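   bfd_elf32_arm_set_vfp11_fix is expected to have resolved
   BFD_ARM_VFP11_FIX_DEFAULT to a concrete setting before this scan
   runs; seeing DEFAULT here therefore indicates a missing call in
   the front end, hence the assertion below.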
*/ 6789 } 6790 6791 if (state == 3) 6792 { 6793 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *) 6794 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list)); 6795 6796 elf32_arm_section_data (sec)->erratumcount += 1; 6797 6798 newerr->u.b.vfp_insn = veneer_of_insn; 6799 6800 switch (span_type) 6801 { 6802 case 'a': 6803 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER; 6804 break; 6805 6806 default: 6807 abort (); 6808 } 6809 6810 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec, 6811 first_fmac); 6812 6813 newerr->vma = -1; 6814 6815 newerr->next = sec_data->erratumlist; 6816 sec_data->erratumlist = newerr; 6817 6818 state = 0; 6819 } 6820 6821 i = next_i; 6822 } 6823 } 6824 6825 if (contents != NULL 6826 && elf_section_data (sec)->this_hdr.contents != contents) 6827 free (contents); 6828 contents = NULL; 6829 } 6830 6831 return TRUE; 6832 6833 error_return: 6834 if (contents != NULL 6835 && elf_section_data (sec)->this_hdr.contents != contents) 6836 free (contents); 6837 6838 return FALSE; 6839 } 6840 6841 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations 6842 after sections have been laid out, using specially-named symbols. */ 6843 6844 void 6845 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd, 6846 struct bfd_link_info *link_info) 6847 { 6848 asection *sec; 6849 struct elf32_arm_link_hash_table *globals; 6850 char *tmp_name; 6851 6852 if (link_info->relocatable) 6853 return; 6854 6855 /* Skip if this bfd does not correspond to an ELF image. */ 6856 if (! is_arm_elf (abfd)) 6857 return; 6858 6859 globals = elf32_arm_hash_table (link_info); 6860 if (globals == NULL) 6861 return; 6862 6863 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen 6864 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10); 6865 6866 for (sec = abfd->sections; sec != NULL; sec = sec->next) 6867 { 6868 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec); 6869 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist; 6870 6871 for (; errnode != NULL; errnode = errnode->next) 6872 { 6873 struct elf_link_hash_entry *myh; 6874 bfd_vma vma; 6875 6876 switch (errnode->type) 6877 { 6878 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER: 6879 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER: 6880 /* Find veneer symbol. */ 6881 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME, 6882 errnode->u.b.veneer->u.v.id); 6883 6884 myh = elf_link_hash_lookup 6885 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE); 6886 6887 if (myh == NULL) 6888 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer " 6889 "`%s'"), abfd, tmp_name); 6890 6891 vma = myh->root.u.def.section->output_section->vma 6892 + myh->root.u.def.section->output_offset 6893 + myh->root.u.def.value; 6894 6895 errnode->u.b.veneer->vma = vma; 6896 break; 6897 6898 case VFP11_ERRATUM_ARM_VENEER: 6899 case VFP11_ERRATUM_THUMB_VENEER: 6900 /* Find return location. */ 6901 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r", 6902 errnode->u.v.id); 6903 6904 myh = elf_link_hash_lookup 6905 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE); 6906 6907 if (myh == NULL) 6908 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer " 6909 "`%s'"), abfd, tmp_name); 6910 6911 vma = myh->root.u.def.section->output_section->vma 6912 + myh->root.u.def.section->output_offset 6913 + myh->root.u.def.value; 6914 6915 errnode->u.v.branch->vma = vma; 6916 break; 6917 6918 default: 6919 abort (); 6920 } 6921 } 6922 } 6923 6924 free (tmp_name); 6925 } 6926 6927 6928 /* Set target relocation values needed during linking. 
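   Most of the arguments come straight from the linker command line.
   In particular TARGET2_TYPE selects how R_ARM_TARGET2 is treated:

     "rel"      ->  R_ARM_REL32
     "abs"      ->  R_ARM_ABS32
     "got-rel"  ->  R_ARM_GOT_PREL

   Anything else is reported as an invalid TARGET2 relocation type.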
*/ 6929 6930 void 6931 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd, 6932 struct bfd_link_info *link_info, 6933 int target1_is_rel, 6934 char * target2_type, 6935 int fix_v4bx, 6936 int use_blx, 6937 bfd_arm_vfp11_fix vfp11_fix, 6938 int no_enum_warn, int no_wchar_warn, 6939 int pic_veneer, int fix_cortex_a8, 6940 int fix_arm1176) 6941 { 6942 struct elf32_arm_link_hash_table *globals; 6943 6944 globals = elf32_arm_hash_table (link_info); 6945 if (globals == NULL) 6946 return; 6947 6948 globals->target1_is_rel = target1_is_rel; 6949 if (strcmp (target2_type, "rel") == 0) 6950 globals->target2_reloc = R_ARM_REL32; 6951 else if (strcmp (target2_type, "abs") == 0) 6952 globals->target2_reloc = R_ARM_ABS32; 6953 else if (strcmp (target2_type, "got-rel") == 0) 6954 globals->target2_reloc = R_ARM_GOT_PREL; 6955 else 6956 { 6957 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."), 6958 target2_type); 6959 } 6960 globals->fix_v4bx = fix_v4bx; 6961 globals->use_blx |= use_blx; 6962 globals->vfp11_fix = vfp11_fix; 6963 globals->pic_veneer = pic_veneer; 6964 globals->fix_cortex_a8 = fix_cortex_a8; 6965 globals->fix_arm1176 = fix_arm1176; 6966 6967 BFD_ASSERT (is_arm_elf (output_bfd)); 6968 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn; 6969 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn; 6970 } 6971 6972 /* Replace the target offset of a Thumb bl or b.w instruction. */ 6973 6974 static void 6975 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn) 6976 { 6977 bfd_vma upper; 6978 bfd_vma lower; 6979 int reloc_sign; 6980 6981 BFD_ASSERT ((offset & 1) == 0); 6982 6983 upper = bfd_get_16 (abfd, insn); 6984 lower = bfd_get_16 (abfd, insn + 2); 6985 reloc_sign = (offset < 0) ? 1 : 0; 6986 upper = (upper & ~(bfd_vma) 0x7ff) 6987 | ((offset >> 12) & 0x3ff) 6988 | (reloc_sign << 10); 6989 lower = (lower & ~(bfd_vma) 0x2fff) 6990 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13) 6991 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11) 6992 | ((offset >> 1) & 0x7ff); 6993 bfd_put_16 (abfd, upper, insn); 6994 bfd_put_16 (abfd, lower, insn + 2); 6995 } 6996 6997 /* Thumb code calling an ARM function. 
*/ 6998 6999 static int 7000 elf32_thumb_to_arm_stub (struct bfd_link_info * info, 7001 const char * name, 7002 bfd * input_bfd, 7003 bfd * output_bfd, 7004 asection * input_section, 7005 bfd_byte * hit_data, 7006 asection * sym_sec, 7007 bfd_vma offset, 7008 bfd_signed_vma addend, 7009 bfd_vma val, 7010 char **error_message) 7011 { 7012 asection * s = 0; 7013 bfd_vma my_offset; 7014 long int ret_offset; 7015 struct elf_link_hash_entry * myh; 7016 struct elf32_arm_link_hash_table * globals; 7017 7018 myh = find_thumb_glue (info, name, error_message); 7019 if (myh == NULL) 7020 return FALSE; 7021 7022 globals = elf32_arm_hash_table (info); 7023 BFD_ASSERT (globals != NULL); 7024 BFD_ASSERT (globals->bfd_of_glue_owner != NULL); 7025 7026 my_offset = myh->root.u.def.value; 7027 7028 s = bfd_get_linker_section (globals->bfd_of_glue_owner, 7029 THUMB2ARM_GLUE_SECTION_NAME); 7030 7031 BFD_ASSERT (s != NULL); 7032 BFD_ASSERT (s->contents != NULL); 7033 BFD_ASSERT (s->output_section != NULL); 7034 7035 if ((my_offset & 0x01) == 0x01) 7036 { 7037 if (sym_sec != NULL 7038 && sym_sec->owner != NULL 7039 && !INTERWORK_FLAG (sym_sec->owner)) 7040 { 7041 (*_bfd_error_handler) 7042 (_("%B(%s): warning: interworking not enabled.\n" 7043 " first occurrence: %B: Thumb call to ARM"), 7044 sym_sec->owner, input_bfd, name); 7045 7046 return FALSE; 7047 } 7048 7049 --my_offset; 7050 myh->root.u.def.value = my_offset; 7051 7052 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn, 7053 s->contents + my_offset); 7054 7055 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn, 7056 s->contents + my_offset + 2); 7057 7058 ret_offset = 7059 /* Address of destination of the stub. */ 7060 ((bfd_signed_vma) val) 7061 - ((bfd_signed_vma) 7062 /* Offset from the start of the current section 7063 to the start of the stubs. */ 7064 (s->output_offset 7065 /* Offset of the start of this stub from the start of the stubs. */ 7066 + my_offset 7067 /* Address of the start of the current section. */ 7068 + s->output_section->vma) 7069 /* The branch instruction is 4 bytes into the stub. */ 7070 + 4 7071 /* ARM branches work from the pc of the instruction + 8. */ 7072 + 8); 7073 7074 put_arm_insn (globals, output_bfd, 7075 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF), 7076 s->contents + my_offset + 4); 7077 } 7078 7079 BFD_ASSERT (my_offset <= globals->thumb_glue_size); 7080 7081 /* Now go back and fix up the original BL insn to point to here. */ 7082 ret_offset = 7083 /* Address of where the stub is located. */ 7084 (s->output_section->vma + s->output_offset + my_offset) 7085 /* Address of where the BL is located. */ 7086 - (input_section->output_section->vma + input_section->output_offset 7087 + offset) 7088 /* Addend in the relocation. */ 7089 - addend 7090 /* Biassing for PC-relative addressing. */ 7091 - 8; 7092 7093 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma); 7094 7095 return TRUE; 7096 } 7097 7098 /* Populate an Arm to Thumb stub. Returns the stub symbol. 
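   Three variants of the glue can be emitted, matching the sequences
   documented above: the 16-byte PIC form for shared objects,
   relocatable executables and --pic-veneer; the 8-byte ldr-pc form
   when BLX is available; and the 12-byte static form otherwise.
   record_arm_to_thumb_glue reserved the matching amount of space, so
   the two functions must stay in sync.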
*/ 7099 7100 static struct elf_link_hash_entry * 7101 elf32_arm_create_thumb_stub (struct bfd_link_info * info, 7102 const char * name, 7103 bfd * input_bfd, 7104 bfd * output_bfd, 7105 asection * sym_sec, 7106 bfd_vma val, 7107 asection * s, 7108 char ** error_message) 7109 { 7110 bfd_vma my_offset; 7111 long int ret_offset; 7112 struct elf_link_hash_entry * myh; 7113 struct elf32_arm_link_hash_table * globals; 7114 7115 myh = find_arm_glue (info, name, error_message); 7116 if (myh == NULL) 7117 return NULL; 7118 7119 globals = elf32_arm_hash_table (info); 7120 BFD_ASSERT (globals != NULL); 7121 BFD_ASSERT (globals->bfd_of_glue_owner != NULL); 7122 7123 my_offset = myh->root.u.def.value; 7124 7125 if ((my_offset & 0x01) == 0x01) 7126 { 7127 if (sym_sec != NULL 7128 && sym_sec->owner != NULL 7129 && !INTERWORK_FLAG (sym_sec->owner)) 7130 { 7131 (*_bfd_error_handler) 7132 (_("%B(%s): warning: interworking not enabled.\n" 7133 " first occurrence: %B: arm call to thumb"), 7134 sym_sec->owner, input_bfd, name); 7135 } 7136 7137 --my_offset; 7138 myh->root.u.def.value = my_offset; 7139 7140 if (info->shared || globals->root.is_relocatable_executable 7141 || globals->pic_veneer) 7142 { 7143 /* For relocatable objects we can't use absolute addresses, 7144 so construct the address from a relative offset. */ 7145 /* TODO: If the offset is small it's probably worth 7146 constructing the address with adds. */ 7147 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn, 7148 s->contents + my_offset); 7149 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn, 7150 s->contents + my_offset + 4); 7151 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn, 7152 s->contents + my_offset + 8); 7153 /* Adjust the offset by 4 for the position of the add, 7154 and 8 for the pipeline offset. */ 7155 ret_offset = (val - (s->output_offset 7156 + s->output_section->vma 7157 + my_offset + 12)) 7158 | 1; 7159 bfd_put_32 (output_bfd, ret_offset, 7160 s->contents + my_offset + 12); 7161 } 7162 else if (globals->use_blx) 7163 { 7164 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn, 7165 s->contents + my_offset); 7166 7167 /* It's a thumb address. Add the low order bit. */ 7168 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn, 7169 s->contents + my_offset + 4); 7170 } 7171 else 7172 { 7173 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn, 7174 s->contents + my_offset); 7175 7176 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn, 7177 s->contents + my_offset + 4); 7178 7179 /* It's a thumb address. Add the low order bit. */ 7180 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn, 7181 s->contents + my_offset + 8); 7182 7183 my_offset += 12; 7184 } 7185 } 7186 7187 BFD_ASSERT (my_offset <= globals->arm_glue_size); 7188 7189 return myh; 7190 } 7191 7192 /* Arm code calling a Thumb function. 
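   The original ARM BL or B at HIT_DATA is retargeted at the glue:
   only its condition and opcode bits (the top byte) are kept, and a
   new 24-bit word offset to the stub is inserted, computed roughly
   as

     ret_offset = stub_address - branch_address - 8;
     insn = (old_insn & 0xff000000) | ((ret_offset >> 2) & 0x00ffffff);

   with the usual -8 accounting for the ARM pipeline's PC bias.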
*/ 7193 7194 static int 7195 elf32_arm_to_thumb_stub (struct bfd_link_info * info, 7196 const char * name, 7197 bfd * input_bfd, 7198 bfd * output_bfd, 7199 asection * input_section, 7200 bfd_byte * hit_data, 7201 asection * sym_sec, 7202 bfd_vma offset, 7203 bfd_signed_vma addend, 7204 bfd_vma val, 7205 char **error_message) 7206 { 7207 unsigned long int tmp; 7208 bfd_vma my_offset; 7209 asection * s; 7210 long int ret_offset; 7211 struct elf_link_hash_entry * myh; 7212 struct elf32_arm_link_hash_table * globals; 7213 7214 globals = elf32_arm_hash_table (info); 7215 BFD_ASSERT (globals != NULL); 7216 BFD_ASSERT (globals->bfd_of_glue_owner != NULL); 7217 7218 s = bfd_get_linker_section (globals->bfd_of_glue_owner, 7219 ARM2THUMB_GLUE_SECTION_NAME); 7220 BFD_ASSERT (s != NULL); 7221 BFD_ASSERT (s->contents != NULL); 7222 BFD_ASSERT (s->output_section != NULL); 7223 7224 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd, 7225 sym_sec, val, s, error_message); 7226 if (!myh) 7227 return FALSE; 7228 7229 my_offset = myh->root.u.def.value; 7230 tmp = bfd_get_32 (input_bfd, hit_data); 7231 tmp = tmp & 0xFF000000; 7232 7233 /* Somehow these are both 4 too far, so subtract 8. */ 7234 ret_offset = (s->output_offset 7235 + my_offset 7236 + s->output_section->vma 7237 - (input_section->output_offset 7238 + input_section->output_section->vma 7239 + offset + addend) 7240 - 8); 7241 7242 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF); 7243 7244 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma); 7245 7246 return TRUE; 7247 } 7248 7249 /* Populate Arm stub for an exported Thumb function. */ 7250 7251 static bfd_boolean 7252 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf) 7253 { 7254 struct bfd_link_info * info = (struct bfd_link_info *) inf; 7255 asection * s; 7256 struct elf_link_hash_entry * myh; 7257 struct elf32_arm_link_hash_entry *eh; 7258 struct elf32_arm_link_hash_table * globals; 7259 asection *sec; 7260 bfd_vma val; 7261 char *error_message; 7262 7263 eh = elf32_arm_hash_entry (h); 7264 /* Allocate stubs for exported Thumb functions on v4t. */ 7265 if (eh->export_glue == NULL) 7266 return TRUE; 7267 7268 globals = elf32_arm_hash_table (info); 7269 BFD_ASSERT (globals != NULL); 7270 BFD_ASSERT (globals->bfd_of_glue_owner != NULL); 7271 7272 s = bfd_get_linker_section (globals->bfd_of_glue_owner, 7273 ARM2THUMB_GLUE_SECTION_NAME); 7274 BFD_ASSERT (s != NULL); 7275 BFD_ASSERT (s->contents != NULL); 7276 BFD_ASSERT (s->output_section != NULL); 7277 7278 sec = eh->export_glue->root.u.def.section; 7279 7280 BFD_ASSERT (sec->output_section != NULL); 7281 7282 val = eh->export_glue->root.u.def.value + sec->output_offset 7283 + sec->output_section->vma; 7284 7285 myh = elf32_arm_create_thumb_stub (info, h->root.root.string, 7286 h->root.u.def.section->owner, 7287 globals->obfd, sec, val, s, 7288 &error_message); 7289 BFD_ASSERT (myh); 7290 return TRUE; 7291 } 7292 7293 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. 
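   Each veneer is the three-instruction sequence

     tst   rN, #1
     moveq pc, rN
     bx    rN

   built from the armbx*_insn templates with the register number
   patched in.  bx_glue_offset[reg] doubles as a bookkeeping word:
   bit 1 says the veneer has been allocated (set in
   record_arm_bx_glue) and bit 0 says its code has already been
   written, so the instructions are only emitted once.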
*/ 7294 7295 static bfd_vma 7296 elf32_arm_bx_glue (struct bfd_link_info * info, int reg) 7297 { 7298 bfd_byte *p; 7299 bfd_vma glue_addr; 7300 asection *s; 7301 struct elf32_arm_link_hash_table *globals; 7302 7303 globals = elf32_arm_hash_table (info); 7304 BFD_ASSERT (globals != NULL); 7305 BFD_ASSERT (globals->bfd_of_glue_owner != NULL); 7306 7307 s = bfd_get_linker_section (globals->bfd_of_glue_owner, 7308 ARM_BX_GLUE_SECTION_NAME); 7309 BFD_ASSERT (s != NULL); 7310 BFD_ASSERT (s->contents != NULL); 7311 BFD_ASSERT (s->output_section != NULL); 7312 7313 BFD_ASSERT (globals->bx_glue_offset[reg] & 2); 7314 7315 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3; 7316 7317 if ((globals->bx_glue_offset[reg] & 1) == 0) 7318 { 7319 p = s->contents + glue_addr; 7320 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p); 7321 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4); 7322 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8); 7323 globals->bx_glue_offset[reg] |= 1; 7324 } 7325 7326 return glue_addr + s->output_section->vma + s->output_offset; 7327 } 7328 7329 /* Generate Arm stubs for exported Thumb symbols. */ 7330 static void 7331 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED, 7332 struct bfd_link_info *link_info) 7333 { 7334 struct elf32_arm_link_hash_table * globals; 7335 7336 if (link_info == NULL) 7337 /* Ignore this if we are not called by the ELF backend linker. */ 7338 return; 7339 7340 globals = elf32_arm_hash_table (link_info); 7341 if (globals == NULL) 7342 return; 7343 7344 /* If blx is available then exported Thumb symbols are OK and there is 7345 nothing to do. */ 7346 if (globals->use_blx) 7347 return; 7348 7349 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub, 7350 link_info); 7351 } 7352 7353 /* Reserve space for COUNT dynamic relocations in relocation selection 7354 SRELOC. */ 7355 7356 static void 7357 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc, 7358 bfd_size_type count) 7359 { 7360 struct elf32_arm_link_hash_table *htab; 7361 7362 htab = elf32_arm_hash_table (info); 7363 BFD_ASSERT (htab->root.dynamic_sections_created); 7364 if (sreloc == NULL) 7365 abort (); 7366 sreloc->size += RELOC_SIZE (htab) * count; 7367 } 7368 7369 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is 7370 dynamic, the relocations should go in SRELOC, otherwise they should 7371 go in the special .rel.iplt section. */ 7372 7373 static void 7374 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc, 7375 bfd_size_type count) 7376 { 7377 struct elf32_arm_link_hash_table *htab; 7378 7379 htab = elf32_arm_hash_table (info); 7380 if (!htab->root.dynamic_sections_created) 7381 htab->root.irelplt->size += RELOC_SIZE (htab) * count; 7382 else 7383 { 7384 BFD_ASSERT (sreloc != NULL); 7385 sreloc->size += RELOC_SIZE (htab) * count; 7386 } 7387 } 7388 7389 /* Add relocation REL to the end of relocation section SRELOC. 
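   If the link has no dynamic sections and REL is an R_ARM_IRELATIVE
   relocation, it is redirected into the special .rel.iplt section
   instead, mirroring the accounting done in
   elf32_arm_allocate_irelocs above; the final check against
   sreloc->size guards against writing more relocations than were
   reserved.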
*/ 7390 7391 static void 7392 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info, 7393 asection *sreloc, Elf_Internal_Rela *rel) 7394 { 7395 bfd_byte *loc; 7396 struct elf32_arm_link_hash_table *htab; 7397 7398 htab = elf32_arm_hash_table (info); 7399 if (!htab->root.dynamic_sections_created 7400 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE) 7401 sreloc = htab->root.irelplt; 7402 if (sreloc == NULL) 7403 abort (); 7404 loc = sreloc->contents; 7405 loc += sreloc->reloc_count++ * RELOC_SIZE (htab); 7406 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size) 7407 abort (); 7408 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc); 7409 } 7410 7411 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT. 7412 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than 7413 to .plt. */ 7414 7415 static void 7416 elf32_arm_allocate_plt_entry (struct bfd_link_info *info, 7417 bfd_boolean is_iplt_entry, 7418 union gotplt_union *root_plt, 7419 struct arm_plt_info *arm_plt) 7420 { 7421 struct elf32_arm_link_hash_table *htab; 7422 asection *splt; 7423 asection *sgotplt; 7424 7425 htab = elf32_arm_hash_table (info); 7426 7427 if (is_iplt_entry) 7428 { 7429 splt = htab->root.iplt; 7430 sgotplt = htab->root.igotplt; 7431 7432 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */ 7433 elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1); 7434 } 7435 else 7436 { 7437 splt = htab->root.splt; 7438 sgotplt = htab->root.sgotplt; 7439 7440 /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */ 7441 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1); 7442 7443 /* If this is the first .plt entry, make room for the special 7444 first entry. */ 7445 if (splt->size == 0) 7446 splt->size += htab->plt_header_size; 7447 } 7448 7449 /* Allocate the PLT entry itself, including any leading Thumb stub. */ 7450 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt)) 7451 splt->size += PLT_THUMB_STUB_SIZE; 7452 root_plt->offset = splt->size; 7453 splt->size += htab->plt_entry_size; 7454 7455 if (!htab->symbian_p) 7456 { 7457 /* We also need to make an entry in the .got.plt section, which 7458 will be placed in the .got section by the linker script. */ 7459 arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc; 7460 sgotplt->size += 4; 7461 } 7462 } 7463 7464 static bfd_vma 7465 arm_movw_immediate (bfd_vma value) 7466 { 7467 return (value & 0x00000fff) | ((value & 0x0000f000) << 4); 7468 } 7469 7470 static bfd_vma 7471 arm_movt_immediate (bfd_vma value) 7472 { 7473 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12); 7474 } 7475 7476 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1, 7477 the entry lives in .iplt and resolves to (*SYM_VALUE)(). 7478 Otherwise, DYNINDX is the index of the symbol in the dynamic 7479 symbol table and SYM_VALUE is undefined. 7480 7481 ROOT_PLT points to the offset of the PLT entry from the start of its 7482 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific 7483 bookkeeping information. 
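   As an aside on the arm_movw_immediate and arm_movt_immediate helpers
   just above (used for the NaCl PLT entries below): a MOVW/MOVT pair
   materialises a 32-bit constant 16 bits at a time, each half being
   scattered into the instruction's imm4:imm12 fields.  For a purely
   illustrative displacement of 0xABCD1234:

     arm_movw_immediate (0xABCD1234) == 0x00010234
         imm4 = 0x1 (bits 19-16), imm12 = 0x234 (bits 11-0)
     arm_movt_immediate (0xABCD1234) == 0x000A0BCD
         imm4 = 0xA (bits 19-16), imm12 = 0xBCD (bits 11-0)

   ORed into the MOVW/MOVT templates these encode "movw rN, #0x1234" and
   "movt rN, #0xabcd" for whichever scratch register the PLT template
   names.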
*/ 7484 7485 static void 7486 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info, 7487 union gotplt_union *root_plt, 7488 struct arm_plt_info *arm_plt, 7489 int dynindx, bfd_vma sym_value) 7490 { 7491 struct elf32_arm_link_hash_table *htab; 7492 asection *sgot; 7493 asection *splt; 7494 asection *srel; 7495 bfd_byte *loc; 7496 bfd_vma plt_index; 7497 Elf_Internal_Rela rel; 7498 bfd_vma plt_header_size; 7499 bfd_vma got_header_size; 7500 7501 htab = elf32_arm_hash_table (info); 7502 7503 /* Pick the appropriate sections and sizes. */ 7504 if (dynindx == -1) 7505 { 7506 splt = htab->root.iplt; 7507 sgot = htab->root.igotplt; 7508 srel = htab->root.irelplt; 7509 7510 /* There are no reserved entries in .igot.plt, and no special 7511 first entry in .iplt. */ 7512 got_header_size = 0; 7513 plt_header_size = 0; 7514 } 7515 else 7516 { 7517 splt = htab->root.splt; 7518 sgot = htab->root.sgotplt; 7519 srel = htab->root.srelplt; 7520 7521 got_header_size = get_elf_backend_data (output_bfd)->got_header_size; 7522 plt_header_size = htab->plt_header_size; 7523 } 7524 BFD_ASSERT (splt != NULL && srel != NULL); 7525 7526 /* Fill in the entry in the procedure linkage table. */ 7527 if (htab->symbian_p) 7528 { 7529 BFD_ASSERT (dynindx >= 0); 7530 put_arm_insn (htab, output_bfd, 7531 elf32_arm_symbian_plt_entry[0], 7532 splt->contents + root_plt->offset); 7533 bfd_put_32 (output_bfd, 7534 elf32_arm_symbian_plt_entry[1], 7535 splt->contents + root_plt->offset + 4); 7536 7537 /* Fill in the entry in the .rel.plt section. */ 7538 rel.r_offset = (splt->output_section->vma 7539 + splt->output_offset 7540 + root_plt->offset + 4); 7541 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT); 7542 7543 /* Get the index in the procedure linkage table which 7544 corresponds to this symbol. This is the index of this symbol 7545 in all the symbols for which we are making plt entries. The 7546 first entry in the procedure linkage table is reserved. */ 7547 plt_index = ((root_plt->offset - plt_header_size) 7548 / htab->plt_entry_size); 7549 } 7550 else 7551 { 7552 bfd_vma got_offset, got_address, plt_address; 7553 bfd_vma got_displacement, initial_got_entry; 7554 bfd_byte * ptr; 7555 7556 BFD_ASSERT (sgot != NULL); 7557 7558 /* Get the offset into the .(i)got.plt table of the entry that 7559 corresponds to this function. */ 7560 got_offset = (arm_plt->got_offset & -2); 7561 7562 /* Get the index in the procedure linkage table which 7563 corresponds to this symbol. This is the index of this symbol 7564 in all the symbols for which we are making plt entries. 7565 After the reserved .got.plt entries, all symbols appear in 7566 the same order as in .plt. */ 7567 plt_index = (got_offset - got_header_size) / 4; 7568 7569 /* Calculate the address of the GOT entry. */ 7570 got_address = (sgot->output_section->vma 7571 + sgot->output_offset 7572 + got_offset); 7573 7574 /* ...and the address of the PLT entry. 
*/ 7575 plt_address = (splt->output_section->vma 7576 + splt->output_offset 7577 + root_plt->offset); 7578 7579 ptr = splt->contents + root_plt->offset; 7580 if (htab->vxworks_p && info->shared) 7581 { 7582 unsigned int i; 7583 bfd_vma val; 7584 7585 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4) 7586 { 7587 val = elf32_arm_vxworks_shared_plt_entry[i]; 7588 if (i == 2) 7589 val |= got_address - sgot->output_section->vma; 7590 if (i == 5) 7591 val |= plt_index * RELOC_SIZE (htab); 7592 if (i == 2 || i == 5) 7593 bfd_put_32 (output_bfd, val, ptr); 7594 else 7595 put_arm_insn (htab, output_bfd, val, ptr); 7596 } 7597 } 7598 else if (htab->vxworks_p) 7599 { 7600 unsigned int i; 7601 bfd_vma val; 7602 7603 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4) 7604 { 7605 val = elf32_arm_vxworks_exec_plt_entry[i]; 7606 if (i == 2) 7607 val |= got_address; 7608 if (i == 4) 7609 val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2); 7610 if (i == 5) 7611 val |= plt_index * RELOC_SIZE (htab); 7612 if (i == 2 || i == 5) 7613 bfd_put_32 (output_bfd, val, ptr); 7614 else 7615 put_arm_insn (htab, output_bfd, val, ptr); 7616 } 7617 7618 loc = (htab->srelplt2->contents 7619 + (plt_index * 2 + 1) * RELOC_SIZE (htab)); 7620 7621 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation 7622 referencing the GOT for this PLT entry. */ 7623 rel.r_offset = plt_address + 8; 7624 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32); 7625 rel.r_addend = got_offset; 7626 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc); 7627 loc += RELOC_SIZE (htab); 7628 7629 /* Create the R_ARM_ABS32 relocation referencing the 7630 beginning of the PLT for this GOT entry. */ 7631 rel.r_offset = got_address; 7632 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32); 7633 rel.r_addend = 0; 7634 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc); 7635 } 7636 else if (htab->nacl_p) 7637 { 7638 /* Calculate the displacement between the PLT slot and the 7639 common tail that's part of the special initial PLT slot. */ 7640 int32_t tail_displacement 7641 = ((splt->output_section->vma + splt->output_offset 7642 + ARM_NACL_PLT_TAIL_OFFSET) 7643 - (plt_address + htab->plt_entry_size + 4)); 7644 BFD_ASSERT ((tail_displacement & 3) == 0); 7645 tail_displacement >>= 2; 7646 7647 BFD_ASSERT ((tail_displacement & 0xff000000) == 0 7648 || (-tail_displacement & 0xff000000) == 0); 7649 7650 /* Calculate the displacement between the PLT slot and the entry 7651 in the GOT. The offset accounts for the value produced by 7652 adding to pc in the penultimate instruction of the PLT stub. */ 7653 got_displacement = (got_address 7654 - (plt_address + htab->plt_entry_size)); 7655 7656 /* NaCl does not support interworking at all. */ 7657 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt)); 7658 7659 put_arm_insn (htab, output_bfd, 7660 elf32_arm_nacl_plt_entry[0] 7661 | arm_movw_immediate (got_displacement), 7662 ptr + 0); 7663 put_arm_insn (htab, output_bfd, 7664 elf32_arm_nacl_plt_entry[1] 7665 | arm_movt_immediate (got_displacement), 7666 ptr + 4); 7667 put_arm_insn (htab, output_bfd, 7668 elf32_arm_nacl_plt_entry[2], 7669 ptr + 8); 7670 put_arm_insn (htab, output_bfd, 7671 elf32_arm_nacl_plt_entry[3] 7672 | (tail_displacement & 0x00ffffff), 7673 ptr + 12); 7674 } 7675 else 7676 { 7677 /* Calculate the displacement between the PLT slot and the 7678 entry in the GOT. The eight-byte offset accounts for the 7679 value produced by adding to pc in the first instruction 7680 of the PLT stub. 
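   As a rough worked example of the field split performed just below
   (the numbers are made up for illustration): with a got_displacement
   of 0x08765432 the three instructions of the classic ARM PLT entry
   receive

     (0x08765432 & 0x0ff00000) >> 20  ==  0x87
     (0x08765432 & 0x000ff000) >> 12  ==  0x65
      0x08765432 & 0x00000fff         == 0x432

   which, with the usual template, corresponds to something like
   "add ip, pc, #0x8700000", "add ip, ip, #0x65000",
   "ldr pc, [ip, #0x432]!", i.e. the displacement is added to the pc in
   byte-sized, suitably rotated chunks before the GOT slot is loaded.
   The assertion below that bits 31-28 of the displacement are clear
   keeps it representable in this form.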
*/ 7681 got_displacement = got_address - (plt_address + 8); 7682 7683 BFD_ASSERT ((got_displacement & 0xf0000000) == 0); 7684 7685 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt)) 7686 { 7687 put_thumb_insn (htab, output_bfd, 7688 elf32_arm_plt_thumb_stub[0], ptr - 4); 7689 put_thumb_insn (htab, output_bfd, 7690 elf32_arm_plt_thumb_stub[1], ptr - 2); 7691 } 7692 7693 put_arm_insn (htab, output_bfd, 7694 elf32_arm_plt_entry[0] 7695 | ((got_displacement & 0x0ff00000) >> 20), 7696 ptr + 0); 7697 put_arm_insn (htab, output_bfd, 7698 elf32_arm_plt_entry[1] 7699 | ((got_displacement & 0x000ff000) >> 12), 7700 ptr+ 4); 7701 put_arm_insn (htab, output_bfd, 7702 elf32_arm_plt_entry[2] 7703 | (got_displacement & 0x00000fff), 7704 ptr + 8); 7705 #ifdef FOUR_WORD_PLT 7706 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12); 7707 #endif 7708 } 7709 7710 /* Fill in the entry in the .rel(a).(i)plt section. */ 7711 rel.r_offset = got_address; 7712 rel.r_addend = 0; 7713 if (dynindx == -1) 7714 { 7715 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE. 7716 The dynamic linker or static executable then calls SYM_VALUE 7717 to determine the correct run-time value of the .igot.plt entry. */ 7718 rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE); 7719 initial_got_entry = sym_value; 7720 } 7721 else 7722 { 7723 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT); 7724 initial_got_entry = (splt->output_section->vma 7725 + splt->output_offset); 7726 } 7727 7728 /* Fill in the entry in the global offset table. */ 7729 bfd_put_32 (output_bfd, initial_got_entry, 7730 sgot->contents + got_offset); 7731 } 7732 7733 loc = srel->contents + plt_index * RELOC_SIZE (htab); 7734 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc); 7735 } 7736 7737 /* Some relocations map to different relocations depending on the 7738 target. Return the real relocation. */ 7739 7740 static int 7741 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals, 7742 int r_type) 7743 { 7744 switch (r_type) 7745 { 7746 case R_ARM_TARGET1: 7747 if (globals->target1_is_rel) 7748 return R_ARM_REL32; 7749 else 7750 return R_ARM_ABS32; 7751 7752 case R_ARM_TARGET2: 7753 return globals->target2_reloc; 7754 7755 default: 7756 return r_type; 7757 } 7758 } 7759 7760 /* Return the base VMA address which should be subtracted from real addresses 7761 when resolving @dtpoff relocation. 7762 This is PT_TLS segment p_vaddr. */ 7763 7764 static bfd_vma 7765 dtpoff_base (struct bfd_link_info *info) 7766 { 7767 /* If tls_sec is NULL, we should have signalled an error already. */ 7768 if (elf_hash_table (info)->tls_sec == NULL) 7769 return 0; 7770 return elf_hash_table (info)->tls_sec->vma; 7771 } 7772 7773 /* Return the relocation value for @tpoff relocation 7774 if STT_TLS virtual address is ADDRESS. */ 7775 7776 static bfd_vma 7777 tpoff (struct bfd_link_info *info, bfd_vma address) 7778 { 7779 struct elf_link_hash_table *htab = elf_hash_table (info); 7780 bfd_vma base; 7781 7782 /* If tls_sec is NULL, we should have signalled an error already. */ 7783 if (htab->tls_sec == NULL) 7784 return 0; 7785 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power); 7786 return address - htab->tls_sec->vma + base; 7787 } 7788 7789 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA. 7790 VALUE is the relocation value. 
*/ 7791 7792 static bfd_reloc_status_type 7793 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value) 7794 { 7795 if (value > 0xfff) 7796 return bfd_reloc_overflow; 7797 7798 value |= bfd_get_32 (abfd, data) & 0xfffff000; 7799 bfd_put_32 (abfd, value, data); 7800 return bfd_reloc_ok; 7801 } 7802 7803 /* Handle TLS relaxations. Relaxing is possible for symbols that use 7804 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or 7805 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link. 7806 7807 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller 7808 is to then call final_link_relocate. Return other values in the 7809 case of error. 7810 7811 FIXME:When --emit-relocs is in effect, we'll emit relocs describing 7812 the pre-relaxed code. It would be nice if the relocs were updated 7813 to match the optimization. */ 7814 7815 static bfd_reloc_status_type 7816 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals, 7817 bfd *input_bfd, asection *input_sec, bfd_byte *contents, 7818 Elf_Internal_Rela *rel, unsigned long is_local) 7819 { 7820 unsigned long insn; 7821 7822 switch (ELF32_R_TYPE (rel->r_info)) 7823 { 7824 default: 7825 return bfd_reloc_notsupported; 7826 7827 case R_ARM_TLS_GOTDESC: 7828 if (is_local) 7829 insn = 0; 7830 else 7831 { 7832 insn = bfd_get_32 (input_bfd, contents + rel->r_offset); 7833 if (insn & 1) 7834 insn -= 5; /* THUMB */ 7835 else 7836 insn -= 8; /* ARM */ 7837 } 7838 bfd_put_32 (input_bfd, insn, contents + rel->r_offset); 7839 return bfd_reloc_continue; 7840 7841 case R_ARM_THM_TLS_DESCSEQ: 7842 /* Thumb insn. */ 7843 insn = bfd_get_16 (input_bfd, contents + rel->r_offset); 7844 if ((insn & 0xff78) == 0x4478) /* add rx, pc */ 7845 { 7846 if (is_local) 7847 /* nop */ 7848 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset); 7849 } 7850 else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */ 7851 { 7852 if (is_local) 7853 /* nop */ 7854 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset); 7855 else 7856 /* ldr rx,[ry] */ 7857 bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset); 7858 } 7859 else if ((insn & 0xff87) == 0x4780) /* blx rx */ 7860 { 7861 if (is_local) 7862 /* nop */ 7863 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset); 7864 else 7865 /* mov r0, rx */ 7866 bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78), 7867 contents + rel->r_offset); 7868 } 7869 else 7870 { 7871 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800) 7872 /* It's a 32 bit instruction, fetch the rest of it for 7873 error generation. */ 7874 insn = (insn << 16) 7875 | bfd_get_16 (input_bfd, contents + rel->r_offset + 2); 7876 (*_bfd_error_handler) 7877 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"), 7878 input_bfd, input_sec, (unsigned long)rel->r_offset, insn); 7879 return bfd_reloc_notsupported; 7880 } 7881 break; 7882 7883 case R_ARM_TLS_DESCSEQ: 7884 /* arm insn. 
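   For reference, the ARM instructions that can carry this relocation
   and the rewrites applied below are, in sketch form:

     add rx, pc, ry     ->  mov rx, ry   if is_local, else unchanged
     ldr rx, [ry, #4]   ->  nop          if is_local, else ldr rx, [ry]
     blx rx             ->  nop          if is_local, else mov r0, rx

   so for a symbol that resolves locally at static link time the whole
   descriptor call collapses into a register move plus nops.  This is
   only a summary of the cases handled below, not additional behaviour.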
*/ 7885 insn = bfd_get_32 (input_bfd, contents + rel->r_offset); 7886 if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */ 7887 { 7888 if (is_local) 7889 /* mov rx, ry */ 7890 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff), 7891 contents + rel->r_offset); 7892 } 7893 else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/ 7894 { 7895 if (is_local) 7896 /* nop */ 7897 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset); 7898 else 7899 /* ldr rx,[ry] */ 7900 bfd_put_32 (input_bfd, insn & 0xfffff000, 7901 contents + rel->r_offset); 7902 } 7903 else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */ 7904 { 7905 if (is_local) 7906 /* nop */ 7907 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset); 7908 else 7909 /* mov r0, rx */ 7910 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf), 7911 contents + rel->r_offset); 7912 } 7913 else 7914 { 7915 (*_bfd_error_handler) 7916 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"), 7917 input_bfd, input_sec, (unsigned long)rel->r_offset, insn); 7918 return bfd_reloc_notsupported; 7919 } 7920 break; 7921 7922 case R_ARM_TLS_CALL: 7923 /* GD->IE relaxation, turn the instruction into 'nop' or 7924 'ldr r0, [pc,r0]' */ 7925 insn = is_local ? 0xe1a00000 : 0xe79f0000; 7926 bfd_put_32 (input_bfd, insn, contents + rel->r_offset); 7927 break; 7928 7929 case R_ARM_THM_TLS_CALL: 7930 /* GD->IE relaxation */ 7931 if (!is_local) 7932 /* add r0,pc; ldr r0, [r0] */ 7933 insn = 0x44786800; 7934 else if (arch_has_thumb2_nop (globals)) 7935 /* nop.w */ 7936 insn = 0xf3af8000; 7937 else 7938 /* nop; nop */ 7939 insn = 0xbf00bf00; 7940 7941 bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset); 7942 bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2); 7943 break; 7944 } 7945 return bfd_reloc_ok; 7946 } 7947 7948 /* For a given value of n, calculate the value of G_n as required to 7949 deal with group relocations. We return it in the form of an 7950 encoded constant-and-rotation, together with the final residual. If n is 7951 specified as less than zero, then final_residual is filled with the 7952 input value and no further action is performed. */ 7953 7954 static bfd_vma 7955 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual) 7956 { 7957 int current_n; 7958 bfd_vma g_n; 7959 bfd_vma encoded_g_n = 0; 7960 bfd_vma residual = value; /* Also known as Y_n. */ 7961 7962 for (current_n = 0; current_n <= n; current_n++) 7963 { 7964 int shift; 7965 7966 /* Calculate which part of the value to mask. */ 7967 if (residual == 0) 7968 shift = 0; 7969 else 7970 { 7971 int msb; 7972 7973 /* Determine the most significant bit in the residual and 7974 align the resulting value to a 2-bit boundary. */ 7975 for (msb = 30; msb >= 0; msb -= 2) 7976 if (residual & (3 << msb)) 7977 break; 7978 7979 /* The desired shift is now (msb - 6), or zero, whichever 7980 is the greater. */ 7981 shift = msb - 6; 7982 if (shift < 0) 7983 shift = 0; 7984 } 7985 7986 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */ 7987 g_n = residual & (0xff << shift); 7988 encoded_g_n = (g_n >> shift) 7989 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8); 7990 7991 /* Calculate the residual for the next time around. */ 7992 residual &= ~g_n; 7993 } 7994 7995 *final_residual = residual; 7996 7997 return encoded_g_n; 7998 } 7999 8000 /* Given an ARM instruction, determine whether it is an ADD or a SUB. 8001 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. 
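   (Both helpers in this area exist to support the group relocations,
   R_ARM_ALU_PC_Gn and friends.)  As a worked example of
   calculate_group_reloc_mask above, take the value 0x12345:

     n = 0:  the most significant 2-bit-aligned chunk is masked with
             0xff << 10, so G_0 = 0x12000, encoded as 0xb48
             (immediate 0x48, rotation 11); residual Y_0 = 0x345.
     n = 1:  the next chunk is masked with 0xff << 2, so G_1 = 0x344,
             encoded as 0xfd1 (immediate 0xd1, rotation 15);
             residual Y_1 = 0x1.

   Each G_n is thus an 8-bit value with an even rotation, which is
   exactly what an ARM data-processing immediate can express.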
*/ 8002 8003 static int 8004 identify_add_or_sub (bfd_vma insn) 8005 { 8006 int opcode = insn & 0x1e00000; 8007 8008 if (opcode == 1 << 23) /* ADD */ 8009 return 1; 8010 8011 if (opcode == 1 << 22) /* SUB */ 8012 return -1; 8013 8014 return 0; 8015 } 8016 8017 /* Perform a relocation as part of a final link. */ 8018 8019 static bfd_reloc_status_type 8020 elf32_arm_final_link_relocate (reloc_howto_type * howto, 8021 bfd * input_bfd, 8022 bfd * output_bfd, 8023 asection * input_section, 8024 bfd_byte * contents, 8025 Elf_Internal_Rela * rel, 8026 bfd_vma value, 8027 struct bfd_link_info * info, 8028 asection * sym_sec, 8029 const char * sym_name, 8030 unsigned char st_type, 8031 enum arm_st_branch_type branch_type, 8032 struct elf_link_hash_entry * h, 8033 bfd_boolean * unresolved_reloc_p, 8034 char ** error_message) 8035 { 8036 unsigned long r_type = howto->type; 8037 unsigned long r_symndx; 8038 bfd_byte * hit_data = contents + rel->r_offset; 8039 bfd_vma * local_got_offsets; 8040 bfd_vma * local_tlsdesc_gotents; 8041 asection * sgot; 8042 asection * splt; 8043 asection * sreloc = NULL; 8044 asection * srelgot; 8045 bfd_vma addend; 8046 bfd_signed_vma signed_addend; 8047 unsigned char dynreloc_st_type; 8048 bfd_vma dynreloc_value; 8049 struct elf32_arm_link_hash_table * globals; 8050 struct elf32_arm_link_hash_entry *eh; 8051 union gotplt_union *root_plt; 8052 struct arm_plt_info *arm_plt; 8053 bfd_vma plt_offset; 8054 bfd_vma gotplt_offset; 8055 bfd_boolean has_iplt_entry; 8056 8057 globals = elf32_arm_hash_table (info); 8058 if (globals == NULL) 8059 return bfd_reloc_notsupported; 8060 8061 BFD_ASSERT (is_arm_elf (input_bfd)); 8062 8063 /* Some relocation types map to different relocations depending on the 8064 target. We pick the right one here. */ 8065 r_type = arm_real_reloc_type (globals, r_type); 8066 8067 /* It is possible to have linker relaxations on some TLS access 8068 models. Update our information here. */ 8069 r_type = elf32_arm_tls_transition (info, r_type, h); 8070 8071 if (r_type != howto->type) 8072 howto = elf32_arm_howto_from_type (r_type); 8073 8074 /* If the start address has been set, then set the EF_ARM_HASENTRY 8075 flag. Setting this more than once is redundant, but the cost is 8076 not too high, and it keeps the code simple. 8077 8078 The test is done here, rather than somewhere else, because the 8079 start address is only set just before the final link commences. 8080 8081 Note - if the user deliberately sets a start address of 0, the 8082 flag will not be set. */ 8083 if (bfd_get_start_address (output_bfd) != 0) 8084 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY; 8085 8086 eh = (struct elf32_arm_link_hash_entry *) h; 8087 sgot = globals->root.sgot; 8088 local_got_offsets = elf_local_got_offsets (input_bfd); 8089 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd); 8090 8091 if (globals->root.dynamic_sections_created) 8092 srelgot = globals->root.srelgot; 8093 else 8094 srelgot = NULL; 8095 8096 r_symndx = ELF32_R_SYM (rel->r_info); 8097 8098 if (globals->use_rel) 8099 { 8100 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask; 8101 8102 if (addend & ((howto->src_mask + 1) >> 1)) 8103 { 8104 signed_addend = -1; 8105 signed_addend &= ~ howto->src_mask; 8106 signed_addend |= addend; 8107 } 8108 else 8109 signed_addend = addend; 8110 } 8111 else 8112 addend = signed_addend = rel->r_addend; 8113 8114 /* Record the symbol information that should be used in dynamic 8115 relocations. 
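   (A concrete note on the REL addend extraction a few lines above: for
   a REL-format R_ARM_PC24 the 24-bit field typically holds 0xfffffe.
   Its top bit, 0x800000, is set, so the code above widens it to the
   signed value -2, and the later "signed_addend << howto->size" step in
   the branch handling turns that into the familiar -8 byte pipeline
   adjustment.)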
*/ 8116 dynreloc_st_type = st_type; 8117 dynreloc_value = value; 8118 if (branch_type == ST_BRANCH_TO_THUMB) 8119 dynreloc_value |= 1; 8120 8121 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and 8122 VALUE appropriately for relocations that we resolve at link time. */ 8123 has_iplt_entry = FALSE; 8124 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt) 8125 && root_plt->offset != (bfd_vma) -1) 8126 { 8127 plt_offset = root_plt->offset; 8128 gotplt_offset = arm_plt->got_offset; 8129 8130 if (h == NULL || eh->is_iplt) 8131 { 8132 has_iplt_entry = TRUE; 8133 splt = globals->root.iplt; 8134 8135 /* Populate .iplt entries here, because not all of them will 8136 be seen by finish_dynamic_symbol. The lower bit is set if 8137 we have already populated the entry. */ 8138 if (plt_offset & 1) 8139 plt_offset--; 8140 else 8141 { 8142 elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt, 8143 -1, dynreloc_value); 8144 root_plt->offset |= 1; 8145 } 8146 8147 /* Static relocations always resolve to the .iplt entry. */ 8148 st_type = STT_FUNC; 8149 value = (splt->output_section->vma 8150 + splt->output_offset 8151 + plt_offset); 8152 branch_type = ST_BRANCH_TO_ARM; 8153 8154 /* If there are non-call relocations that resolve to the .iplt 8155 entry, then all dynamic ones must too. */ 8156 if (arm_plt->noncall_refcount != 0) 8157 { 8158 dynreloc_st_type = st_type; 8159 dynreloc_value = value; 8160 } 8161 } 8162 else 8163 /* We populate the .plt entry in finish_dynamic_symbol. */ 8164 splt = globals->root.splt; 8165 } 8166 else 8167 { 8168 splt = NULL; 8169 plt_offset = (bfd_vma) -1; 8170 gotplt_offset = (bfd_vma) -1; 8171 } 8172 8173 switch (r_type) 8174 { 8175 case R_ARM_NONE: 8176 /* We don't need to find a value for this symbol. It's just a 8177 marker. */ 8178 *unresolved_reloc_p = FALSE; 8179 return bfd_reloc_ok; 8180 8181 case R_ARM_ABS12: 8182 if (!globals->vxworks_p) 8183 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend); 8184 8185 case R_ARM_PC24: 8186 case R_ARM_ABS32: 8187 case R_ARM_ABS32_NOI: 8188 case R_ARM_REL32: 8189 case R_ARM_REL32_NOI: 8190 case R_ARM_CALL: 8191 case R_ARM_JUMP24: 8192 case R_ARM_XPC25: 8193 case R_ARM_PREL31: 8194 case R_ARM_PLT32: 8195 /* Handle relocations which should use the PLT entry. ABS32/REL32 8196 will use the symbol's value, which may point to a PLT entry, but we 8197 don't need to handle that here. If we created a PLT entry, all 8198 branches in this object should go to it, except if the PLT is too 8199 far away, in which case a long branch stub should be inserted. */ 8200 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32 8201 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI 8202 && r_type != R_ARM_CALL 8203 && r_type != R_ARM_JUMP24 8204 && r_type != R_ARM_PLT32) 8205 && plt_offset != (bfd_vma) -1) 8206 { 8207 /* If we've created a .plt section, and assigned a PLT entry 8208 to this function, it must either be a STT_GNU_IFUNC reference 8209 or not be known to bind locally. In other cases, we should 8210 have cleared the PLT entry by now. 
*/ 8211 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h)); 8212 8213 value = (splt->output_section->vma 8214 + splt->output_offset 8215 + plt_offset); 8216 *unresolved_reloc_p = FALSE; 8217 return _bfd_final_link_relocate (howto, input_bfd, input_section, 8218 contents, rel->r_offset, value, 8219 rel->r_addend); 8220 } 8221 8222 /* When generating a shared object or relocatable executable, these 8223 relocations are copied into the output file to be resolved at 8224 run time. */ 8225 if ((info->shared || globals->root.is_relocatable_executable) 8226 && (input_section->flags & SEC_ALLOC) 8227 && !(globals->vxworks_p 8228 && strcmp (input_section->output_section->name, 8229 ".tls_vars") == 0) 8230 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI) 8231 || !SYMBOL_CALLS_LOCAL (info, h)) 8232 && (!strstr (input_section->name, STUB_SUFFIX)) 8233 && (h == NULL 8234 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 8235 || h->root.type != bfd_link_hash_undefweak) 8236 && r_type != R_ARM_PC24 8237 && r_type != R_ARM_CALL 8238 && r_type != R_ARM_JUMP24 8239 && r_type != R_ARM_PREL31 8240 && r_type != R_ARM_PLT32) 8241 { 8242 Elf_Internal_Rela outrel; 8243 bfd_boolean skip, relocate; 8244 8245 *unresolved_reloc_p = FALSE; 8246 8247 if (sreloc == NULL && globals->root.dynamic_sections_created) 8248 { 8249 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section, 8250 ! globals->use_rel); 8251 8252 if (sreloc == NULL) 8253 return bfd_reloc_notsupported; 8254 } 8255 8256 skip = FALSE; 8257 relocate = FALSE; 8258 8259 outrel.r_addend = addend; 8260 outrel.r_offset = 8261 _bfd_elf_section_offset (output_bfd, info, input_section, 8262 rel->r_offset); 8263 if (outrel.r_offset == (bfd_vma) -1) 8264 skip = TRUE; 8265 else if (outrel.r_offset == (bfd_vma) -2) 8266 skip = TRUE, relocate = TRUE; 8267 outrel.r_offset += (input_section->output_section->vma 8268 + input_section->output_offset); 8269 8270 if (skip) 8271 memset (&outrel, 0, sizeof outrel); 8272 else if (h != NULL 8273 && h->dynindx != -1 8274 && (!info->shared 8275 || !info->symbolic 8276 || !h->def_regular)) 8277 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type); 8278 else 8279 { 8280 int symbol; 8281 8282 /* This symbol is local, or marked to become local. */ 8283 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI); 8284 if (globals->symbian_p) 8285 { 8286 asection *osec; 8287 8288 /* On Symbian OS, the data segment and text segement 8289 can be relocated independently. Therefore, we 8290 must indicate the segment to which this 8291 relocation is relative. The BPABI allows us to 8292 use any symbol in the right segment; we just use 8293 the section symbol as it is convenient. (We 8294 cannot use the symbol given by "h" directly as it 8295 will not appear in the dynamic symbol table.) 8296 8297 Note that the dynamic linker ignores the section 8298 symbol value, so we don't subtract osec->vma 8299 from the emitted reloc addend. 
*/ 8300 if (sym_sec) 8301 osec = sym_sec->output_section; 8302 else 8303 osec = input_section->output_section; 8304 symbol = elf_section_data (osec)->dynindx; 8305 if (symbol == 0) 8306 { 8307 struct elf_link_hash_table *htab = elf_hash_table (info); 8308 8309 if ((osec->flags & SEC_READONLY) == 0 8310 && htab->data_index_section != NULL) 8311 osec = htab->data_index_section; 8312 else 8313 osec = htab->text_index_section; 8314 symbol = elf_section_data (osec)->dynindx; 8315 } 8316 BFD_ASSERT (symbol != 0); 8317 } 8318 else 8319 /* On SVR4-ish systems, the dynamic loader cannot 8320 relocate the text and data segments independently, 8321 so the symbol does not matter. */ 8322 symbol = 0; 8323 if (dynreloc_st_type == STT_GNU_IFUNC) 8324 /* We have an STT_GNU_IFUNC symbol that doesn't resolve 8325 to the .iplt entry. Instead, every non-call reference 8326 must use an R_ARM_IRELATIVE relocation to obtain the 8327 correct run-time address. */ 8328 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE); 8329 else 8330 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE); 8331 if (globals->use_rel) 8332 relocate = TRUE; 8333 else 8334 outrel.r_addend += dynreloc_value; 8335 } 8336 8337 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel); 8338 8339 /* If this reloc is against an external symbol, we do not want to 8340 fiddle with the addend. Otherwise, we need to include the symbol 8341 value so that it becomes an addend for the dynamic reloc. */ 8342 if (! relocate) 8343 return bfd_reloc_ok; 8344 8345 return _bfd_final_link_relocate (howto, input_bfd, input_section, 8346 contents, rel->r_offset, 8347 dynreloc_value, (bfd_vma) 0); 8348 } 8349 else switch (r_type) 8350 { 8351 case R_ARM_ABS12: 8352 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend); 8353 8354 case R_ARM_XPC25: /* Arm BLX instruction. */ 8355 case R_ARM_CALL: 8356 case R_ARM_JUMP24: 8357 case R_ARM_PC24: /* Arm B/BL instruction. */ 8358 case R_ARM_PLT32: 8359 { 8360 struct elf32_arm_stub_hash_entry *stub_entry = NULL; 8361 8362 if (r_type == R_ARM_XPC25) 8363 { 8364 /* Check for Arm calling Arm function. */ 8365 /* FIXME: Should we translate the instruction into a BL 8366 instruction instead ? */ 8367 if (branch_type != ST_BRANCH_TO_THUMB) 8368 (*_bfd_error_handler) 8369 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."), 8370 input_bfd, 8371 h ? h->root.root.string : "(local)"); 8372 } 8373 else if (r_type == R_ARM_PC24) 8374 { 8375 /* Check for Arm calling Thumb function. */ 8376 if (branch_type == ST_BRANCH_TO_THUMB) 8377 { 8378 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd, 8379 output_bfd, input_section, 8380 hit_data, sym_sec, rel->r_offset, 8381 signed_addend, value, 8382 error_message)) 8383 return bfd_reloc_ok; 8384 else 8385 return bfd_reloc_dangerous; 8386 } 8387 } 8388 8389 /* Check if a stub has to be inserted because the 8390 destination is too far or we are changing mode. */ 8391 if ( r_type == R_ARM_CALL 8392 || r_type == R_ARM_JUMP24 8393 || r_type == R_ARM_PLT32) 8394 { 8395 enum elf32_arm_stub_type stub_type = arm_stub_none; 8396 struct elf32_arm_link_hash_entry *hash; 8397 8398 hash = (struct elf32_arm_link_hash_entry *) h; 8399 stub_type = arm_type_of_stub (info, input_section, rel, 8400 st_type, &branch_type, 8401 hash, value, sym_sec, 8402 input_bfd, sym_name); 8403 8404 if (stub_type != arm_stub_none) 8405 { 8406 /* The target is out of reach, so redirect the 8407 branch to the local stub for this function. 
*/ 8408 stub_entry = elf32_arm_get_stub_entry (input_section, 8409 sym_sec, h, 8410 rel, globals, 8411 stub_type); 8412 { 8413 if (stub_entry != NULL) 8414 value = (stub_entry->stub_offset 8415 + stub_entry->stub_sec->output_offset 8416 + stub_entry->stub_sec->output_section->vma); 8417 8418 if (plt_offset != (bfd_vma) -1) 8419 *unresolved_reloc_p = FALSE; 8420 } 8421 } 8422 else 8423 { 8424 /* If the call goes through a PLT entry, make sure to 8425 check distance to the right destination address. */ 8426 if (plt_offset != (bfd_vma) -1) 8427 { 8428 value = (splt->output_section->vma 8429 + splt->output_offset 8430 + plt_offset); 8431 *unresolved_reloc_p = FALSE; 8432 /* The PLT entry is in ARM mode, regardless of the 8433 target function. */ 8434 branch_type = ST_BRANCH_TO_ARM; 8435 } 8436 } 8437 } 8438 8439 /* The ARM ELF ABI says that this reloc is computed as: S - P + A 8440 where: 8441 S is the address of the symbol in the relocation. 8442 P is address of the instruction being relocated. 8443 A is the addend (extracted from the instruction) in bytes. 8444 8445 S is held in 'value'. 8446 P is the base address of the section containing the 8447 instruction plus the offset of the reloc into that 8448 section, ie: 8449 (input_section->output_section->vma + 8450 input_section->output_offset + 8451 rel->r_offset). 8452 A is the addend, converted into bytes, ie: 8453 (signed_addend * 4) 8454 8455 Note: None of these operations have knowledge of the pipeline 8456 size of the processor, thus it is up to the assembler to 8457 encode this information into the addend. */ 8458 value -= (input_section->output_section->vma 8459 + input_section->output_offset); 8460 value -= rel->r_offset; 8461 if (globals->use_rel) 8462 value += (signed_addend << howto->size); 8463 else 8464 /* RELA addends do not have to be adjusted by howto->size. */ 8465 value += signed_addend; 8466 8467 signed_addend = value; 8468 signed_addend >>= howto->rightshift; 8469 8470 /* A branch to an undefined weak symbol is turned into a jump to 8471 the next instruction unless a PLT entry will be created. 8472 Do the same for local undefined symbols (but not for STN_UNDEF). 8473 The jump to the next instruction is optimized as a NOP depending 8474 on the architecture. */ 8475 if (h ? (h->root.type == bfd_link_hash_undefweak 8476 && plt_offset == (bfd_vma) -1) 8477 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec)) 8478 { 8479 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000); 8480 8481 if (arch_has_arm_nop (globals)) 8482 value |= 0x0320f000; 8483 else 8484 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */ 8485 } 8486 else 8487 { 8488 /* Perform a signed range check. */ 8489 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1)) 8490 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1))) 8491 return bfd_reloc_overflow; 8492 8493 addend = (value & 2); 8494 8495 value = (signed_addend & howto->dst_mask) 8496 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask)); 8497 8498 if (r_type == R_ARM_CALL) 8499 { 8500 /* Set the H bit in the BLX instruction. */ 8501 if (branch_type == ST_BRANCH_TO_THUMB) 8502 { 8503 if (addend) 8504 value |= (1 << 24); 8505 else 8506 value &= ~(bfd_vma)(1 << 24); 8507 } 8508 8509 /* Select the correct instruction (BL or BLX). */ 8510 /* Only if we are not handling a BL to a stub. In this 8511 case, mode switching is performed by the stub. 
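   In encoding terms, as an illustrative sketch of the two branches
   below: an unconditional ARM BL is 0xEBxxxxxx (condition field 0b1110
   in bits 31-28).  Setting bit 28 turns the condition field into
   0b1111, giving the BLX (immediate) form 0xFAxxxxxx or 0xFBxxxxxx,
   where bit 24 is the H bit and carries bit 1 of the byte displacement
   (the "addend" flag computed above).  Conversely, clearing bit 28 and
   setting bit 24 restores a plain BL for calls that stay in ARM state
   or go via a stub.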
*/ 8512 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry) 8513 value |= (1 << 28); 8514 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN) 8515 { 8516 value &= ~(bfd_vma)(1 << 28); 8517 value |= (1 << 24); 8518 } 8519 } 8520 } 8521 } 8522 break; 8523 8524 case R_ARM_ABS32: 8525 value += addend; 8526 if (branch_type == ST_BRANCH_TO_THUMB) 8527 value |= 1; 8528 break; 8529 8530 case R_ARM_ABS32_NOI: 8531 value += addend; 8532 break; 8533 8534 case R_ARM_REL32: 8535 value += addend; 8536 if (branch_type == ST_BRANCH_TO_THUMB) 8537 value |= 1; 8538 value -= (input_section->output_section->vma 8539 + input_section->output_offset + rel->r_offset); 8540 break; 8541 8542 case R_ARM_REL32_NOI: 8543 value += addend; 8544 value -= (input_section->output_section->vma 8545 + input_section->output_offset + rel->r_offset); 8546 break; 8547 8548 case R_ARM_PREL31: 8549 value -= (input_section->output_section->vma 8550 + input_section->output_offset + rel->r_offset); 8551 value += signed_addend; 8552 if (! h || h->root.type != bfd_link_hash_undefweak) 8553 { 8554 /* Check for overflow. */ 8555 if ((value ^ (value >> 1)) & (1 << 30)) 8556 return bfd_reloc_overflow; 8557 } 8558 value &= 0x7fffffff; 8559 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000); 8560 if (branch_type == ST_BRANCH_TO_THUMB) 8561 value |= 1; 8562 break; 8563 } 8564 8565 bfd_put_32 (input_bfd, value, hit_data); 8566 return bfd_reloc_ok; 8567 8568 case R_ARM_ABS8: 8569 value += addend; 8570 8571 /* There is no way to tell whether the user intended to use a signed or 8572 unsigned addend. When checking for overflow we accept either, 8573 as specified by the AAELF. */ 8574 if ((long) value > 0xff || (long) value < -0x80) 8575 return bfd_reloc_overflow; 8576 8577 bfd_put_8 (input_bfd, value, hit_data); 8578 return bfd_reloc_ok; 8579 8580 case R_ARM_ABS16: 8581 value += addend; 8582 8583 /* See comment for R_ARM_ABS8. */ 8584 if ((long) value > 0xffff || (long) value < -0x8000) 8585 return bfd_reloc_overflow; 8586 8587 bfd_put_16 (input_bfd, value, hit_data); 8588 return bfd_reloc_ok; 8589 8590 case R_ARM_THM_ABS5: 8591 /* Support ldr and str instructions for the thumb. */ 8592 if (globals->use_rel) 8593 { 8594 /* Need to refetch addend. */ 8595 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask; 8596 /* ??? Need to determine shift amount from operand size. */ 8597 addend >>= howto->rightshift; 8598 } 8599 value += addend; 8600 8601 /* ??? Isn't value unsigned? */ 8602 if ((long) value > 0x1f || (long) value < -0x10) 8603 return bfd_reloc_overflow; 8604 8605 /* ??? Value needs to be properly shifted into place first. */ 8606 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f; 8607 bfd_put_16 (input_bfd, value, hit_data); 8608 return bfd_reloc_ok; 8609 8610 case R_ARM_THM_ALU_PREL_11_0: 8611 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). 
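   The 12-bit offset is stored in the Thumb-2 i:imm3:imm8 form, so the
   rebuild below scatters it as in this small example (values chosen
   only for illustration): for an offset of 0xabc,

     value & 0xff           ==       0xbc   imm8, insn bits 7-0
     (value & 0x700) << 4   ==     0x2000   imm3, insn bits 14-12
     (value & 0x800) << 15  ==  0x4000000   the i bit, insn bit 26

   and ORing in 0xa00000 switches the add form to the sub form when the
   relocated value turns out to be negative.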
*/ 8612 { 8613 bfd_vma insn; 8614 bfd_signed_vma relocation; 8615 8616 insn = (bfd_get_16 (input_bfd, hit_data) << 16) 8617 | bfd_get_16 (input_bfd, hit_data + 2); 8618 8619 if (globals->use_rel) 8620 { 8621 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4) 8622 | ((insn & (1 << 26)) >> 15); 8623 if (insn & 0xf00000) 8624 signed_addend = -signed_addend; 8625 } 8626 8627 relocation = value + signed_addend; 8628 relocation -= Pa (input_section->output_section->vma 8629 + input_section->output_offset 8630 + rel->r_offset); 8631 8632 value = abs (relocation); 8633 8634 if (value >= 0x1000) 8635 return bfd_reloc_overflow; 8636 8637 insn = (insn & 0xfb0f8f00) | (value & 0xff) 8638 | ((value & 0x700) << 4) 8639 | ((value & 0x800) << 15); 8640 if (relocation < 0) 8641 insn |= 0xa00000; 8642 8643 bfd_put_16 (input_bfd, insn >> 16, hit_data); 8644 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2); 8645 8646 return bfd_reloc_ok; 8647 } 8648 8649 case R_ARM_THM_PC8: 8650 /* PR 10073: This reloc is not generated by the GNU toolchain, 8651 but it is supported for compatibility with third party libraries 8652 generated by other compilers, specifically the ARM/IAR. */ 8653 { 8654 bfd_vma insn; 8655 bfd_signed_vma relocation; 8656 8657 insn = bfd_get_16 (input_bfd, hit_data); 8658 8659 if (globals->use_rel) 8660 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4; 8661 8662 relocation = value + addend; 8663 relocation -= Pa (input_section->output_section->vma 8664 + input_section->output_offset 8665 + rel->r_offset); 8666 8667 value = abs (relocation); 8668 8669 /* We do not check for overflow of this reloc. Although strictly 8670 speaking this is incorrect, it appears to be necessary in order 8671 to work with IAR generated relocs. Since GCC and GAS do not 8672 generate R_ARM_THM_PC8 relocs, the lack of a check should not be 8673 a problem for them. */ 8674 value &= 0x3fc; 8675 8676 insn = (insn & 0xff00) | (value >> 2); 8677 8678 bfd_put_16 (input_bfd, insn, hit_data); 8679 8680 return bfd_reloc_ok; 8681 } 8682 8683 case R_ARM_THM_PC12: 8684 /* Corresponds to: ldr.w reg, [pc, #offset]. */ 8685 { 8686 bfd_vma insn; 8687 bfd_signed_vma relocation; 8688 8689 insn = (bfd_get_16 (input_bfd, hit_data) << 16) 8690 | bfd_get_16 (input_bfd, hit_data + 2); 8691 8692 if (globals->use_rel) 8693 { 8694 signed_addend = insn & 0xfff; 8695 if (!(insn & (1 << 23))) 8696 signed_addend = -signed_addend; 8697 } 8698 8699 relocation = value + signed_addend; 8700 relocation -= Pa (input_section->output_section->vma 8701 + input_section->output_offset 8702 + rel->r_offset); 8703 8704 value = abs (relocation); 8705 8706 if (value >= 0x1000) 8707 return bfd_reloc_overflow; 8708 8709 insn = (insn & 0xff7ff000) | value; 8710 if (relocation >= 0) 8711 insn |= (1 << 23); 8712 8713 bfd_put_16 (input_bfd, insn >> 16, hit_data); 8714 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2); 8715 8716 return bfd_reloc_ok; 8717 } 8718 8719 case R_ARM_THM_XPC22: 8720 case R_ARM_THM_CALL: 8721 case R_ARM_THM_JUMP24: 8722 /* Thumb BL (branch long instruction). 
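   A quick decode example for the addend reconstruction below (the
   instruction pair is the canonical one seen in REL objects, shown for
   illustration): the sequence 0xf7ff 0xfffe has S = 1, imm10 = 0x3ff,
   J1 = J2 = 1 and imm11 = 0x7fe, hence I1 = I2 = 1 and

     addend = (1 << 23) | (1 << 22) | (0x3ff << 12) | (0x7fe << 1)
            = 0xfffffc

   which the following sign-extension step turns into the expected -4.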
*/ 8723 { 8724 bfd_vma relocation; 8725 bfd_vma reloc_sign; 8726 bfd_boolean overflow = FALSE; 8727 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data); 8728 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2); 8729 bfd_signed_vma reloc_signed_max; 8730 bfd_signed_vma reloc_signed_min; 8731 bfd_vma check; 8732 bfd_signed_vma signed_check; 8733 int bitsize; 8734 const int thumb2 = using_thumb2 (globals); 8735 8736 /* A branch to an undefined weak symbol is turned into a jump to 8737 the next instruction unless a PLT entry will be created. 8738 The jump to the next instruction is optimized as a NOP.W for 8739 Thumb-2 enabled architectures. */ 8740 if (h && h->root.type == bfd_link_hash_undefweak 8741 && plt_offset == (bfd_vma) -1) 8742 { 8743 if (arch_has_thumb2_nop (globals)) 8744 { 8745 bfd_put_16 (input_bfd, 0xf3af, hit_data); 8746 bfd_put_16 (input_bfd, 0x8000, hit_data + 2); 8747 } 8748 else 8749 { 8750 bfd_put_16 (input_bfd, 0xe000, hit_data); 8751 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2); 8752 } 8753 return bfd_reloc_ok; 8754 } 8755 8756 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible 8757 with Thumb-1) involving the J1 and J2 bits. */ 8758 if (globals->use_rel) 8759 { 8760 bfd_vma s = (upper_insn & (1 << 10)) >> 10; 8761 bfd_vma upper = upper_insn & 0x3ff; 8762 bfd_vma lower = lower_insn & 0x7ff; 8763 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13; 8764 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11; 8765 bfd_vma i1 = j1 ^ s ? 0 : 1; 8766 bfd_vma i2 = j2 ^ s ? 0 : 1; 8767 8768 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1); 8769 /* Sign extend. */ 8770 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24); 8771 8772 signed_addend = addend; 8773 } 8774 8775 if (r_type == R_ARM_THM_XPC22) 8776 { 8777 /* Check for Thumb to Thumb call. */ 8778 /* FIXME: Should we translate the instruction into a BL 8779 instruction instead ? */ 8780 if (branch_type == ST_BRANCH_TO_THUMB) 8781 (*_bfd_error_handler) 8782 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."), 8783 input_bfd, 8784 h ? h->root.root.string : "(local)"); 8785 } 8786 else 8787 { 8788 /* If it is not a call to Thumb, assume call to Arm. 8789 If it is a call relative to a section name, then it is not a 8790 function call at all, but rather a long jump. Calls through 8791 the PLT do not require stubs. */ 8792 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1) 8793 { 8794 if (globals->use_blx && r_type == R_ARM_THM_CALL) 8795 { 8796 /* Convert BL to BLX. */ 8797 lower_insn = (lower_insn & ~0x1000) | 0x0800; 8798 } 8799 else if (( r_type != R_ARM_THM_CALL) 8800 && (r_type != R_ARM_THM_JUMP24)) 8801 { 8802 if (elf32_thumb_to_arm_stub 8803 (info, sym_name, input_bfd, output_bfd, input_section, 8804 hit_data, sym_sec, rel->r_offset, signed_addend, value, 8805 error_message)) 8806 return bfd_reloc_ok; 8807 else 8808 return bfd_reloc_dangerous; 8809 } 8810 } 8811 else if (branch_type == ST_BRANCH_TO_THUMB 8812 && globals->use_blx 8813 && r_type == R_ARM_THM_CALL) 8814 { 8815 /* Make sure this is a BL. */ 8816 lower_insn |= 0x1800; 8817 } 8818 } 8819 8820 enum elf32_arm_stub_type stub_type = arm_stub_none; 8821 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24) 8822 { 8823 /* Check if a stub has to be inserted because the destination 8824 is too far. 
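   (For orientation: a Thumb-2 BL or B.W reaches roughly +/-16MB and the
   original Thumb-1 BL only about +/-4MB, matching the 24- and 22-bit
   signed ranges checked later in this block, so destinations beyond
   that, or an ARM/Thumb mode change without BLX, need one of the
   long-branch stubs.)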
*/ 8825 struct elf32_arm_stub_hash_entry *stub_entry; 8826 struct elf32_arm_link_hash_entry *hash; 8827 8828 hash = (struct elf32_arm_link_hash_entry *) h; 8829 8830 stub_type = arm_type_of_stub (info, input_section, rel, 8831 st_type, &branch_type, 8832 hash, value, sym_sec, 8833 input_bfd, sym_name); 8834 8835 if (stub_type != arm_stub_none) 8836 { 8837 /* The target is out of reach or we are changing modes, so 8838 redirect the branch to the local stub for this 8839 function. */ 8840 stub_entry = elf32_arm_get_stub_entry (input_section, 8841 sym_sec, h, 8842 rel, globals, 8843 stub_type); 8844 if (stub_entry != NULL) 8845 { 8846 value = (stub_entry->stub_offset 8847 + stub_entry->stub_sec->output_offset 8848 + stub_entry->stub_sec->output_section->vma); 8849 8850 if (plt_offset != (bfd_vma) -1) 8851 *unresolved_reloc_p = FALSE; 8852 } 8853 8854 /* If this call becomes a call to Arm, force BLX. */ 8855 if (globals->use_blx && (r_type == R_ARM_THM_CALL)) 8856 { 8857 if ((stub_entry 8858 && !arm_stub_is_thumb (stub_entry->stub_type)) 8859 || branch_type != ST_BRANCH_TO_THUMB) 8860 lower_insn = (lower_insn & ~0x1000) | 0x0800; 8861 } 8862 } 8863 } 8864 8865 /* Handle calls via the PLT. */ 8866 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1) 8867 { 8868 value = (splt->output_section->vma 8869 + splt->output_offset 8870 + plt_offset); 8871 8872 if (globals->use_blx && r_type == R_ARM_THM_CALL) 8873 { 8874 /* If the Thumb BLX instruction is available, convert 8875 the BL to a BLX instruction to call the ARM-mode 8876 PLT entry. */ 8877 lower_insn = (lower_insn & ~0x1000) | 0x0800; 8878 branch_type = ST_BRANCH_TO_ARM; 8879 } 8880 else 8881 { 8882 /* Target the Thumb stub before the ARM PLT entry. */ 8883 value -= PLT_THUMB_STUB_SIZE; 8884 branch_type = ST_BRANCH_TO_THUMB; 8885 } 8886 *unresolved_reloc_p = FALSE; 8887 } 8888 8889 relocation = value + signed_addend; 8890 8891 relocation -= (input_section->output_section->vma 8892 + input_section->output_offset 8893 + rel->r_offset); 8894 8895 check = relocation >> howto->rightshift; 8896 8897 /* If this is a signed value, the rightshift just dropped 8898 leading 1 bits (assuming twos complement). */ 8899 if ((bfd_signed_vma) relocation >= 0) 8900 signed_check = check; 8901 else 8902 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift); 8903 8904 /* Calculate the permissable maximum and minimum values for 8905 this relocation according to whether we're relocating for 8906 Thumb-2 or not. */ 8907 bitsize = howto->bitsize; 8908 if (!thumb2) 8909 bitsize -= 2; 8910 reloc_signed_max = (1 << (bitsize - 1)) - 1; 8911 reloc_signed_min = ~reloc_signed_max; 8912 8913 /* Assumes two's complement. */ 8914 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min) 8915 overflow = TRUE; 8916 8917 if ((lower_insn & 0x5000) == 0x4000) 8918 /* For a BLX instruction, make sure that the relocation is rounded up 8919 to a word boundary. This follows the semantics of the instruction 8920 which specifies that bit 1 of the target address will come from bit 8921 1 of the base address. */ 8922 relocation = (relocation + 2) & ~ 3; 8923 8924 /* Put RELOCATION back into the insn. Assumes two's complement. 8925 We use the Thumb-2 encoding, which is safe even if dealing with 8926 a Thumb-1 instruction by virtue of our overflow check above. */ 8927 reloc_sign = (signed_check < 0) ? 
1 : 0; 8928 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff) 8929 | ((relocation >> 12) & 0x3ff) 8930 | (reloc_sign << 10); 8931 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff) 8932 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13) 8933 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11) 8934 | ((relocation >> 1) & 0x7ff); 8935 8936 /* Put the relocated value back in the object file: */ 8937 bfd_put_16 (input_bfd, upper_insn, hit_data); 8938 bfd_put_16 (input_bfd, lower_insn, hit_data + 2); 8939 8940 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok); 8941 } 8942 break; 8943 8944 case R_ARM_THM_JUMP19: 8945 /* Thumb32 conditional branch instruction. */ 8946 { 8947 bfd_vma relocation; 8948 bfd_boolean overflow = FALSE; 8949 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data); 8950 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2); 8951 bfd_signed_vma reloc_signed_max = 0xffffe; 8952 bfd_signed_vma reloc_signed_min = -0x100000; 8953 bfd_signed_vma signed_check; 8954 8955 /* Need to refetch the addend, reconstruct the top three bits, 8956 and squish the two 11 bit pieces together. */ 8957 if (globals->use_rel) 8958 { 8959 bfd_vma S = (upper_insn & 0x0400) >> 10; 8960 bfd_vma upper = (upper_insn & 0x003f); 8961 bfd_vma J1 = (lower_insn & 0x2000) >> 13; 8962 bfd_vma J2 = (lower_insn & 0x0800) >> 11; 8963 bfd_vma lower = (lower_insn & 0x07ff); 8964 8965 upper |= J1 << 6; 8966 upper |= J2 << 7; 8967 upper |= (!S) << 8; 8968 upper -= 0x0100; /* Sign extend. */ 8969 8970 addend = (upper << 12) | (lower << 1); 8971 signed_addend = addend; 8972 } 8973 8974 /* Handle calls via the PLT. */ 8975 if (plt_offset != (bfd_vma) -1) 8976 { 8977 value = (splt->output_section->vma 8978 + splt->output_offset 8979 + plt_offset); 8980 /* Target the Thumb stub before the ARM PLT entry. */ 8981 value -= PLT_THUMB_STUB_SIZE; 8982 *unresolved_reloc_p = FALSE; 8983 } 8984 8985 /* ??? Should handle interworking? GCC might someday try to 8986 use this for tail calls. */ 8987 8988 relocation = value + signed_addend; 8989 relocation -= (input_section->output_section->vma 8990 + input_section->output_offset 8991 + rel->r_offset); 8992 signed_check = (bfd_signed_vma) relocation; 8993 8994 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min) 8995 overflow = TRUE; 8996 8997 /* Put RELOCATION back into the insn. */ 8998 { 8999 bfd_vma S = (relocation & 0x00100000) >> 20; 9000 bfd_vma J2 = (relocation & 0x00080000) >> 19; 9001 bfd_vma J1 = (relocation & 0x00040000) >> 18; 9002 bfd_vma hi = (relocation & 0x0003f000) >> 12; 9003 bfd_vma lo = (relocation & 0x00000ffe) >> 1; 9004 9005 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi; 9006 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo; 9007 } 9008 9009 /* Put the relocated value back in the object file: */ 9010 bfd_put_16 (input_bfd, upper_insn, hit_data); 9011 bfd_put_16 (input_bfd, lower_insn, hit_data + 2); 9012 9013 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok); 9014 } 9015 9016 case R_ARM_THM_JUMP11: 9017 case R_ARM_THM_JUMP8: 9018 case R_ARM_THM_JUMP6: 9019 /* Thumb B (branch) instruction). */ 9020 { 9021 bfd_signed_vma relocation; 9022 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1; 9023 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max; 9024 bfd_signed_vma signed_check; 9025 9026 /* CZB cannot jump backward. */ 9027 if (r_type == R_ARM_THM_JUMP6) 9028 reloc_signed_min = 0; 9029 9030 if (globals->use_rel) 9031 { 9032 /* Need to refetch addend. 
*/ 9033 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask; 9034 if (addend & ((howto->src_mask + 1) >> 1)) 9035 { 9036 signed_addend = -1; 9037 signed_addend &= ~ howto->src_mask; 9038 signed_addend |= addend; 9039 } 9040 else 9041 signed_addend = addend; 9042 /* The value in the insn has been right shifted. We need to 9043 undo this, so that we can perform the address calculation 9044 in terms of bytes. */ 9045 signed_addend <<= howto->rightshift; 9046 } 9047 relocation = value + signed_addend; 9048 9049 relocation -= (input_section->output_section->vma 9050 + input_section->output_offset 9051 + rel->r_offset); 9052 9053 relocation >>= howto->rightshift; 9054 signed_check = relocation; 9055 9056 if (r_type == R_ARM_THM_JUMP6) 9057 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3); 9058 else 9059 relocation &= howto->dst_mask; 9060 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask)); 9061 9062 bfd_put_16 (input_bfd, relocation, hit_data); 9063 9064 /* Assumes two's complement. */ 9065 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min) 9066 return bfd_reloc_overflow; 9067 9068 return bfd_reloc_ok; 9069 } 9070 9071 case R_ARM_ALU_PCREL7_0: 9072 case R_ARM_ALU_PCREL15_8: 9073 case R_ARM_ALU_PCREL23_15: 9074 { 9075 bfd_vma insn; 9076 bfd_vma relocation; 9077 9078 insn = bfd_get_32 (input_bfd, hit_data); 9079 if (globals->use_rel) 9080 { 9081 /* Extract the addend. */ 9082 addend = (insn & 0xff) << ((insn & 0xf00) >> 7); 9083 signed_addend = addend; 9084 } 9085 relocation = value + signed_addend; 9086 9087 relocation -= (input_section->output_section->vma 9088 + input_section->output_offset 9089 + rel->r_offset); 9090 insn = (insn & ~0xfff) 9091 | ((howto->bitpos << 7) & 0xf00) 9092 | ((relocation >> howto->bitpos) & 0xff); 9093 bfd_put_32 (input_bfd, value, hit_data); 9094 } 9095 return bfd_reloc_ok; 9096 9097 case R_ARM_GNU_VTINHERIT: 9098 case R_ARM_GNU_VTENTRY: 9099 return bfd_reloc_ok; 9100 9101 case R_ARM_GOTOFF32: 9102 /* Relocation is relative to the start of the 9103 global offset table. */ 9104 9105 BFD_ASSERT (sgot != NULL); 9106 if (sgot == NULL) 9107 return bfd_reloc_notsupported; 9108 9109 /* If we are addressing a Thumb function, we need to adjust the 9110 address by one, so that attempts to call the function pointer will 9111 correctly interpret it as Thumb code. */ 9112 if (branch_type == ST_BRANCH_TO_THUMB) 9113 value += 1; 9114 9115 /* Note that sgot->output_offset is not involved in this 9116 calculation. We always want the start of .got. If we 9117 define _GLOBAL_OFFSET_TABLE in a different way, as is 9118 permitted by the ABI, we might have to change this 9119 calculation. */ 9120 value -= sgot->output_section->vma; 9121 return _bfd_final_link_relocate (howto, input_bfd, input_section, 9122 contents, rel->r_offset, value, 9123 rel->r_addend); 9124 9125 case R_ARM_GOTPC: 9126 /* Use global offset table as symbol value. */ 9127 BFD_ASSERT (sgot != NULL); 9128 9129 if (sgot == NULL) 9130 return bfd_reloc_notsupported; 9131 9132 *unresolved_reloc_p = FALSE; 9133 value = sgot->output_section->vma; 9134 return _bfd_final_link_relocate (howto, input_bfd, input_section, 9135 contents, rel->r_offset, value, 9136 rel->r_addend); 9137 9138 case R_ARM_GOT32: 9139 case R_ARM_GOT_PREL: 9140 /* Relocation is to the entry for this symbol in the 9141 global offset table. 
*/ 9142 if (sgot == NULL) 9143 return bfd_reloc_notsupported; 9144 9145 if (dynreloc_st_type == STT_GNU_IFUNC 9146 && plt_offset != (bfd_vma) -1 9147 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h))) 9148 { 9149 /* We have a relocation against a locally-binding STT_GNU_IFUNC 9150 symbol, and the relocation resolves directly to the runtime 9151 target rather than to the .iplt entry. This means that any 9152 .got entry would be the same value as the .igot.plt entry, 9153 so there's no point creating both. */ 9154 sgot = globals->root.igotplt; 9155 value = sgot->output_offset + gotplt_offset; 9156 } 9157 else if (h != NULL) 9158 { 9159 bfd_vma off; 9160 9161 off = h->got.offset; 9162 BFD_ASSERT (off != (bfd_vma) -1); 9163 if ((off & 1) != 0) 9164 { 9165 /* We have already processsed one GOT relocation against 9166 this symbol. */ 9167 off &= ~1; 9168 if (globals->root.dynamic_sections_created 9169 && !SYMBOL_REFERENCES_LOCAL (info, h)) 9170 *unresolved_reloc_p = FALSE; 9171 } 9172 else 9173 { 9174 Elf_Internal_Rela outrel; 9175 9176 if (!SYMBOL_REFERENCES_LOCAL (info, h)) 9177 { 9178 /* If the symbol doesn't resolve locally in a static 9179 object, we have an undefined reference. If the 9180 symbol doesn't resolve locally in a dynamic object, 9181 it should be resolved by the dynamic linker. */ 9182 if (globals->root.dynamic_sections_created) 9183 { 9184 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT); 9185 *unresolved_reloc_p = FALSE; 9186 } 9187 else 9188 outrel.r_info = 0; 9189 outrel.r_addend = 0; 9190 } 9191 else 9192 { 9193 if (dynreloc_st_type == STT_GNU_IFUNC) 9194 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE); 9195 else if (info->shared) 9196 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE); 9197 else 9198 outrel.r_info = 0; 9199 outrel.r_addend = dynreloc_value; 9200 } 9201 9202 /* The GOT entry is initialized to zero by default. 9203 See if we should install a different value. */ 9204 if (outrel.r_addend != 0 9205 && (outrel.r_info == 0 || globals->use_rel)) 9206 { 9207 bfd_put_32 (output_bfd, outrel.r_addend, 9208 sgot->contents + off); 9209 outrel.r_addend = 0; 9210 } 9211 9212 if (outrel.r_info != 0) 9213 { 9214 outrel.r_offset = (sgot->output_section->vma 9215 + sgot->output_offset 9216 + off); 9217 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel); 9218 } 9219 h->got.offset |= 1; 9220 } 9221 value = sgot->output_offset + off; 9222 } 9223 else 9224 { 9225 bfd_vma off; 9226 9227 BFD_ASSERT (local_got_offsets != NULL && 9228 local_got_offsets[r_symndx] != (bfd_vma) -1); 9229 9230 off = local_got_offsets[r_symndx]; 9231 9232 /* The offset must always be a multiple of 4. We use the 9233 least significant bit to record whether we have already 9234 generated the necessary reloc. 
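   (A small example of the marker convention used here and for
   h->got.offset above: a GOT entry allocated at offset 0x18 is stored
   as 0x18 while it still needs processing and as 0x19 once its
   relocation or initial value has been emitted; readers mask with ~1,
   as below, to recover the real offset.)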
*/ 9235 if ((off & 1) != 0) 9236 off &= ~1; 9237 else 9238 { 9239 if (globals->use_rel) 9240 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off); 9241 9242 if (info->shared || dynreloc_st_type == STT_GNU_IFUNC) 9243 { 9244 Elf_Internal_Rela outrel; 9245 9246 outrel.r_addend = addend + dynreloc_value; 9247 outrel.r_offset = (sgot->output_section->vma 9248 + sgot->output_offset 9249 + off); 9250 if (dynreloc_st_type == STT_GNU_IFUNC) 9251 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE); 9252 else 9253 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE); 9254 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel); 9255 } 9256 9257 local_got_offsets[r_symndx] |= 1; 9258 } 9259 9260 value = sgot->output_offset + off; 9261 } 9262 if (r_type != R_ARM_GOT32) 9263 value += sgot->output_section->vma; 9264 9265 return _bfd_final_link_relocate (howto, input_bfd, input_section, 9266 contents, rel->r_offset, value, 9267 rel->r_addend); 9268 9269 case R_ARM_TLS_LDO32: 9270 value = value - dtpoff_base (info); 9271 9272 return _bfd_final_link_relocate (howto, input_bfd, input_section, 9273 contents, rel->r_offset, value, 9274 rel->r_addend); 9275 9276 case R_ARM_TLS_LDM32: 9277 { 9278 bfd_vma off; 9279 9280 if (sgot == NULL) 9281 abort (); 9282 9283 off = globals->tls_ldm_got.offset; 9284 9285 if ((off & 1) != 0) 9286 off &= ~1; 9287 else 9288 { 9289 /* If we don't know the module number, create a relocation 9290 for it. */ 9291 if (info->shared) 9292 { 9293 Elf_Internal_Rela outrel; 9294 9295 if (srelgot == NULL) 9296 abort (); 9297 9298 outrel.r_addend = 0; 9299 outrel.r_offset = (sgot->output_section->vma 9300 + sgot->output_offset + off); 9301 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32); 9302 9303 if (globals->use_rel) 9304 bfd_put_32 (output_bfd, outrel.r_addend, 9305 sgot->contents + off); 9306 9307 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel); 9308 } 9309 else 9310 bfd_put_32 (output_bfd, 1, sgot->contents + off); 9311 9312 globals->tls_ldm_got.offset |= 1; 9313 } 9314 9315 value = sgot->output_section->vma + sgot->output_offset + off 9316 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset); 9317 9318 return _bfd_final_link_relocate (howto, input_bfd, input_section, 9319 contents, rel->r_offset, value, 9320 rel->r_addend); 9321 } 9322 9323 case R_ARM_TLS_CALL: 9324 case R_ARM_THM_TLS_CALL: 9325 case R_ARM_TLS_GD32: 9326 case R_ARM_TLS_IE32: 9327 case R_ARM_TLS_GOTDESC: 9328 case R_ARM_TLS_DESCSEQ: 9329 case R_ARM_THM_TLS_DESCSEQ: 9330 { 9331 bfd_vma off, offplt; 9332 int indx = 0; 9333 char tls_type; 9334 9335 BFD_ASSERT (sgot != NULL); 9336 9337 if (h != NULL) 9338 { 9339 bfd_boolean dyn; 9340 dyn = globals->root.dynamic_sections_created; 9341 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h) 9342 && (!info->shared 9343 || !SYMBOL_REFERENCES_LOCAL (info, h))) 9344 { 9345 *unresolved_reloc_p = FALSE; 9346 indx = h->dynindx; 9347 } 9348 off = h->got.offset; 9349 offplt = elf32_arm_hash_entry (h)->tlsdesc_got; 9350 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type; 9351 } 9352 else 9353 { 9354 BFD_ASSERT (local_got_offsets != NULL); 9355 off = local_got_offsets[r_symndx]; 9356 offplt = local_tlsdesc_gotents[r_symndx]; 9357 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx]; 9358 } 9359 9360 /* Linker relaxations happen from one of the 9361 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.
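When such a relaxation has happened, the type recorded in rel->r_info differs from the r_type being processed here; the check below uses that mismatch and treats the GOT slot as an IE slot.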
*/ 9362 if (ELF32_R_TYPE(rel->r_info) != r_type) 9363 tls_type = GOT_TLS_IE; 9364 9365 BFD_ASSERT (tls_type != GOT_UNKNOWN); 9366 9367 if ((off & 1) != 0) 9368 off &= ~1; 9369 else 9370 { 9371 bfd_boolean need_relocs = FALSE; 9372 Elf_Internal_Rela outrel; 9373 int cur_off = off; 9374 9375 /* The GOT entries have not been initialized yet. Do it 9376 now, and emit any relocations. If both an IE GOT and a 9377 GD GOT are necessary, we emit the GD first. */ 9378 9379 if ((info->shared || indx != 0) 9380 && (h == NULL 9381 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 9382 || h->root.type != bfd_link_hash_undefweak)) 9383 { 9384 need_relocs = TRUE; 9385 BFD_ASSERT (srelgot != NULL); 9386 } 9387 9388 if (tls_type & GOT_TLS_GDESC) 9389 { 9390 bfd_byte *loc; 9391 9392 /* We should have relaxed, unless this is an undefined 9393 weak symbol. */ 9394 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak)) 9395 || info->shared); 9396 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8 9397 <= globals->root.sgotplt->size); 9398 9399 outrel.r_addend = 0; 9400 outrel.r_offset = (globals->root.sgotplt->output_section->vma 9401 + globals->root.sgotplt->output_offset 9402 + offplt 9403 + globals->sgotplt_jump_table_size); 9404 9405 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC); 9406 sreloc = globals->root.srelplt; 9407 loc = sreloc->contents; 9408 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals); 9409 BFD_ASSERT (loc + RELOC_SIZE (globals) 9410 <= sreloc->contents + sreloc->size); 9411 9412 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc); 9413 9414 /* For globals, the first word in the relocation gets 9415 the relocation index and the top bit set, or zero, 9416 if we're binding now. For locals, it gets the 9417 symbol's offset in the tls section. */ 9418 bfd_put_32 (output_bfd, 9419 !h ? value - elf_hash_table (info)->tls_sec->vma 9420 : info->flags & DF_BIND_NOW ? 0 9421 : 0x80000000 | ELF32_R_SYM (outrel.r_info), 9422 globals->root.sgotplt->contents + offplt 9423 + globals->sgotplt_jump_table_size); 9424 9425 /* Second word in the relocation is always zero. */ 9426 bfd_put_32 (output_bfd, 0, 9427 globals->root.sgotplt->contents + offplt 9428 + globals->sgotplt_jump_table_size + 4); 9429 } 9430 if (tls_type & GOT_TLS_GD) 9431 { 9432 if (need_relocs) 9433 { 9434 outrel.r_addend = 0; 9435 outrel.r_offset = (sgot->output_section->vma 9436 + sgot->output_offset 9437 + cur_off); 9438 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32); 9439 9440 if (globals->use_rel) 9441 bfd_put_32 (output_bfd, outrel.r_addend, 9442 sgot->contents + cur_off); 9443 9444 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel); 9445 9446 if (indx == 0) 9447 bfd_put_32 (output_bfd, value - dtpoff_base (info), 9448 sgot->contents + cur_off + 4); 9449 else 9450 { 9451 outrel.r_addend = 0; 9452 outrel.r_info = ELF32_R_INFO (indx, 9453 R_ARM_TLS_DTPOFF32); 9454 outrel.r_offset += 4; 9455 9456 if (globals->use_rel) 9457 bfd_put_32 (output_bfd, outrel.r_addend, 9458 sgot->contents + cur_off + 4); 9459 9460 elf32_arm_add_dynreloc (output_bfd, info, 9461 srelgot, &outrel); 9462 } 9463 } 9464 else 9465 { 9466 /* If we are not emitting relocations for a 9467 general dynamic reference, then we must be in a 9468 static link or an executable link with the 9469 symbol binding locally. Mark it as belonging 9470 to module 1, the executable. 
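The module index is stored in the first GOT word and the offset from the start of the module's TLS block in the second.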
*/ 9471 bfd_put_32 (output_bfd, 1, 9472 sgot->contents + cur_off); 9473 bfd_put_32 (output_bfd, value - dtpoff_base (info), 9474 sgot->contents + cur_off + 4); 9475 } 9476 9477 cur_off += 8; 9478 } 9479 9480 if (tls_type & GOT_TLS_IE) 9481 { 9482 if (need_relocs) 9483 { 9484 if (indx == 0) 9485 outrel.r_addend = value - dtpoff_base (info); 9486 else 9487 outrel.r_addend = 0; 9488 outrel.r_offset = (sgot->output_section->vma 9489 + sgot->output_offset 9490 + cur_off); 9491 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32); 9492 9493 if (globals->use_rel) 9494 bfd_put_32 (output_bfd, outrel.r_addend, 9495 sgot->contents + cur_off); 9496 9497 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel); 9498 } 9499 else 9500 bfd_put_32 (output_bfd, tpoff (info, value), 9501 sgot->contents + cur_off); 9502 cur_off += 4; 9503 } 9504 9505 if (h != NULL) 9506 h->got.offset |= 1; 9507 else 9508 local_got_offsets[r_symndx] |= 1; 9509 } 9510 9511 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32) 9512 off += 8; 9513 else if (tls_type & GOT_TLS_GDESC) 9514 off = offplt; 9515 9516 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL 9517 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL) 9518 { 9519 bfd_signed_vma offset; 9520 /* TLS stubs are arm mode. The original symbol is a 9521 data object, so branch_type is bogus. */ 9522 branch_type = ST_BRANCH_TO_ARM; 9523 enum elf32_arm_stub_type stub_type 9524 = arm_type_of_stub (info, input_section, rel, 9525 st_type, &branch_type, 9526 (struct elf32_arm_link_hash_entry *)h, 9527 globals->tls_trampoline, globals->root.splt, 9528 input_bfd, sym_name); 9529 9530 if (stub_type != arm_stub_none) 9531 { 9532 struct elf32_arm_stub_hash_entry *stub_entry 9533 = elf32_arm_get_stub_entry 9534 (input_section, globals->root.splt, 0, rel, 9535 globals, stub_type); 9536 offset = (stub_entry->stub_offset 9537 + stub_entry->stub_sec->output_offset 9538 + stub_entry->stub_sec->output_section->vma); 9539 } 9540 else 9541 offset = (globals->root.splt->output_section->vma 9542 + globals->root.splt->output_offset 9543 + globals->tls_trampoline); 9544 9545 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL) 9546 { 9547 unsigned long inst; 9548 9549 offset -= (input_section->output_section->vma 9550 + input_section->output_offset 9551 + rel->r_offset + 8); 9552 9553 inst = offset >> 2; 9554 inst &= 0x00ffffff; 9555 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000); 9556 } 9557 else 9558 { 9559 /* Thumb blx encodes the offset in a complicated 9560 fashion. 
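The sign bit and the two J bits are scattered across the two halfwords (the J bits are stored inverted and XORed with the sign bit), and for BLX the target must additionally be word-aligned, hence the rounding below.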
*/ 9561 unsigned upper_insn, lower_insn; 9562 unsigned neg; 9563 9564 offset -= (input_section->output_section->vma 9565 + input_section->output_offset 9566 + rel->r_offset + 4); 9567 9568 if (stub_type != arm_stub_none 9569 && arm_stub_is_thumb (stub_type)) 9570 { 9571 lower_insn = 0xd000; 9572 } 9573 else 9574 { 9575 lower_insn = 0xc000; 9576 /* Round up the offset to a word boundary. */ 9577 offset = (offset + 2) & ~2; 9578 } 9579 9580 neg = offset < 0; 9581 upper_insn = (0xf000 9582 | ((offset >> 12) & 0x3ff) 9583 | (neg << 10)); 9584 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13) 9585 | (((!((offset >> 22) & 1)) ^ neg) << 11) 9586 | ((offset >> 1) & 0x7ff); 9587 bfd_put_16 (input_bfd, upper_insn, hit_data); 9588 bfd_put_16 (input_bfd, lower_insn, hit_data + 2); 9589 return bfd_reloc_ok; 9590 } 9591 } 9592 /* These relocations need special care: besides the fact that 9593 they point somewhere in .gotplt, the addend must be 9594 adjusted according to the type of instruction 9595 we refer to. */ 9596 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC)) 9597 { 9598 unsigned long data, insn; 9599 unsigned thumb; 9600 9601 data = bfd_get_32 (input_bfd, hit_data); 9602 thumb = data & 1; 9603 data &= ~1u; 9604 9605 if (thumb) 9606 { 9607 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data); 9608 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800) 9609 insn = (insn << 16) 9610 | bfd_get_16 (input_bfd, 9611 contents + rel->r_offset - data + 2); 9612 if ((insn & 0xf800c000) == 0xf000c000) 9613 /* bl/blx */ 9614 value = -6; 9615 else if ((insn & 0xffffff00) == 0x4400) 9616 /* add */ 9617 value = -5; 9618 else 9619 { 9620 (*_bfd_error_handler) 9621 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"), 9622 input_bfd, input_section, 9623 (unsigned long)rel->r_offset, insn); 9624 return bfd_reloc_notsupported; 9625 } 9626 } 9627 else 9628 { 9629 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data); 9630 9631 switch (insn >> 24) 9632 { 9633 case 0xeb: /* bl */ 9634 case 0xfa: /* blx */ 9635 value = -4; 9636 break; 9637 9638 case 0xe0: /* add */ 9639 value = -8; 9640 break; 9641 9642 default: 9643 (*_bfd_error_handler) 9644 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"), 9645 input_bfd, input_section, 9646 (unsigned long)rel->r_offset, insn); 9647 return bfd_reloc_notsupported; 9648 } 9649 } 9650 9651 value += ((globals->root.sgotplt->output_section->vma 9652 + globals->root.sgotplt->output_offset + off) 9653 - (input_section->output_section->vma 9654 + input_section->output_offset 9655 + rel->r_offset) 9656 + globals->sgotplt_jump_table_size); 9657 } 9658 else 9659 value = ((globals->root.sgot->output_section->vma 9660 + globals->root.sgot->output_offset + off) 9661 - (input_section->output_section->vma 9662 + input_section->output_offset + rel->r_offset)); 9663 9664 return _bfd_final_link_relocate (howto, input_bfd, input_section, 9665 contents, rel->r_offset, value, 9666 rel->r_addend); 9667 } 9668 9669 case R_ARM_TLS_LE32: 9670 if (info->shared && !info->pie) 9671 { 9672 (*_bfd_error_handler) 9673 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"), 9674 input_bfd, input_section, 9675 (long) rel->r_offset, howto->name); 9676 return bfd_reloc_notsupported; 9677 } 9678 else 9679 value = tpoff (info, value); 9680 9681 return _bfd_final_link_relocate (howto, input_bfd, input_section, 9682 contents, rel->r_offset, value, 9683 rel->r_addend); 9684 9685 case 
R_ARM_V4BX: 9686 if (globals->fix_v4bx) 9687 { 9688 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 9689 9690 /* Ensure that we have a BX instruction. */ 9691 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10); 9692 9693 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf) 9694 { 9695 /* Branch to veneer. */ 9696 bfd_vma glue_addr; 9697 glue_addr = elf32_arm_bx_glue (info, insn & 0xf); 9698 glue_addr -= input_section->output_section->vma 9699 + input_section->output_offset 9700 + rel->r_offset + 8; 9701 insn = (insn & 0xf0000000) | 0x0a000000 9702 | ((glue_addr >> 2) & 0x00ffffff); 9703 } 9704 else 9705 { 9706 /* Preserve Rm (lowest four bits) and the condition code 9707 (highest four bits). Other bits encode MOV PC,Rm. */ 9708 insn = (insn & 0xf000000f) | 0x01a0f000; 9709 } 9710 9711 bfd_put_32 (input_bfd, insn, hit_data); 9712 } 9713 return bfd_reloc_ok; 9714 9715 case R_ARM_MOVW_ABS_NC: 9716 case R_ARM_MOVT_ABS: 9717 case R_ARM_MOVW_PREL_NC: 9718 case R_ARM_MOVT_PREL: 9719 /* Until we properly support segment-base-relative addressing then 9720 we assume the segment base to be zero, as for the group relocations. 9721 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC 9722 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */ 9723 case R_ARM_MOVW_BREL_NC: 9724 case R_ARM_MOVW_BREL: 9725 case R_ARM_MOVT_BREL: 9726 { 9727 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 9728 9729 if (globals->use_rel) 9730 { 9731 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff); 9732 signed_addend = (addend ^ 0x8000) - 0x8000; 9733 } 9734 9735 value += signed_addend; 9736 9737 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL) 9738 value -= (input_section->output_section->vma 9739 + input_section->output_offset + rel->r_offset); 9740 9741 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000) 9742 return bfd_reloc_overflow; 9743 9744 if (branch_type == ST_BRANCH_TO_THUMB) 9745 value |= 1; 9746 9747 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL 9748 || r_type == R_ARM_MOVT_BREL) 9749 value >>= 16; 9750 9751 insn &= 0xfff0f000; 9752 insn |= value & 0xfff; 9753 insn |= (value & 0xf000) << 4; 9754 bfd_put_32 (input_bfd, insn, hit_data); 9755 } 9756 return bfd_reloc_ok; 9757 9758 case R_ARM_THM_MOVW_ABS_NC: 9759 case R_ARM_THM_MOVT_ABS: 9760 case R_ARM_THM_MOVW_PREL_NC: 9761 case R_ARM_THM_MOVT_PREL: 9762 /* Until we properly support segment-base-relative addressing then 9763 we assume the segment base to be zero, as for the above relocations. 9764 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as 9765 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics 9766 as R_ARM_THM_MOVT_ABS. 
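The 16-bit value is split across the imm4, i, imm3 and imm8 fields of the two halfwords (for example 0x1234 is stored as imm4=1, i=0, imm3=2, imm8=0x34), so the addend below has to be reassembled from, and scattered back into, those fields.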
*/ 9767 case R_ARM_THM_MOVW_BREL_NC: 9768 case R_ARM_THM_MOVW_BREL: 9769 case R_ARM_THM_MOVT_BREL: 9770 { 9771 bfd_vma insn; 9772 9773 insn = bfd_get_16 (input_bfd, hit_data) << 16; 9774 insn |= bfd_get_16 (input_bfd, hit_data + 2); 9775 9776 if (globals->use_rel) 9777 { 9778 addend = ((insn >> 4) & 0xf000) 9779 | ((insn >> 15) & 0x0800) 9780 | ((insn >> 4) & 0x0700) 9781 | (insn & 0x00ff); 9782 signed_addend = (addend ^ 0x8000) - 0x8000; 9783 } 9784 9785 value += signed_addend; 9786 9787 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL) 9788 value -= (input_section->output_section->vma 9789 + input_section->output_offset + rel->r_offset); 9790 9791 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000) 9792 return bfd_reloc_overflow; 9793 9794 if (branch_type == ST_BRANCH_TO_THUMB) 9795 value |= 1; 9796 9797 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL 9798 || r_type == R_ARM_THM_MOVT_BREL) 9799 value >>= 16; 9800 9801 insn &= 0xfbf08f00; 9802 insn |= (value & 0xf000) << 4; 9803 insn |= (value & 0x0800) << 15; 9804 insn |= (value & 0x0700) << 4; 9805 insn |= (value & 0x00ff); 9806 9807 bfd_put_16 (input_bfd, insn >> 16, hit_data); 9808 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2); 9809 } 9810 return bfd_reloc_ok; 9811 9812 case R_ARM_ALU_PC_G0_NC: 9813 case R_ARM_ALU_PC_G1_NC: 9814 case R_ARM_ALU_PC_G0: 9815 case R_ARM_ALU_PC_G1: 9816 case R_ARM_ALU_PC_G2: 9817 case R_ARM_ALU_SB_G0_NC: 9818 case R_ARM_ALU_SB_G1_NC: 9819 case R_ARM_ALU_SB_G0: 9820 case R_ARM_ALU_SB_G1: 9821 case R_ARM_ALU_SB_G2: 9822 { 9823 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 9824 bfd_vma pc = input_section->output_section->vma 9825 + input_section->output_offset + rel->r_offset; 9826 /* sb should be the origin of the *segment* containing the symbol. 9827 It is not clear how to obtain this OS-dependent value, so we 9828 make an arbitrary choice of zero. */ 9829 bfd_vma sb = 0; 9830 bfd_vma residual; 9831 bfd_vma g_n; 9832 bfd_signed_vma signed_value; 9833 int group = 0; 9834 9835 /* Determine which group of bits to select. */ 9836 switch (r_type) 9837 { 9838 case R_ARM_ALU_PC_G0_NC: 9839 case R_ARM_ALU_PC_G0: 9840 case R_ARM_ALU_SB_G0_NC: 9841 case R_ARM_ALU_SB_G0: 9842 group = 0; 9843 break; 9844 9845 case R_ARM_ALU_PC_G1_NC: 9846 case R_ARM_ALU_PC_G1: 9847 case R_ARM_ALU_SB_G1_NC: 9848 case R_ARM_ALU_SB_G1: 9849 group = 1; 9850 break; 9851 9852 case R_ARM_ALU_PC_G2: 9853 case R_ARM_ALU_SB_G2: 9854 group = 2; 9855 break; 9856 9857 default: 9858 abort (); 9859 } 9860 9861 /* If REL, extract the addend from the insn. If RELA, it will 9862 have already been fetched for us. */ 9863 if (globals->use_rel) 9864 { 9865 int negative; 9866 bfd_vma constant = insn & 0xff; 9867 bfd_vma rotation = (insn & 0xf00) >> 8; 9868 9869 if (rotation == 0) 9870 signed_addend = constant; 9871 else 9872 { 9873 /* Compensate for the fact that in the instruction, the 9874 rotation is stored in multiples of 2 bits. */ 9875 rotation *= 2; 9876 9877 /* Rotate "constant" right by "rotation" bits. */ 9878 signed_addend = (constant >> rotation) | 9879 (constant << (8 * sizeof (bfd_vma) - rotation)); 9880 } 9881 9882 /* Determine if the instruction is an ADD or a SUB. 9883 (For REL, this determines the sign of the addend.) 
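identify_add_or_sub reports the result as +1 for ADD and -1 for SUB; a return of 0 means some other opcode and is rejected below.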
*/ 9884 negative = identify_add_or_sub (insn); 9885 if (negative == 0) 9886 { 9887 (*_bfd_error_handler) 9888 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"), 9889 input_bfd, input_section, 9890 (long) rel->r_offset, howto->name); 9891 return bfd_reloc_overflow; 9892 } 9893 9894 signed_addend *= negative; 9895 } 9896 9897 /* Compute the value (X) to go in the place. */ 9898 if (r_type == R_ARM_ALU_PC_G0_NC 9899 || r_type == R_ARM_ALU_PC_G1_NC 9900 || r_type == R_ARM_ALU_PC_G0 9901 || r_type == R_ARM_ALU_PC_G1 9902 || r_type == R_ARM_ALU_PC_G2) 9903 /* PC relative. */ 9904 signed_value = value - pc + signed_addend; 9905 else 9906 /* Section base relative. */ 9907 signed_value = value - sb + signed_addend; 9908 9909 /* If the target symbol is a Thumb function, then set the 9910 Thumb bit in the address. */ 9911 if (branch_type == ST_BRANCH_TO_THUMB) 9912 signed_value |= 1; 9913 9914 /* Calculate the value of the relevant G_n, in encoded 9915 constant-with-rotation format. */ 9916 g_n = calculate_group_reloc_mask (abs (signed_value), group, 9917 &residual); 9918 9919 /* Check for overflow if required. */ 9920 if ((r_type == R_ARM_ALU_PC_G0 9921 || r_type == R_ARM_ALU_PC_G1 9922 || r_type == R_ARM_ALU_PC_G2 9923 || r_type == R_ARM_ALU_SB_G0 9924 || r_type == R_ARM_ALU_SB_G1 9925 || r_type == R_ARM_ALU_SB_G2) && residual != 0) 9926 { 9927 (*_bfd_error_handler) 9928 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), 9929 input_bfd, input_section, 9930 (long) rel->r_offset, abs (signed_value), howto->name); 9931 return bfd_reloc_overflow; 9932 } 9933 9934 /* Mask out the value and the ADD/SUB part of the opcode; take care 9935 not to destroy the S bit. */ 9936 insn &= 0xff1ff000; 9937 9938 /* Set the opcode according to whether the value to go in the 9939 place is negative. */ 9940 if (signed_value < 0) 9941 insn |= 1 << 22; 9942 else 9943 insn |= 1 << 23; 9944 9945 /* Encode the offset. */ 9946 insn |= g_n; 9947 9948 bfd_put_32 (input_bfd, insn, hit_data); 9949 } 9950 return bfd_reloc_ok; 9951 9952 case R_ARM_LDR_PC_G0: 9953 case R_ARM_LDR_PC_G1: 9954 case R_ARM_LDR_PC_G2: 9955 case R_ARM_LDR_SB_G0: 9956 case R_ARM_LDR_SB_G1: 9957 case R_ARM_LDR_SB_G2: 9958 { 9959 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 9960 bfd_vma pc = input_section->output_section->vma 9961 + input_section->output_offset + rel->r_offset; 9962 bfd_vma sb = 0; /* See note above. */ 9963 bfd_vma residual; 9964 bfd_signed_vma signed_value; 9965 int group = 0; 9966 9967 /* Determine which groups of bits to calculate. */ 9968 switch (r_type) 9969 { 9970 case R_ARM_LDR_PC_G0: 9971 case R_ARM_LDR_SB_G0: 9972 group = 0; 9973 break; 9974 9975 case R_ARM_LDR_PC_G1: 9976 case R_ARM_LDR_SB_G1: 9977 group = 1; 9978 break; 9979 9980 case R_ARM_LDR_PC_G2: 9981 case R_ARM_LDR_SB_G2: 9982 group = 2; 9983 break; 9984 9985 default: 9986 abort (); 9987 } 9988 9989 /* If REL, extract the addend from the insn. If RELA, it will 9990 have already been fetched for us. */ 9991 if (globals->use_rel) 9992 { 9993 int negative = (insn & (1 << 23)) ? 1 : -1; 9994 signed_addend = negative * (insn & 0xfff); 9995 } 9996 9997 /* Compute the value (X) to go in the place. */ 9998 if (r_type == R_ARM_LDR_PC_G0 9999 || r_type == R_ARM_LDR_PC_G1 10000 || r_type == R_ARM_LDR_PC_G2) 10001 /* PC relative. */ 10002 signed_value = value - pc + signed_addend; 10003 else 10004 /* Section base relative. 
*/ 10005 signed_value = value - sb + signed_addend; 10006 10007 /* Calculate the value of the relevant G_{n-1} to obtain 10008 the residual at that stage. */ 10009 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual); 10010 10011 /* Check for overflow. */ 10012 if (residual >= 0x1000) 10013 { 10014 (*_bfd_error_handler) 10015 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), 10016 input_bfd, input_section, 10017 (long) rel->r_offset, abs (signed_value), howto->name); 10018 return bfd_reloc_overflow; 10019 } 10020 10021 /* Mask out the value and U bit. */ 10022 insn &= 0xff7ff000; 10023 10024 /* Set the U bit if the value to go in the place is non-negative. */ 10025 if (signed_value >= 0) 10026 insn |= 1 << 23; 10027 10028 /* Encode the offset. */ 10029 insn |= residual; 10030 10031 bfd_put_32 (input_bfd, insn, hit_data); 10032 } 10033 return bfd_reloc_ok; 10034 10035 case R_ARM_LDRS_PC_G0: 10036 case R_ARM_LDRS_PC_G1: 10037 case R_ARM_LDRS_PC_G2: 10038 case R_ARM_LDRS_SB_G0: 10039 case R_ARM_LDRS_SB_G1: 10040 case R_ARM_LDRS_SB_G2: 10041 { 10042 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 10043 bfd_vma pc = input_section->output_section->vma 10044 + input_section->output_offset + rel->r_offset; 10045 bfd_vma sb = 0; /* See note above. */ 10046 bfd_vma residual; 10047 bfd_signed_vma signed_value; 10048 int group = 0; 10049 10050 /* Determine which groups of bits to calculate. */ 10051 switch (r_type) 10052 { 10053 case R_ARM_LDRS_PC_G0: 10054 case R_ARM_LDRS_SB_G0: 10055 group = 0; 10056 break; 10057 10058 case R_ARM_LDRS_PC_G1: 10059 case R_ARM_LDRS_SB_G1: 10060 group = 1; 10061 break; 10062 10063 case R_ARM_LDRS_PC_G2: 10064 case R_ARM_LDRS_SB_G2: 10065 group = 2; 10066 break; 10067 10068 default: 10069 abort (); 10070 } 10071 10072 /* If REL, extract the addend from the insn. If RELA, it will 10073 have already been fetched for us. */ 10074 if (globals->use_rel) 10075 { 10076 int negative = (insn & (1 << 23)) ? 1 : -1; 10077 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf)); 10078 } 10079 10080 /* Compute the value (X) to go in the place. */ 10081 if (r_type == R_ARM_LDRS_PC_G0 10082 || r_type == R_ARM_LDRS_PC_G1 10083 || r_type == R_ARM_LDRS_PC_G2) 10084 /* PC relative. */ 10085 signed_value = value - pc + signed_addend; 10086 else 10087 /* Section base relative. */ 10088 signed_value = value - sb + signed_addend; 10089 10090 /* Calculate the value of the relevant G_{n-1} to obtain 10091 the residual at that stage. */ 10092 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual); 10093 10094 /* Check for overflow. */ 10095 if (residual >= 0x100) 10096 { 10097 (*_bfd_error_handler) 10098 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), 10099 input_bfd, input_section, 10100 (long) rel->r_offset, abs (signed_value), howto->name); 10101 return bfd_reloc_overflow; 10102 } 10103 10104 /* Mask out the value and U bit. */ 10105 insn &= 0xff7ff0f0; 10106 10107 /* Set the U bit if the value to go in the place is non-negative. */ 10108 if (signed_value >= 0) 10109 insn |= 1 << 23; 10110 10111 /* Encode the offset. 
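For these load/store instructions the 8-bit residual is split into two 4-bit immediate fields, bits 11:8 and bits 3:0.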
*/ 10112 insn |= ((residual & 0xf0) << 4) | (residual & 0xf); 10113 10114 bfd_put_32 (input_bfd, insn, hit_data); 10115 } 10116 return bfd_reloc_ok; 10117 10118 case R_ARM_LDC_PC_G0: 10119 case R_ARM_LDC_PC_G1: 10120 case R_ARM_LDC_PC_G2: 10121 case R_ARM_LDC_SB_G0: 10122 case R_ARM_LDC_SB_G1: 10123 case R_ARM_LDC_SB_G2: 10124 { 10125 bfd_vma insn = bfd_get_32 (input_bfd, hit_data); 10126 bfd_vma pc = input_section->output_section->vma 10127 + input_section->output_offset + rel->r_offset; 10128 bfd_vma sb = 0; /* See note above. */ 10129 bfd_vma residual; 10130 bfd_signed_vma signed_value; 10131 int group = 0; 10132 10133 /* Determine which groups of bits to calculate. */ 10134 switch (r_type) 10135 { 10136 case R_ARM_LDC_PC_G0: 10137 case R_ARM_LDC_SB_G0: 10138 group = 0; 10139 break; 10140 10141 case R_ARM_LDC_PC_G1: 10142 case R_ARM_LDC_SB_G1: 10143 group = 1; 10144 break; 10145 10146 case R_ARM_LDC_PC_G2: 10147 case R_ARM_LDC_SB_G2: 10148 group = 2; 10149 break; 10150 10151 default: 10152 abort (); 10153 } 10154 10155 /* If REL, extract the addend from the insn. If RELA, it will 10156 have already been fetched for us. */ 10157 if (globals->use_rel) 10158 { 10159 int negative = (insn & (1 << 23)) ? 1 : -1; 10160 signed_addend = negative * ((insn & 0xff) << 2); 10161 } 10162 10163 /* Compute the value (X) to go in the place. */ 10164 if (r_type == R_ARM_LDC_PC_G0 10165 || r_type == R_ARM_LDC_PC_G1 10166 || r_type == R_ARM_LDC_PC_G2) 10167 /* PC relative. */ 10168 signed_value = value - pc + signed_addend; 10169 else 10170 /* Section base relative. */ 10171 signed_value = value - sb + signed_addend; 10172 10173 /* Calculate the value of the relevant G_{n-1} to obtain 10174 the residual at that stage. */ 10175 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual); 10176 10177 /* Check for overflow. (The absolute value to go in the place must be 10178 divisible by four and, after having been divided by four, must 10179 fit in eight bits.) */ 10180 if ((residual & 0x3) != 0 || residual >= 0x400) 10181 { 10182 (*_bfd_error_handler) 10183 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), 10184 input_bfd, input_section, 10185 (long) rel->r_offset, abs (signed_value), howto->name); 10186 return bfd_reloc_overflow; 10187 } 10188 10189 /* Mask out the value and U bit. */ 10190 insn &= 0xff7fff00; 10191 10192 /* Set the U bit if the value to go in the place is non-negative. */ 10193 if (signed_value >= 0) 10194 insn |= 1 << 23; 10195 10196 /* Encode the offset. */ 10197 insn |= residual >> 2; 10198 10199 bfd_put_32 (input_bfd, insn, hit_data); 10200 } 10201 return bfd_reloc_ok; 10202 10203 default: 10204 return bfd_reloc_notsupported; 10205 } 10206 } 10207 10208 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. 
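This is only used for REL-style relocatable links, where addends live in the section contents (rather than in the relocation entries) and so must be adjusted in place.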
*/ 10209 static void 10210 arm_add_to_rel (bfd * abfd, 10211 bfd_byte * address, 10212 reloc_howto_type * howto, 10213 bfd_signed_vma increment) 10214 { 10215 bfd_signed_vma addend; 10216 10217 if (howto->type == R_ARM_THM_CALL 10218 || howto->type == R_ARM_THM_JUMP24) 10219 { 10220 int upper_insn, lower_insn; 10221 int upper, lower; 10222 10223 upper_insn = bfd_get_16 (abfd, address); 10224 lower_insn = bfd_get_16 (abfd, address + 2); 10225 upper = upper_insn & 0x7ff; 10226 lower = lower_insn & 0x7ff; 10227 10228 addend = (upper << 12) | (lower << 1); 10229 addend += increment; 10230 addend >>= 1; 10231 10232 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff); 10233 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff); 10234 10235 bfd_put_16 (abfd, (bfd_vma) upper_insn, address); 10236 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2); 10237 } 10238 else 10239 { 10240 bfd_vma contents; 10241 10242 contents = bfd_get_32 (abfd, address); 10243 10244 /* Get the (signed) value from the instruction. */ 10245 addend = contents & howto->src_mask; 10246 if (addend & ((howto->src_mask + 1) >> 1)) 10247 { 10248 bfd_signed_vma mask; 10249 10250 mask = -1; 10251 mask &= ~ howto->src_mask; 10252 addend |= mask; 10253 } 10254 10255 /* Add in the increment, (which is a byte value). */ 10256 switch (howto->type) 10257 { 10258 default: 10259 addend += increment; 10260 break; 10261 10262 case R_ARM_PC24: 10263 case R_ARM_PLT32: 10264 case R_ARM_CALL: 10265 case R_ARM_JUMP24: 10266 addend <<= howto->size; 10267 addend += increment; 10268 10269 /* Should we check for overflow here ? */ 10270 10271 /* Drop any undesired bits. */ 10272 addend >>= howto->rightshift; 10273 break; 10274 } 10275 10276 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask); 10277 10278 bfd_put_32 (abfd, contents, address); 10279 } 10280 } 10281 10282 #define IS_ARM_TLS_RELOC(R_TYPE) \ 10283 ((R_TYPE) == R_ARM_TLS_GD32 \ 10284 || (R_TYPE) == R_ARM_TLS_LDO32 \ 10285 || (R_TYPE) == R_ARM_TLS_LDM32 \ 10286 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \ 10287 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \ 10288 || (R_TYPE) == R_ARM_TLS_TPOFF32 \ 10289 || (R_TYPE) == R_ARM_TLS_LE32 \ 10290 || (R_TYPE) == R_ARM_TLS_IE32 \ 10291 || IS_ARM_TLS_GNU_RELOC (R_TYPE)) 10292 10293 /* Specific set of relocations for the gnu tls dialect. */ 10294 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \ 10295 ((R_TYPE) == R_ARM_TLS_GOTDESC \ 10296 || (R_TYPE) == R_ARM_TLS_CALL \ 10297 || (R_TYPE) == R_ARM_THM_TLS_CALL \ 10298 || (R_TYPE) == R_ARM_TLS_DESCSEQ \ 10299 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ) 10300 10301 /* Relocate an ARM ELF section. 
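This is the backend relocate_section hook: the generic ELF linker calls it once per input section, and it applies each entry in RELOCS to CONTENTS, using LOCAL_SYMS and LOCAL_SECTIONS for local symbols and the hash table entries for global ones.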
*/ 10302 10303 static bfd_boolean 10304 elf32_arm_relocate_section (bfd * output_bfd, 10305 struct bfd_link_info * info, 10306 bfd * input_bfd, 10307 asection * input_section, 10308 bfd_byte * contents, 10309 Elf_Internal_Rela * relocs, 10310 Elf_Internal_Sym * local_syms, 10311 asection ** local_sections) 10312 { 10313 Elf_Internal_Shdr *symtab_hdr; 10314 struct elf_link_hash_entry **sym_hashes; 10315 Elf_Internal_Rela *rel; 10316 Elf_Internal_Rela *relend; 10317 const char *name; 10318 struct elf32_arm_link_hash_table * globals; 10319 10320 globals = elf32_arm_hash_table (info); 10321 if (globals == NULL) 10322 return FALSE; 10323 10324 symtab_hdr = & elf_symtab_hdr (input_bfd); 10325 sym_hashes = elf_sym_hashes (input_bfd); 10326 10327 rel = relocs; 10328 relend = relocs + input_section->reloc_count; 10329 for (; rel < relend; rel++) 10330 { 10331 int r_type; 10332 reloc_howto_type * howto; 10333 unsigned long r_symndx; 10334 Elf_Internal_Sym * sym; 10335 asection * sec; 10336 struct elf_link_hash_entry * h; 10337 bfd_vma relocation; 10338 bfd_reloc_status_type r; 10339 arelent bfd_reloc; 10340 char sym_type; 10341 bfd_boolean unresolved_reloc = FALSE; 10342 char *error_message = NULL; 10343 10344 r_symndx = ELF32_R_SYM (rel->r_info); 10345 r_type = ELF32_R_TYPE (rel->r_info); 10346 r_type = arm_real_reloc_type (globals, r_type); 10347 10348 if ( r_type == R_ARM_GNU_VTENTRY 10349 || r_type == R_ARM_GNU_VTINHERIT) 10350 continue; 10351 10352 bfd_reloc.howto = elf32_arm_howto_from_type (r_type); 10353 howto = bfd_reloc.howto; 10354 10355 h = NULL; 10356 sym = NULL; 10357 sec = NULL; 10358 10359 if (r_symndx < symtab_hdr->sh_info) 10360 { 10361 sym = local_syms + r_symndx; 10362 sym_type = ELF32_ST_TYPE (sym->st_info); 10363 sec = local_sections[r_symndx]; 10364 10365 /* An object file might have a reference to a local 10366 undefined symbol. This is a daft object file, but we 10367 should at least do something about it. V4BX & NONE 10368 relocations do not use the symbol and are explicitly 10369 allowed to use the undefined symbol, so allow those. 10370 Likewise for relocations against STN_UNDEF. 
*/ 10371 if (r_type != R_ARM_V4BX 10372 && r_type != R_ARM_NONE 10373 && r_symndx != STN_UNDEF 10374 && bfd_is_und_section (sec) 10375 && ELF_ST_BIND (sym->st_info) != STB_WEAK) 10376 { 10377 if (!info->callbacks->undefined_symbol 10378 (info, bfd_elf_string_from_elf_section 10379 (input_bfd, symtab_hdr->sh_link, sym->st_name), 10380 input_bfd, input_section, 10381 rel->r_offset, TRUE)) 10382 return FALSE; 10383 } 10384 10385 if (globals->use_rel) 10386 { 10387 relocation = (sec->output_section->vma 10388 + sec->output_offset 10389 + sym->st_value); 10390 if (!info->relocatable 10391 && (sec->flags & SEC_MERGE) 10392 && ELF_ST_TYPE (sym->st_info) == STT_SECTION) 10393 { 10394 asection *msec; 10395 bfd_vma addend, value; 10396 10397 switch (r_type) 10398 { 10399 case R_ARM_MOVW_ABS_NC: 10400 case R_ARM_MOVT_ABS: 10401 value = bfd_get_32 (input_bfd, contents + rel->r_offset); 10402 addend = ((value & 0xf0000) >> 4) | (value & 0xfff); 10403 addend = (addend ^ 0x8000) - 0x8000; 10404 break; 10405 10406 case R_ARM_THM_MOVW_ABS_NC: 10407 case R_ARM_THM_MOVT_ABS: 10408 value = bfd_get_16 (input_bfd, contents + rel->r_offset) 10409 << 16; 10410 value |= bfd_get_16 (input_bfd, 10411 contents + rel->r_offset + 2); 10412 addend = ((value & 0xf7000) >> 4) | (value & 0xff) 10413 | ((value & 0x04000000) >> 15); 10414 addend = (addend ^ 0x8000) - 0x8000; 10415 break; 10416 10417 default: 10418 if (howto->rightshift 10419 || (howto->src_mask & (howto->src_mask + 1))) 10420 { 10421 (*_bfd_error_handler) 10422 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"), 10423 input_bfd, input_section, 10424 (long) rel->r_offset, howto->name); 10425 return FALSE; 10426 } 10427 10428 value = bfd_get_32 (input_bfd, contents + rel->r_offset); 10429 10430 /* Get the (signed) value from the instruction. */ 10431 addend = value & howto->src_mask; 10432 if (addend & ((howto->src_mask + 1) >> 1)) 10433 { 10434 bfd_signed_vma mask; 10435 10436 mask = -1; 10437 mask &= ~ howto->src_mask; 10438 addend |= mask; 10439 } 10440 break; 10441 } 10442 10443 msec = sec; 10444 addend = 10445 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend) 10446 - relocation; 10447 addend += msec->output_section->vma + msec->output_offset; 10448 10449 /* Cases here must match those in the preceding 10450 switch statement. 
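Each case writes the adjusted addend back using the same field layout it was extracted from above.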
*/ 10451 switch (r_type) 10452 { 10453 case R_ARM_MOVW_ABS_NC: 10454 case R_ARM_MOVT_ABS: 10455 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4) 10456 | (addend & 0xfff); 10457 bfd_put_32 (input_bfd, value, contents + rel->r_offset); 10458 break; 10459 10460 case R_ARM_THM_MOVW_ABS_NC: 10461 case R_ARM_THM_MOVT_ABS: 10462 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4) 10463 | (addend & 0xff) | ((addend & 0x0800) << 15); 10464 bfd_put_16 (input_bfd, value >> 16, 10465 contents + rel->r_offset); 10466 bfd_put_16 (input_bfd, value, 10467 contents + rel->r_offset + 2); 10468 break; 10469 10470 default: 10471 value = (value & ~ howto->dst_mask) 10472 | (addend & howto->dst_mask); 10473 bfd_put_32 (input_bfd, value, contents + rel->r_offset); 10474 break; 10475 } 10476 } 10477 } 10478 else 10479 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel); 10480 } 10481 else 10482 { 10483 bfd_boolean warned; 10484 10485 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel, 10486 r_symndx, symtab_hdr, sym_hashes, 10487 h, sec, relocation, 10488 unresolved_reloc, warned); 10489 10490 sym_type = h->type; 10491 } 10492 10493 if (sec != NULL && discarded_section (sec)) 10494 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section, 10495 rel, 1, relend, howto, 0, contents); 10496 10497 if (info->relocatable) 10498 { 10499 /* This is a relocatable link. We don't have to change 10500 anything, unless the reloc is against a section symbol, 10501 in which case we have to adjust according to where the 10502 section symbol winds up in the output section. */ 10503 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION) 10504 { 10505 if (globals->use_rel) 10506 arm_add_to_rel (input_bfd, contents + rel->r_offset, 10507 howto, (bfd_signed_vma) sec->output_offset); 10508 else 10509 rel->r_addend += sec->output_offset; 10510 } 10511 continue; 10512 } 10513 10514 if (h != NULL) 10515 name = h->root.root.string; 10516 else 10517 { 10518 name = (bfd_elf_string_from_elf_section 10519 (input_bfd, symtab_hdr->sh_link, sym->st_name)); 10520 if (name == NULL || *name == '\0') 10521 name = bfd_section_name (input_bfd, sec); 10522 } 10523 10524 if (r_symndx != STN_UNDEF 10525 && r_type != R_ARM_NONE 10526 && (h == NULL 10527 || h->root.type == bfd_link_hash_defined 10528 || h->root.type == bfd_link_hash_defweak) 10529 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS)) 10530 { 10531 (*_bfd_error_handler) 10532 ((sym_type == STT_TLS 10533 ? _("%B(%A+0x%lx): %s used with TLS symbol %s") 10534 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")), 10535 input_bfd, 10536 input_section, 10537 (long) rel->r_offset, 10538 howto->name, 10539 name); 10540 } 10541 10542 /* We call elf32_arm_final_link_relocate unless we're completely 10543 done, i.e., the relaxation produced the final output we want, 10544 and we won't let anybody mess with it. Also, we have to do 10545 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation 10546 both in relaxed and non-relaxed cases */ 10547 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type) 10548 || (IS_ARM_TLS_GNU_RELOC (r_type) 10549 && !((h ? elf32_arm_hash_entry (h)->tls_type : 10550 elf32_arm_local_got_tls_type (input_bfd)[r_symndx]) 10551 & GOT_TLS_GDESC))) 10552 { 10553 r = elf32_arm_tls_relax (globals, input_bfd, input_section, 10554 contents, rel, h == NULL); 10555 /* This may have been marked unresolved because it came from 10556 a shared library. But we've just dealt with that. 
*/ 10557 unresolved_reloc = 0; 10558 } 10559 else 10560 r = bfd_reloc_continue; 10561 10562 if (r == bfd_reloc_continue) 10563 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd, 10564 input_section, contents, rel, 10565 relocation, info, sec, name, sym_type, 10566 (h ? h->target_internal 10567 : ARM_SYM_BRANCH_TYPE (sym)), h, 10568 &unresolved_reloc, &error_message); 10569 10570 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections 10571 because such sections are not SEC_ALLOC and thus ld.so will 10572 not process them. */ 10573 if (unresolved_reloc 10574 && !((input_section->flags & SEC_DEBUGGING) != 0 10575 && h->def_dynamic) 10576 && _bfd_elf_section_offset (output_bfd, info, input_section, 10577 rel->r_offset) != (bfd_vma) -1) 10578 { 10579 (*_bfd_error_handler) 10580 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"), 10581 input_bfd, 10582 input_section, 10583 (long) rel->r_offset, 10584 howto->name, 10585 h->root.root.string); 10586 return FALSE; 10587 } 10588 10589 if (r != bfd_reloc_ok) 10590 { 10591 switch (r) 10592 { 10593 case bfd_reloc_overflow: 10594 /* If the overflowing reloc was to an undefined symbol, 10595 we have already printed one error message and there 10596 is no point complaining again. */ 10597 if ((! h || 10598 h->root.type != bfd_link_hash_undefined) 10599 && (!((*info->callbacks->reloc_overflow) 10600 (info, (h ? &h->root : NULL), name, howto->name, 10601 (bfd_vma) 0, input_bfd, input_section, 10602 rel->r_offset)))) 10603 return FALSE; 10604 break; 10605 10606 case bfd_reloc_undefined: 10607 if (!((*info->callbacks->undefined_symbol) 10608 (info, name, input_bfd, input_section, 10609 rel->r_offset, TRUE))) 10610 return FALSE; 10611 break; 10612 10613 case bfd_reloc_outofrange: 10614 error_message = _("out of range"); 10615 goto common_error; 10616 10617 case bfd_reloc_notsupported: 10618 error_message = _("unsupported relocation"); 10619 goto common_error; 10620 10621 case bfd_reloc_dangerous: 10622 /* error_message should already be set. */ 10623 goto common_error; 10624 10625 default: 10626 error_message = _("unknown error"); 10627 /* Fall through. */ 10628 10629 common_error: 10630 BFD_ASSERT (error_message != NULL); 10631 if (!((*info->callbacks->reloc_dangerous) 10632 (info, error_message, input_bfd, input_section, 10633 rel->r_offset))) 10634 return FALSE; 10635 break; 10636 } 10637 } 10638 } 10639 10640 return TRUE; 10641 } 10642 10643 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero, 10644 adds the edit to the start of the list. (The list must be built in order of 10645 ascending TINDEX: the function's callers are primarily responsible for 10646 maintaining that condition). 
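Edits with a non-zero TINDEX, including the UINT_MAX used for trailing CANTUNWIND markers, are appended at the tail; only a TINDEX of zero prepends at the head.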
*/ 10647 10648 static void 10649 add_unwind_table_edit (arm_unwind_table_edit **head, 10650 arm_unwind_table_edit **tail, 10651 arm_unwind_edit_type type, 10652 asection *linked_section, 10653 unsigned int tindex) 10654 { 10655 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *) 10656 xmalloc (sizeof (arm_unwind_table_edit)); 10657 10658 new_edit->type = type; 10659 new_edit->linked_section = linked_section; 10660 new_edit->index = tindex; 10661 10662 if (tindex > 0) 10663 { 10664 new_edit->next = NULL; 10665 10666 if (*tail) 10667 (*tail)->next = new_edit; 10668 10669 (*tail) = new_edit; 10670 10671 if (!*head) 10672 (*head) = new_edit; 10673 } 10674 else 10675 { 10676 new_edit->next = *head; 10677 10678 if (!*tail) 10679 *tail = new_edit; 10680 10681 *head = new_edit; 10682 } 10683 } 10684 10685 static _arm_elf_section_data *get_arm_elf_section_data (asection *); 10686 10687 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */ 10688 static void 10689 adjust_exidx_size(asection *exidx_sec, int adjust) 10690 { 10691 asection *out_sec; 10692 10693 if (!exidx_sec->rawsize) 10694 exidx_sec->rawsize = exidx_sec->size; 10695 10696 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust); 10697 out_sec = exidx_sec->output_section; 10698 /* Adjust size of output section. */ 10699 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust); 10700 } 10701 10702 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */ 10703 static void 10704 insert_cantunwind_after(asection *text_sec, asection *exidx_sec) 10705 { 10706 struct _arm_elf_section_data *exidx_arm_data; 10707 10708 exidx_arm_data = get_arm_elf_section_data (exidx_sec); 10709 add_unwind_table_edit ( 10710 &exidx_arm_data->u.exidx.unwind_edit_list, 10711 &exidx_arm_data->u.exidx.unwind_edit_tail, 10712 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX); 10713 10714 adjust_exidx_size(exidx_sec, 8); 10715 } 10716 10717 /* Scan .ARM.exidx tables, and create a list describing edits which should be 10718 made to those tables, such that: 10719 10720 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries. 10721 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind 10722 codes which have been inlined into the index). 10723 10724 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged. 10725 10726 The edits are applied when the tables are written 10727 (in elf32_arm_write_section). */ 10728 10729 bfd_boolean 10730 elf32_arm_fix_exidx_coverage (asection **text_section_order, 10731 unsigned int num_text_sections, 10732 struct bfd_link_info *info, 10733 bfd_boolean merge_exidx_entries) 10734 { 10735 bfd *inp; 10736 unsigned int last_second_word = 0, i; 10737 asection *last_exidx_sec = NULL; 10738 asection *last_text_sec = NULL; 10739 int last_unwind_type = -1; 10740 10741 /* Walk over all EXIDX sections, and create backlinks from the corresponding 10742 text sections.
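The backlink (u.text.arm_exidx_sec) lets the per-text-section pass below find the unwind table that describes each text section.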
*/ 10743 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next) 10744 { 10745 asection *sec; 10746 10747 for (sec = inp->sections; sec != NULL; sec = sec->next) 10748 { 10749 struct bfd_elf_section_data *elf_sec = elf_section_data (sec); 10750 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr; 10751 10752 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX) 10753 continue; 10754 10755 if (elf_sec->linked_to) 10756 { 10757 Elf_Internal_Shdr *linked_hdr 10758 = &elf_section_data (elf_sec->linked_to)->this_hdr; 10759 struct _arm_elf_section_data *linked_sec_arm_data 10760 = get_arm_elf_section_data (linked_hdr->bfd_section); 10761 10762 if (linked_sec_arm_data == NULL) 10763 continue; 10764 10765 /* Link this .ARM.exidx section back from the text section it 10766 describes. */ 10767 linked_sec_arm_data->u.text.arm_exidx_sec = sec; 10768 } 10769 } 10770 } 10771 10772 /* Walk all text sections in order of increasing VMA. Eliminate duplicate 10773 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes), 10774 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */ 10775 10776 for (i = 0; i < num_text_sections; i++) 10777 { 10778 asection *sec = text_section_order[i]; 10779 asection *exidx_sec; 10780 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec); 10781 struct _arm_elf_section_data *exidx_arm_data; 10782 bfd_byte *contents = NULL; 10783 int deleted_exidx_bytes = 0; 10784 bfd_vma j; 10785 arm_unwind_table_edit *unwind_edit_head = NULL; 10786 arm_unwind_table_edit *unwind_edit_tail = NULL; 10787 Elf_Internal_Shdr *hdr; 10788 bfd *ibfd; 10789 10790 if (arm_data == NULL) 10791 continue; 10792 10793 exidx_sec = arm_data->u.text.arm_exidx_sec; 10794 if (exidx_sec == NULL) 10795 { 10796 /* Section has no unwind data. */ 10797 if (last_unwind_type == 0 || !last_exidx_sec) 10798 continue; 10799 10800 /* Ignore zero sized sections. */ 10801 if (sec->size == 0) 10802 continue; 10803 10804 insert_cantunwind_after(last_text_sec, last_exidx_sec); 10805 last_unwind_type = 0; 10806 continue; 10807 } 10808 10809 /* Skip /DISCARD/ sections. */ 10810 if (bfd_is_abs_section (exidx_sec->output_section)) 10811 continue; 10812 10813 hdr = &elf_section_data (exidx_sec)->this_hdr; 10814 if (hdr->sh_type != SHT_ARM_EXIDX) 10815 continue; 10816 10817 exidx_arm_data = get_arm_elf_section_data (exidx_sec); 10818 if (exidx_arm_data == NULL) 10819 continue; 10820 10821 ibfd = exidx_sec->owner; 10822 10823 if (hdr->contents != NULL) 10824 contents = hdr->contents; 10825 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents)) 10826 /* An error? */ 10827 continue; 10828 10829 for (j = 0; j < hdr->sh_size; j += 8) 10830 { 10831 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4); 10832 int unwind_type; 10833 int elide = 0; 10834 10835 /* An EXIDX_CANTUNWIND entry. */ 10836 if (second_word == 1) 10837 { 10838 if (last_unwind_type == 0) 10839 elide = 1; 10840 unwind_type = 0; 10841 } 10842 /* Inlined unwinding data. Merge if equal to previous. */ 10843 else if ((second_word & 0x80000000) != 0) 10844 { 10845 if (merge_exidx_entries 10846 && last_second_word == second_word && last_unwind_type == 1) 10847 elide = 1; 10848 unwind_type = 1; 10849 last_second_word = second_word; 10850 } 10851 /* Normal table entry. In theory we could merge these too, 10852 but duplicate entries are likely to be much less common.
*/ 10853 else 10854 unwind_type = 2; 10855 10856 if (elide) 10857 { 10858 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail, 10859 DELETE_EXIDX_ENTRY, NULL, j / 8); 10860 10861 deleted_exidx_bytes += 8; 10862 } 10863 10864 last_unwind_type = unwind_type; 10865 } 10866 10867 /* Free contents if we allocated it ourselves. */ 10868 if (contents != hdr->contents) 10869 free (contents); 10870 10871 /* Record edits to be applied later (in elf32_arm_write_section). */ 10872 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head; 10873 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail; 10874 10875 if (deleted_exidx_bytes > 0) 10876 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes); 10877 10878 last_exidx_sec = exidx_sec; 10879 last_text_sec = sec; 10880 } 10881 10882 /* Add terminating CANTUNWIND entry. */ 10883 if (last_exidx_sec && last_unwind_type != 0) 10884 insert_cantunwind_after(last_text_sec, last_exidx_sec); 10885 10886 return TRUE; 10887 } 10888 10889 static bfd_boolean 10890 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd, 10891 bfd *ibfd, const char *name) 10892 { 10893 asection *sec, *osec; 10894 10895 sec = bfd_get_linker_section (ibfd, name); 10896 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0) 10897 return TRUE; 10898 10899 osec = sec->output_section; 10900 if (elf32_arm_write_section (obfd, info, sec, sec->contents)) 10901 return TRUE; 10902 10903 if (! bfd_set_section_contents (obfd, osec, sec->contents, 10904 sec->output_offset, sec->size)) 10905 return FALSE; 10906 10907 return TRUE; 10908 } 10909 10910 static bfd_boolean 10911 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info) 10912 { 10913 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info); 10914 asection *sec, *osec; 10915 10916 if (globals == NULL) 10917 return FALSE; 10918 10919 /* Invoke the regular ELF backend linker to do all the work. */ 10920 if (!bfd_elf_final_link (abfd, info)) 10921 return FALSE; 10922 10923 /* Process stub sections (eg BE8 encoding, ...). */ 10924 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); 10925 int i; 10926 for (i=0; i<htab->top_id; i++) 10927 { 10928 sec = htab->stub_group[i].stub_sec; 10929 /* Only process it once, in its link_sec slot. */ 10930 if (sec && i == htab->stub_group[i].link_sec->id) 10931 { 10932 osec = sec->output_section; 10933 elf32_arm_write_section (abfd, info, sec, sec->contents); 10934 if (! bfd_set_section_contents (abfd, osec, sec->contents, 10935 sec->output_offset, sec->size)) 10936 return FALSE; 10937 } 10938 } 10939 10940 /* Write out any glue sections now that we have created all the 10941 stubs. */ 10942 if (globals->bfd_of_glue_owner != NULL) 10943 { 10944 if (! elf32_arm_output_glue_section (info, abfd, 10945 globals->bfd_of_glue_owner, 10946 ARM2THUMB_GLUE_SECTION_NAME)) 10947 return FALSE; 10948 10949 if (! elf32_arm_output_glue_section (info, abfd, 10950 globals->bfd_of_glue_owner, 10951 THUMB2ARM_GLUE_SECTION_NAME)) 10952 return FALSE; 10953 10954 if (! elf32_arm_output_glue_section (info, abfd, 10955 globals->bfd_of_glue_owner, 10956 VFP11_ERRATUM_VENEER_SECTION_NAME)) 10957 return FALSE; 10958 10959 if (! elf32_arm_output_glue_section (info, abfd, 10960 globals->bfd_of_glue_owner, 10961 ARM_BX_GLUE_SECTION_NAME)) 10962 return FALSE; 10963 } 10964 10965 return TRUE; 10966 } 10967 10968 /* Return a best guess for the machine number based on the attributes. 
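Only a few Tag_CPU_arch values are distinguished; for v5TE objects the Tag_CPU_name string is also checked so that iWMMXt parts can be identified, and anything unrecognised maps to bfd_mach_arm_unknown.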
*/ 10969 10970 static unsigned int 10971 bfd_arm_get_mach_from_attributes (bfd * abfd) 10972 { 10973 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch); 10974 10975 switch (arch) 10976 { 10977 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4; 10978 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T; 10979 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T; 10980 10981 case TAG_CPU_ARCH_V5TE: 10982 { 10983 char * name; 10984 10985 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES); 10986 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s; 10987 10988 if (name) 10989 { 10990 if (strcmp (name, "IWMMXT2") == 0) 10991 return bfd_mach_arm_iWMMXt2; 10992 10993 if (strcmp (name, "IWMMXT") == 0) 10994 return bfd_mach_arm_iWMMXt; 10995 } 10996 10997 return bfd_mach_arm_5TE; 10998 } 10999 11000 default: 11001 return bfd_mach_arm_unknown; 11002 } 11003 } 11004 11005 /* Set the right machine number. */ 11006 11007 static bfd_boolean 11008 elf32_arm_object_p (bfd *abfd) 11009 { 11010 unsigned int mach; 11011 11012 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION); 11013 11014 if (mach == bfd_mach_arm_unknown) 11015 { 11016 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT) 11017 mach = bfd_mach_arm_ep9312; 11018 else 11019 mach = bfd_arm_get_mach_from_attributes (abfd); 11020 } 11021 11022 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach); 11023 return TRUE; 11024 } 11025 11026 /* Function to keep ARM specific flags in the ELF header. */ 11027 11028 static bfd_boolean 11029 elf32_arm_set_private_flags (bfd *abfd, flagword flags) 11030 { 11031 if (elf_flags_init (abfd) 11032 && elf_elfheader (abfd)->e_flags != flags) 11033 { 11034 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN) 11035 { 11036 if (flags & EF_ARM_INTERWORK) 11037 (*_bfd_error_handler) 11038 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"), 11039 abfd); 11040 else 11041 _bfd_error_handler 11042 (_("Warning: Clearing the interworking flag of %B due to outside request"), 11043 abfd); 11044 } 11045 } 11046 else 11047 { 11048 elf_elfheader (abfd)->e_flags = flags; 11049 elf_flags_init (abfd) = TRUE; 11050 } 11051 11052 return TRUE; 11053 } 11054 11055 /* Copy backend specific data from one object module to another. */ 11056 11057 static bfd_boolean 11058 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd) 11059 { 11060 flagword in_flags; 11061 flagword out_flags; 11062 11063 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd)) 11064 return TRUE; 11065 11066 in_flags = elf_elfheader (ibfd)->e_flags; 11067 out_flags = elf_elfheader (obfd)->e_flags; 11068 11069 if (elf_flags_init (obfd) 11070 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN 11071 && in_flags != out_flags) 11072 { 11073 /* Cannot mix APCS26 and APCS32 code. */ 11074 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26)) 11075 return FALSE; 11076 11077 /* Cannot mix float APCS and non-float APCS code. */ 11078 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT)) 11079 return FALSE; 11080 11081 /* If the src and dest have different interworking flags 11082 then turn off the interworking bit. 
*/ 11083 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK)) 11084 { 11085 if (out_flags & EF_ARM_INTERWORK) 11086 _bfd_error_handler 11087 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"), 11088 obfd, ibfd); 11089 11090 in_flags &= ~EF_ARM_INTERWORK; 11091 } 11092 11093 /* Likewise for PIC, though don't warn for this case. */ 11094 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC)) 11095 in_flags &= ~EF_ARM_PIC; 11096 } 11097 11098 elf_elfheader (obfd)->e_flags = in_flags; 11099 elf_flags_init (obfd) = TRUE; 11100 11101 /* Also copy the EI_OSABI field. */ 11102 elf_elfheader (obfd)->e_ident[EI_OSABI] = 11103 elf_elfheader (ibfd)->e_ident[EI_OSABI]; 11104 11105 /* Copy object attributes. */ 11106 _bfd_elf_copy_obj_attributes (ibfd, obfd); 11107 11108 return TRUE; 11109 } 11110 11111 /* Values for Tag_ABI_PCS_R9_use. */ 11112 enum 11113 { 11114 AEABI_R9_V6, 11115 AEABI_R9_SB, 11116 AEABI_R9_TLS, 11117 AEABI_R9_unused 11118 }; 11119 11120 /* Values for Tag_ABI_PCS_RW_data. */ 11121 enum 11122 { 11123 AEABI_PCS_RW_data_absolute, 11124 AEABI_PCS_RW_data_PCrel, 11125 AEABI_PCS_RW_data_SBrel, 11126 AEABI_PCS_RW_data_unused 11127 }; 11128 11129 /* Values for Tag_ABI_enum_size. */ 11130 enum 11131 { 11132 AEABI_enum_unused, 11133 AEABI_enum_short, 11134 AEABI_enum_wide, 11135 AEABI_enum_forced_wide 11136 }; 11137 11138 /* Determine whether an object attribute tag takes an integer, a 11139 string or both. */ 11140 11141 static int 11142 elf32_arm_obj_attrs_arg_type (int tag) 11143 { 11144 if (tag == Tag_compatibility) 11145 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL; 11146 else if (tag == Tag_nodefaults) 11147 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT; 11148 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name) 11149 return ATTR_TYPE_FLAG_STR_VAL; 11150 else if (tag < 32) 11151 return ATTR_TYPE_FLAG_INT_VAL; 11152 else 11153 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL; 11154 } 11155 11156 /* The ABI defines that Tag_conformance should be emitted first, and that 11157 Tag_nodefaults should be second (if either is defined). This sets those 11158 two positions, and bumps up the position of all the remaining tags to 11159 compensate. */ 11160 static int 11161 elf32_arm_obj_attrs_order (int num) 11162 { 11163 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE) 11164 return Tag_conformance; 11165 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1) 11166 return Tag_nodefaults; 11167 if ((num - 2) < Tag_nodefaults) 11168 return num - 2; 11169 if ((num - 1) < Tag_conformance) 11170 return num - 1; 11171 return num; 11172 } 11173 11174 /* Attribute numbers >=64 (mod 128) can be safely ignored. */ 11175 static bfd_boolean 11176 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag) 11177 { 11178 if ((tag & 127) < 64) 11179 { 11180 _bfd_error_handler 11181 (_("%B: Unknown mandatory EABI object attribute %d"), 11182 abfd, tag); 11183 bfd_set_error (bfd_error_bad_value); 11184 return FALSE; 11185 } 11186 else 11187 { 11188 _bfd_error_handler 11189 (_("Warning: %B: Unknown EABI object attribute %d"), 11190 abfd, tag); 11191 return TRUE; 11192 } 11193 } 11194 11195 /* Read the architecture from the Tag_also_compatible_with attribute, if any. 11196 Returns -1 if no architecture could be read. 
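A value of -1 is also returned if the attribute is present but not in the simple one-byte form checked for below.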
*/ 11197 11198 static int 11199 get_secondary_compatible_arch (bfd *abfd) 11200 { 11201 obj_attribute *attr = 11202 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with]; 11203 11204 /* Note: the tag and its argument below are uleb128 values, though 11205 currently-defined values fit in one byte for each. */ 11206 if (attr->s 11207 && attr->s[0] == Tag_CPU_arch 11208 && (attr->s[1] & 128) != 128 11209 && attr->s[2] == 0) 11210 return attr->s[1]; 11211 11212 /* This tag is "safely ignorable", so don't complain if it looks funny. */ 11213 return -1; 11214 } 11215 11216 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute. 11217 The tag is removed if ARCH is -1. */ 11218 11219 static void 11220 set_secondary_compatible_arch (bfd *abfd, int arch) 11221 { 11222 obj_attribute *attr = 11223 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with]; 11224 11225 if (arch == -1) 11226 { 11227 attr->s = NULL; 11228 return; 11229 } 11230 11231 /* Note: the tag and its argument below are uleb128 values, though 11232 currently-defined values fit in one byte for each. */ 11233 if (!attr->s) 11234 attr->s = (char *) bfd_alloc (abfd, 3); 11235 attr->s[0] = Tag_CPU_arch; 11236 attr->s[1] = arch; 11237 attr->s[2] = '\0'; 11238 } 11239 11240 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags 11241 into account. */ 11242 11243 static int 11244 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out, 11245 int newtag, int secondary_compat) 11246 { 11247 #define T(X) TAG_CPU_ARCH_##X 11248 int tagl, tagh, result; 11249 const int v6t2[] = 11250 { 11251 T(V6T2), /* PRE_V4. */ 11252 T(V6T2), /* V4. */ 11253 T(V6T2), /* V4T. */ 11254 T(V6T2), /* V5T. */ 11255 T(V6T2), /* V5TE. */ 11256 T(V6T2), /* V5TEJ. */ 11257 T(V6T2), /* V6. */ 11258 T(V7), /* V6KZ. */ 11259 T(V6T2) /* V6T2. */ 11260 }; 11261 const int v6k[] = 11262 { 11263 T(V6K), /* PRE_V4. */ 11264 T(V6K), /* V4. */ 11265 T(V6K), /* V4T. */ 11266 T(V6K), /* V5T. */ 11267 T(V6K), /* V5TE. */ 11268 T(V6K), /* V5TEJ. */ 11269 T(V6K), /* V6. */ 11270 T(V6KZ), /* V6KZ. */ 11271 T(V7), /* V6T2. */ 11272 T(V6K) /* V6K. */ 11273 }; 11274 const int v7[] = 11275 { 11276 T(V7), /* PRE_V4. */ 11277 T(V7), /* V4. */ 11278 T(V7), /* V4T. */ 11279 T(V7), /* V5T. */ 11280 T(V7), /* V5TE. */ 11281 T(V7), /* V5TEJ. */ 11282 T(V7), /* V6. */ 11283 T(V7), /* V6KZ. */ 11284 T(V7), /* V6T2. */ 11285 T(V7), /* V6K. */ 11286 T(V7) /* V7. */ 11287 }; 11288 const int v6_m[] = 11289 { 11290 -1, /* PRE_V4. */ 11291 -1, /* V4. */ 11292 T(V6K), /* V4T. */ 11293 T(V6K), /* V5T. */ 11294 T(V6K), /* V5TE. */ 11295 T(V6K), /* V5TEJ. */ 11296 T(V6K), /* V6. */ 11297 T(V6KZ), /* V6KZ. */ 11298 T(V7), /* V6T2. */ 11299 T(V6K), /* V6K. */ 11300 T(V7), /* V7. */ 11301 T(V6_M) /* V6_M. */ 11302 }; 11303 const int v6s_m[] = 11304 { 11305 -1, /* PRE_V4. */ 11306 -1, /* V4. */ 11307 T(V6K), /* V4T. */ 11308 T(V6K), /* V5T. */ 11309 T(V6K), /* V5TE. */ 11310 T(V6K), /* V5TEJ. */ 11311 T(V6K), /* V6. */ 11312 T(V6KZ), /* V6KZ. */ 11313 T(V7), /* V6T2. */ 11314 T(V6K), /* V6K. */ 11315 T(V7), /* V7. */ 11316 T(V6S_M), /* V6_M. */ 11317 T(V6S_M) /* V6S_M. */ 11318 }; 11319 const int v7e_m[] = 11320 { 11321 -1, /* PRE_V4. */ 11322 -1, /* V4. */ 11323 T(V7E_M), /* V4T. */ 11324 T(V7E_M), /* V5T. */ 11325 T(V7E_M), /* V5TE. */ 11326 T(V7E_M), /* V5TEJ. */ 11327 T(V7E_M), /* V6. */ 11328 T(V7E_M), /* V6KZ. */ 11329 T(V7E_M), /* V6T2. */ 11330 T(V7E_M), /* V6K. */ 11331 T(V7E_M), /* V7. */ 11332 T(V7E_M), /* V6_M. 
*/ 11333 T(V7E_M), /* V6S_M. */ 11334 T(V7E_M) /* V7E_M. */ 11335 }; 11336 const int v8[] = 11337 { 11338 T(V8), /* PRE_V4. */ 11339 T(V8), /* V4. */ 11340 T(V8), /* V4T. */ 11341 T(V8), /* V5T. */ 11342 T(V8), /* V5TE. */ 11343 T(V8), /* V5TEJ. */ 11344 T(V8), /* V6. */ 11345 T(V8), /* V6KZ. */ 11346 T(V8), /* V6T2. */ 11347 T(V8), /* V6K. */ 11348 T(V8), /* V7. */ 11349 T(V8), /* V6_M. */ 11350 T(V8), /* V6S_M. */ 11351 T(V8), /* V7E_M. */ 11352 T(V8) /* V8. */ 11353 }; 11354 const int v4t_plus_v6_m[] = 11355 { 11356 -1, /* PRE_V4. */ 11357 -1, /* V4. */ 11358 T(V4T), /* V4T. */ 11359 T(V5T), /* V5T. */ 11360 T(V5TE), /* V5TE. */ 11361 T(V5TEJ), /* V5TEJ. */ 11362 T(V6), /* V6. */ 11363 T(V6KZ), /* V6KZ. */ 11364 T(V6T2), /* V6T2. */ 11365 T(V6K), /* V6K. */ 11366 T(V7), /* V7. */ 11367 T(V6_M), /* V6_M. */ 11368 T(V6S_M), /* V6S_M. */ 11369 T(V7E_M), /* V7E_M. */ 11370 T(V8), /* V8. */ 11371 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */ 11372 }; 11373 const int *comb[] = 11374 { 11375 v6t2, 11376 v6k, 11377 v7, 11378 v6_m, 11379 v6s_m, 11380 v7e_m, 11381 v8, 11382 /* Pseudo-architecture. */ 11383 v4t_plus_v6_m 11384 }; 11385 11386 /* Check we've not got a higher architecture than we know about. */ 11387 11388 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH) 11389 { 11390 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd); 11391 return -1; 11392 } 11393 11394 /* Override old tag if we have a Tag_also_compatible_with on the output. */ 11395 11396 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T)) 11397 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M))) 11398 oldtag = T(V4T_PLUS_V6_M); 11399 11400 /* And override the new tag if we have a Tag_also_compatible_with on the 11401 input. */ 11402 11403 if ((newtag == T(V6_M) && secondary_compat == T(V4T)) 11404 || (newtag == T(V4T) && secondary_compat == T(V6_M))) 11405 newtag = T(V4T_PLUS_V6_M); 11406 11407 tagl = (oldtag < newtag) ? oldtag : newtag; 11408 result = tagh = (oldtag > newtag) ? oldtag : newtag; 11409 11410 /* Architectures before V6KZ add features monotonically. */ 11411 if (tagh <= TAG_CPU_ARCH_V6KZ) 11412 return result; 11413 11414 result = comb[tagh - T(V6T2)][tagl]; 11415 11416 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M) 11417 as the canonical version. */ 11418 if (result == T(V4T_PLUS_V6_M)) 11419 { 11420 result = T(V4T); 11421 *secondary_compat_out = T(V6_M); 11422 } 11423 else 11424 *secondary_compat_out = -1; 11425 11426 if (result == -1) 11427 { 11428 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"), 11429 ibfd, oldtag, newtag); 11430 return -1; 11431 } 11432 11433 return result; 11434 #undef T 11435 } 11436 11437 /* Query attributes object to see if integer divide instructions may be 11438 present in an object. */ 11439 static bfd_boolean 11440 elf32_arm_attributes_accept_div (const obj_attribute *attr) 11441 { 11442 int arch = attr[Tag_CPU_arch].i; 11443 int profile = attr[Tag_CPU_arch_profile].i; 11444 11445 switch (attr[Tag_DIV_use].i) 11446 { 11447 case 0: 11448 /* Integer divide allowed if instruction contained in architecture. */ 11449 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M')) 11450 return TRUE; 11451 else if (arch >= TAG_CPU_ARCH_V7E_M) 11452 return TRUE; 11453 else 11454 return FALSE; 11455 11456 case 1: 11457 /* Integer divide explicitly prohibited. */ 11458 return FALSE; 11459 11460 default: 11461 /* Unrecognised case - treat as allowing divide everywhere.
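Only the values 0, 1 and 2 currently have a defined meaning (see the Tag_DIV_use handling in elf32_arm_merge_eabi_attributes below), so being permissive here avoids spuriously rejecting objects that carry a newer, as yet unknown value.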
*/ 11462 case 2: 11463 /* Integer divide allowed in ARM state. */ 11464 return TRUE; 11465 } 11466 } 11467 11468 /* Query attributes object to see if integer divide instructions are 11469 forbidden to be in the object. This is not the inverse of 11470 elf32_arm_attributes_accept_div. */ 11471 static bfd_boolean 11472 elf32_arm_attributes_forbid_div (const obj_attribute *attr) 11473 { 11474 return attr[Tag_DIV_use].i == 1; 11475 } 11476 11477 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there 11478 are conflicting attributes. */ 11479 11480 static bfd_boolean 11481 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) 11482 { 11483 obj_attribute *in_attr; 11484 obj_attribute *out_attr; 11485 /* Some tags have 0 = don't care, 1 = strong requirement, 11486 2 = weak requirement. */ 11487 static const int order_021[3] = {0, 2, 1}; 11488 int i; 11489 bfd_boolean result = TRUE; 11490 11491 /* Skip the linker stubs file. This preserves previous behavior 11492 of accepting unknown attributes in the first input file - but 11493 is that a bug? */ 11494 if (ibfd->flags & BFD_LINKER_CREATED) 11495 return TRUE; 11496 11497 if (!elf_known_obj_attributes_proc (obfd)[0].i) 11498 { 11499 /* This is the first object. Copy the attributes. */ 11500 _bfd_elf_copy_obj_attributes (ibfd, obfd); 11501 11502 out_attr = elf_known_obj_attributes_proc (obfd); 11503 11504 /* Use the Tag_null value to indicate the attributes have been 11505 initialized. */ 11506 out_attr[0].i = 1; 11507 11508 /* We do not output objects with Tag_MPextension_use_legacy - we move 11509 the attribute's value to Tag_MPextension_use. */ 11510 if (out_attr[Tag_MPextension_use_legacy].i != 0) 11511 { 11512 if (out_attr[Tag_MPextension_use].i != 0 11513 && out_attr[Tag_MPextension_use_legacy].i 11514 != out_attr[Tag_MPextension_use].i) 11515 { 11516 _bfd_error_handler 11517 (_("Error: %B has both the current and legacy " 11518 "Tag_MPextension_use attributes"), ibfd); 11519 result = FALSE; 11520 } 11521 11522 out_attr[Tag_MPextension_use] = 11523 out_attr[Tag_MPextension_use_legacy]; 11524 out_attr[Tag_MPextension_use_legacy].type = 0; 11525 out_attr[Tag_MPextension_use_legacy].i = 0; 11526 } 11527 11528 return result; 11529 } 11530 11531 in_attr = elf_known_obj_attributes_proc (ibfd); 11532 out_attr = elf_known_obj_attributes_proc (obfd); 11533 /* This needs to happen before Tag_ABI_FP_number_model is merged. */ 11534 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i) 11535 { 11536 /* Ignore mismatches if the object doesn't use floating point. */ 11537 if (out_attr[Tag_ABI_FP_number_model].i == 0) 11538 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i; 11539 else if (in_attr[Tag_ABI_FP_number_model].i != 0) 11540 { 11541 _bfd_error_handler 11542 (_("error: %B uses VFP register arguments, %B does not"), 11543 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd, 11544 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd); 11545 result = FALSE; 11546 } 11547 } 11548 11549 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++) 11550 { 11551 /* Merge this attribute with existing attributes. */ 11552 switch (i) 11553 { 11554 case Tag_CPU_raw_name: 11555 case Tag_CPU_name: 11556 /* These are merged after Tag_CPU_arch. */ 11557 break; 11558 11559 case Tag_ABI_optimization_goals: 11560 case Tag_ABI_FP_optimization_goals: 11561 /* Use the first value seen. 
*/ 11562 break; 11563 11564 case Tag_CPU_arch: 11565 { 11566 int secondary_compat = -1, secondary_compat_out = -1; 11567 unsigned int saved_out_attr = out_attr[i].i; 11568 static const char *name_table[] = { 11569 /* These aren't real CPU names, but we can't guess 11570 that from the architecture version alone. */ 11571 "Pre v4", 11572 "ARM v4", 11573 "ARM v4T", 11574 "ARM v5T", 11575 "ARM v5TE", 11576 "ARM v5TEJ", 11577 "ARM v6", 11578 "ARM v6KZ", 11579 "ARM v6T2", 11580 "ARM v6K", 11581 "ARM v7", 11582 "ARM v6-M", 11583 "ARM v6S-M", 11584 "ARM v7E-M", "ARM v8" 11585 }; 11586 11587 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */ 11588 secondary_compat = get_secondary_compatible_arch (ibfd); 11589 secondary_compat_out = get_secondary_compatible_arch (obfd); 11590 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i, 11591 &secondary_compat_out, 11592 in_attr[i].i, 11593 secondary_compat); 11594 set_secondary_compatible_arch (obfd, secondary_compat_out); 11595 11596 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */ 11597 if (out_attr[i].i == saved_out_attr) 11598 ; /* Leave the names alone. */ 11599 else if (out_attr[i].i == in_attr[i].i) 11600 { 11601 /* The output architecture has been changed to match the 11602 input architecture. Use the input names. */ 11603 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s 11604 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s) 11605 : NULL; 11606 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s 11607 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s) 11608 : NULL; 11609 } 11610 else 11611 { 11612 out_attr[Tag_CPU_name].s = NULL; 11613 out_attr[Tag_CPU_raw_name].s = NULL; 11614 } 11615 11616 /* If we still don't have a value for Tag_CPU_name, 11617 make one up now. Tag_CPU_raw_name remains blank. */ 11618 if (out_attr[Tag_CPU_name].s == NULL 11619 && out_attr[i].i < ARRAY_SIZE (name_table)) 11620 out_attr[Tag_CPU_name].s = 11621 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]); 11622 } 11623 break; 11624 11625 case Tag_ARM_ISA_use: 11626 case Tag_THUMB_ISA_use: 11627 case Tag_WMMX_arch: 11628 case Tag_Advanced_SIMD_arch: 11629 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */ 11630 case Tag_ABI_FP_rounding: 11631 case Tag_ABI_FP_exceptions: 11632 case Tag_ABI_FP_user_exceptions: 11633 case Tag_ABI_FP_number_model: 11634 case Tag_FP_HP_extension: 11635 case Tag_CPU_unaligned_access: 11636 case Tag_T2EE_use: 11637 case Tag_MPextension_use: 11638 /* Use the largest value specified. */ 11639 if (in_attr[i].i > out_attr[i].i) 11640 out_attr[i].i = in_attr[i].i; 11641 break; 11642 11643 case Tag_ABI_align_preserved: 11644 case Tag_ABI_PCS_RO_data: 11645 /* Use the smallest value specified. */ 11646 if (in_attr[i].i < out_attr[i].i) 11647 out_attr[i].i = in_attr[i].i; 11648 break; 11649 11650 case Tag_ABI_align_needed: 11651 if ((in_attr[i].i > 0 || out_attr[i].i > 0) 11652 && (in_attr[Tag_ABI_align_preserved].i == 0 11653 || out_attr[Tag_ABI_align_preserved].i == 0)) 11654 { 11655 /* This error message should be enabled once all non-conformant 11656 binaries in the toolchain have had the attributes set 11657 properly. 11658 _bfd_error_handler 11659 (_("error: %B: 8-byte data alignment conflicts with %B"), 11660 obfd, ibfd); 11661 result = FALSE; */ 11662 } 11663 /* Fall through. */ 11664 case Tag_ABI_FP_denormal: 11665 case Tag_ABI_PCS_GOT_use: 11666 /* Use the "greatest" from the sequence 0, 2, 1, or the largest 11667 value if greater than 2 (for future-proofing).
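For example, merging an input value of 1 (strong requirement) with an output value of 2 (weak requirement) produces 1, since order_021[1] is greater than order_021[2]; merging 2 with 0 produces 2.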
*/ 11668 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i) 11669 || (in_attr[i].i <= 2 && out_attr[i].i <= 2 11670 && order_021[in_attr[i].i] > order_021[out_attr[i].i])) 11671 out_attr[i].i = in_attr[i].i; 11672 break; 11673 11674 case Tag_Virtualization_use: 11675 /* The virtualization tag effectively stores two bits of 11676 information: the intended use of TrustZone (in bit 0), and the 11677 intended use of Virtualization (in bit 1). */ 11678 if (out_attr[i].i == 0) 11679 out_attr[i].i = in_attr[i].i; 11680 else if (in_attr[i].i != 0 11681 && in_attr[i].i != out_attr[i].i) 11682 { 11683 if (in_attr[i].i <= 3 && out_attr[i].i <= 3) 11684 out_attr[i].i = 3; 11685 else 11686 { 11687 _bfd_error_handler 11688 (_("error: %B: unable to merge virtualization attributes " 11689 "with %B"), 11690 obfd, ibfd); 11691 result = FALSE; 11692 } 11693 } 11694 break; 11695 11696 case Tag_CPU_arch_profile: 11697 if (out_attr[i].i != in_attr[i].i) 11698 { 11699 /* 0 will merge with anything. 11700 'A' and 'S' merge to 'A'. 11701 'R' and 'S' merge to 'R'. 11702 'M' and 'A|R|S' is an error. */ 11703 if (out_attr[i].i == 0 11704 || (out_attr[i].i == 'S' 11705 && (in_attr[i].i == 'A' || in_attr[i].i == 'R'))) 11706 out_attr[i].i = in_attr[i].i; 11707 else if (in_attr[i].i == 0 11708 || (in_attr[i].i == 'S' 11709 && (out_attr[i].i == 'A' || out_attr[i].i == 'R'))) 11710 ; /* Do nothing. */ 11711 else 11712 { 11713 _bfd_error_handler 11714 (_("error: %B: Conflicting architecture profiles %c/%c"), 11715 ibfd, 11716 in_attr[i].i ? in_attr[i].i : '0', 11717 out_attr[i].i ? out_attr[i].i : '0'); 11718 result = FALSE; 11719 } 11720 } 11721 break; 11722 case Tag_FP_arch: 11723 { 11724 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since 11725 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch 11726 when it's 0. It might mean absence of FP hardware if 11727 Tag_FP_arch is zero, otherwise it is effectively SP + DP. */ 11728 11729 #define VFP_VERSION_COUNT 8 11730 static const struct 11731 { 11732 int ver; 11733 int regs; 11734 } vfp_versions[VFP_VERSION_COUNT] = 11735 { 11736 {0, 0}, 11737 {1, 16}, 11738 {2, 16}, 11739 {3, 32}, 11740 {3, 16}, 11741 {4, 32}, 11742 {4, 16}, 11743 {8, 32} 11744 }; 11745 int ver; 11746 int regs; 11747 int newval; 11748 11749 /* If the output has no requirement about FP hardware, 11750 follow the requirement of the input. */ 11751 if (out_attr[i].i == 0) 11752 { 11753 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0); 11754 out_attr[i].i = in_attr[i].i; 11755 out_attr[Tag_ABI_HardFP_use].i 11756 = in_attr[Tag_ABI_HardFP_use].i; 11757 break; 11758 } 11759 /* If the input has no requirement about FP hardware, do 11760 nothing. */ 11761 else if (in_attr[i].i == 0) 11762 { 11763 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0); 11764 break; 11765 } 11766 11767 /* Both the input and the output have nonzero Tag_FP_arch. 11768 So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */ 11769 11770 /* If both the input and the output have zero Tag_ABI_HardFP_use, 11771 do nothing. */ 11772 if (in_attr[Tag_ABI_HardFP_use].i == 0 11773 && out_attr[Tag_ABI_HardFP_use].i == 0) 11774 ; 11775 /* If the input and the output have different Tag_ABI_HardFP_use, 11776 the combination of them is 3 (SP & DP). */ 11777 else if (in_attr[Tag_ABI_HardFP_use].i 11778 != out_attr[Tag_ABI_HardFP_use].i) 11779 out_attr[Tag_ABI_HardFP_use].i = 3; 11780 11781 /* Now we can handle Tag_FP_arch. */ 11782 11783 /* Values of VFP_VERSION_COUNT or more aren't defined, so just 11784 pick the biggest. 
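In other words, an out-of-range input value only replaces the output value when it is also numerically larger, mirroring the use-the-largest-value rule applied to the ISA tags above.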
*/ 11785 if (in_attr[i].i >= VFP_VERSION_COUNT 11786 && in_attr[i].i > out_attr[i].i) 11787 { 11788 out_attr[i] = in_attr[i]; 11789 break; 11790 } 11791 /* The output uses the superset of input features 11792 (ISA version) and registers. */ 11793 ver = vfp_versions[in_attr[i].i].ver; 11794 if (ver < vfp_versions[out_attr[i].i].ver) 11795 ver = vfp_versions[out_attr[i].i].ver; 11796 regs = vfp_versions[in_attr[i].i].regs; 11797 if (regs < vfp_versions[out_attr[i].i].regs) 11798 regs = vfp_versions[out_attr[i].i].regs; 11799 /* This assumes all possible supersets are also valid 11800 options. */ 11801 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--) 11802 { 11803 if (regs == vfp_versions[newval].regs 11804 && ver == vfp_versions[newval].ver) 11805 break; 11806 } 11807 out_attr[i].i = newval; 11808 } 11809 break; 11810 case Tag_PCS_config: 11811 if (out_attr[i].i == 0) 11812 out_attr[i].i = in_attr[i].i; 11813 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i) 11814 { 11815 /* It's sometimes ok to mix different configs, so this is only 11816 a warning. */ 11817 _bfd_error_handler 11818 (_("Warning: %B: Conflicting platform configuration"), ibfd); 11819 } 11820 break; 11821 case Tag_ABI_PCS_R9_use: 11822 if (in_attr[i].i != out_attr[i].i 11823 && out_attr[i].i != AEABI_R9_unused 11824 && in_attr[i].i != AEABI_R9_unused) 11825 { 11826 _bfd_error_handler 11827 (_("error: %B: Conflicting use of R9"), ibfd); 11828 result = FALSE; 11829 } 11830 if (out_attr[i].i == AEABI_R9_unused) 11831 out_attr[i].i = in_attr[i].i; 11832 break; 11833 case Tag_ABI_PCS_RW_data: 11834 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel 11835 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB 11836 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused) 11837 { 11838 _bfd_error_handler 11839 (_("error: %B: SB relative addressing conflicts with use of R9"), 11840 ibfd); 11841 result = FALSE; 11842 } 11843 /* Use the smallest value specified. */ 11844 if (in_attr[i].i < out_attr[i].i) 11845 out_attr[i].i = in_attr[i].i; 11846 break; 11847 case Tag_ABI_PCS_wchar_t: 11848 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i 11849 && !elf_arm_tdata (obfd)->no_wchar_size_warning) 11850 { 11851 _bfd_error_handler 11852 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"), 11853 ibfd, in_attr[i].i, out_attr[i].i); 11854 } 11855 else if (in_attr[i].i && !out_attr[i].i) 11856 out_attr[i].i = in_attr[i].i; 11857 break; 11858 case Tag_ABI_enum_size: 11859 if (in_attr[i].i != AEABI_enum_unused) 11860 { 11861 if (out_attr[i].i == AEABI_enum_unused 11862 || out_attr[i].i == AEABI_enum_forced_wide) 11863 { 11864 /* The existing object is compatible with anything. 11865 Use whatever requirements the new object has. */ 11866 out_attr[i].i = in_attr[i].i; 11867 } 11868 else if (in_attr[i].i != AEABI_enum_forced_wide 11869 && out_attr[i].i != in_attr[i].i 11870 && !elf_arm_tdata (obfd)->no_enum_size_warning) 11871 { 11872 static const char *aeabi_enum_names[] = 11873 { "", "variable-size", "32-bit", "" }; 11874 const char *in_name = 11875 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names) 11876 ? aeabi_enum_names[in_attr[i].i] 11877 : "<unknown>"; 11878 const char *out_name = 11879 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names) 11880 ?
aeabi_enum_names[out_attr[i].i] 11881 : "<unknown>"; 11882 _bfd_error_handler 11883 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"), 11884 ibfd, in_name, out_name); 11885 } 11886 } 11887 break; 11888 case Tag_ABI_VFP_args: 11889 /* Already done. */ 11890 break; 11891 case Tag_ABI_WMMX_args: 11892 if (in_attr[i].i != out_attr[i].i) 11893 { 11894 _bfd_error_handler 11895 (_("error: %B uses iWMMXt register arguments, %B does not"), 11896 ibfd, obfd); 11897 result = FALSE; 11898 } 11899 break; 11900 case Tag_compatibility: 11901 /* Merged in target-independent code. */ 11902 break; 11903 case Tag_ABI_HardFP_use: 11904 /* This is handled along with Tag_FP_arch. */ 11905 break; 11906 case Tag_ABI_FP_16bit_format: 11907 if (in_attr[i].i != 0 && out_attr[i].i != 0) 11908 { 11909 if (in_attr[i].i != out_attr[i].i) 11910 { 11911 _bfd_error_handler 11912 (_("error: fp16 format mismatch between %B and %B"), 11913 ibfd, obfd); 11914 result = FALSE; 11915 } 11916 } 11917 if (in_attr[i].i != 0) 11918 out_attr[i].i = in_attr[i].i; 11919 break; 11920 11921 case Tag_DIV_use: 11922 /* A value of zero on input means that the divide instruction may 11923 be used if available in the base architecture as specified via 11924 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that 11925 the user did not want divide instructions. A value of 2 11926 explicitly means that divide instructions were allowed in ARM 11927 and Thumb state. */ 11928 if (in_attr[i].i == out_attr[i].i) 11929 /* Do nothing. */ ; 11930 else if (elf32_arm_attributes_forbid_div (in_attr) 11931 && !elf32_arm_attributes_accept_div (out_attr)) 11932 out_attr[i].i = 1; 11933 else if (elf32_arm_attributes_forbid_div (out_attr) 11934 && elf32_arm_attributes_accept_div (in_attr)) 11935 out_attr[i].i = in_attr[i].i; 11936 else if (in_attr[i].i == 2) 11937 out_attr[i].i = in_attr[i].i; 11938 break; 11939 11940 case Tag_MPextension_use_legacy: 11941 /* We don't output objects with Tag_MPextension_use_legacy - we 11942 move the value to Tag_MPextension_use. */ 11943 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0) 11944 { 11945 if (in_attr[Tag_MPextension_use].i != in_attr[i].i) 11946 { 11947 _bfd_error_handler 11948 (_("%B has both the current and legacy " 11949 "Tag_MPextension_use attributes"), 11950 ibfd); 11951 result = FALSE; 11952 } 11953 } 11954 11955 if (in_attr[i].i > out_attr[Tag_MPextension_use].i) 11956 out_attr[Tag_MPextension_use] = in_attr[i]; 11957 11958 break; 11959 11960 case Tag_nodefaults: 11961 /* This tag is set if it exists, but the value is unused (and is 11962 typically zero). We don't actually need to do anything here - 11963 the merge happens automatically when the type flags are merged 11964 below. */ 11965 break; 11966 case Tag_also_compatible_with: 11967 /* Already done in Tag_CPU_arch. */ 11968 break; 11969 case Tag_conformance: 11970 /* Keep the attribute if it matches. Throw it away otherwise. 11971 No attribute means no claim to conform. */ 11972 if (!in_attr[i].s || !out_attr[i].s 11973 || strcmp (in_attr[i].s, out_attr[i].s) != 0) 11974 out_attr[i].s = NULL; 11975 break; 11976 11977 default: 11978 result 11979 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i); 11980 } 11981 11982 /* If out_attr was copied from in_attr then it won't have a type yet. */ 11983 if (in_attr[i].type && !out_attr[i].type) 11984 out_attr[i].type = in_attr[i].type; 11985 } 11986 11987 /* Merge Tag_compatibility attributes and any common GNU ones.
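(This is done by the generic _bfd_elf_merge_object_attributes call below rather than by the ARM-specific loop above.)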
*/ 11988 if (!_bfd_elf_merge_object_attributes (ibfd, obfd)) 11989 return FALSE; 11990 11991 /* Check for any attributes not known on ARM. */ 11992 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd); 11993 11994 return result; 11995 } 11996 11997 11998 /* Return TRUE if the two EABI versions are compatible. */ 11999 12000 static bfd_boolean 12001 elf32_arm_versions_compatible (unsigned iver, unsigned over) 12002 { 12003 /* v4 and v5 are the same spec before and after it was released, 12004 so allow mixing them. */ 12005 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5) 12006 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4)) 12007 return TRUE; 12008 12009 return (iver == over); 12010 } 12011 12012 /* Merge backend specific data from an object file to the output 12013 object file when linking. */ 12014 12015 static bfd_boolean 12016 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd); 12017 12018 /* Display the flags field. */ 12019 12020 static bfd_boolean 12021 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr) 12022 { 12023 FILE * file = (FILE *) ptr; 12024 unsigned long flags; 12025 12026 BFD_ASSERT (abfd != NULL && ptr != NULL); 12027 12028 /* Print normal ELF private data. */ 12029 _bfd_elf_print_private_bfd_data (abfd, ptr); 12030 12031 flags = elf_elfheader (abfd)->e_flags; 12032 /* Ignore init flag - it may not be set, despite the flags field 12033 containing valid data. */ 12034 12035 /* xgettext:c-format */ 12036 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags); 12037 12038 switch (EF_ARM_EABI_VERSION (flags)) 12039 { 12040 case EF_ARM_EABI_UNKNOWN: 12041 /* The following flag bits are GNU extensions and not part of the 12042 official ARM ELF extended ABI. Hence they are only decoded if 12043 the EABI version is not set.
*/ 12044 if (flags & EF_ARM_INTERWORK) 12045 fprintf (file, _(" [interworking enabled]")); 12046 12047 if (flags & EF_ARM_APCS_26) 12048 fprintf (file, " [APCS-26]"); 12049 else 12050 fprintf (file, " [APCS-32]"); 12051 12052 if (flags & EF_ARM_VFP_FLOAT) 12053 fprintf (file, _(" [VFP float format]")); 12054 else if (flags & EF_ARM_MAVERICK_FLOAT) 12055 fprintf (file, _(" [Maverick float format]")); 12056 else 12057 fprintf (file, _(" [FPA float format]")); 12058 12059 if (flags & EF_ARM_APCS_FLOAT) 12060 fprintf (file, _(" [floats passed in float registers]")); 12061 12062 if (flags & EF_ARM_PIC) 12063 fprintf (file, _(" [position independent]")); 12064 12065 if (flags & EF_ARM_NEW_ABI) 12066 fprintf (file, _(" [new ABI]")); 12067 12068 if (flags & EF_ARM_OLD_ABI) 12069 fprintf (file, _(" [old ABI]")); 12070 12071 if (flags & EF_ARM_SOFT_FLOAT) 12072 fprintf (file, _(" [software FP]")); 12073 12074 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT 12075 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI 12076 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT 12077 | EF_ARM_MAVERICK_FLOAT); 12078 break; 12079 12080 case EF_ARM_EABI_VER1: 12081 fprintf (file, _(" [Version1 EABI]")); 12082 12083 if (flags & EF_ARM_SYMSARESORTED) 12084 fprintf (file, _(" [sorted symbol table]")); 12085 else 12086 fprintf (file, _(" [unsorted symbol table]")); 12087 12088 flags &= ~ EF_ARM_SYMSARESORTED; 12089 break; 12090 12091 case EF_ARM_EABI_VER2: 12092 fprintf (file, _(" [Version2 EABI]")); 12093 12094 if (flags & EF_ARM_SYMSARESORTED) 12095 fprintf (file, _(" [sorted symbol table]")); 12096 else 12097 fprintf (file, _(" [unsorted symbol table]")); 12098 12099 if (flags & EF_ARM_DYNSYMSUSESEGIDX) 12100 fprintf (file, _(" [dynamic symbols use segment index]")); 12101 12102 if (flags & EF_ARM_MAPSYMSFIRST) 12103 fprintf (file, _(" [mapping symbols precede others]")); 12104 12105 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX 12106 | EF_ARM_MAPSYMSFIRST); 12107 break; 12108 12109 case EF_ARM_EABI_VER3: 12110 fprintf (file, _(" [Version3 EABI]")); 12111 break; 12112 12113 case EF_ARM_EABI_VER4: 12114 fprintf (file, _(" [Version4 EABI]")); 12115 goto eabi; 12116 12117 case EF_ARM_EABI_VER5: 12118 fprintf (file, _(" [Version5 EABI]")); 12119 eabi: 12120 if (flags & EF_ARM_BE8) 12121 fprintf (file, _(" [BE8]")); 12122 12123 if (flags & EF_ARM_LE8) 12124 fprintf (file, _(" [LE8]")); 12125 12126 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8); 12127 break; 12128 12129 default: 12130 fprintf (file, _(" <EABI version unrecognised>")); 12131 break; 12132 } 12133 12134 flags &= ~ EF_ARM_EABIMASK; 12135 12136 if (flags & EF_ARM_RELEXEC) 12137 fprintf (file, _(" [relocatable executable]")); 12138 12139 if (flags & EF_ARM_HASENTRY) 12140 fprintf (file, _(" [has entry point]")); 12141 12142 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY); 12143 12144 if (flags) 12145 fprintf (file, _("<Unrecognised flag bits set>")); 12146 12147 fputc ('\n', file); 12148 12149 return TRUE; 12150 } 12151 12152 static int 12153 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type) 12154 { 12155 switch (ELF_ST_TYPE (elf_sym->st_info)) 12156 { 12157 case STT_ARM_TFUNC: 12158 return ELF_ST_TYPE (elf_sym->st_info); 12159 12160 case STT_ARM_16BIT: 12161 /* If the symbol is not an object, return the STT_ARM_16BIT flag. 12162 This allows us to distinguish between data used by Thumb instructions 12163 and non-data (which is probably code) inside Thumb regions of an 12164 executable. 
*/ 12165 if (type != STT_OBJECT && type != STT_TLS) 12166 return ELF_ST_TYPE (elf_sym->st_info); 12167 break; 12168 12169 default: 12170 break; 12171 } 12172 12173 return type; 12174 } 12175 12176 static asection * 12177 elf32_arm_gc_mark_hook (asection *sec, 12178 struct bfd_link_info *info, 12179 Elf_Internal_Rela *rel, 12180 struct elf_link_hash_entry *h, 12181 Elf_Internal_Sym *sym) 12182 { 12183 if (h != NULL) 12184 switch (ELF32_R_TYPE (rel->r_info)) 12185 { 12186 case R_ARM_GNU_VTINHERIT: 12187 case R_ARM_GNU_VTENTRY: 12188 return NULL; 12189 } 12190 12191 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym); 12192 } 12193 12194 /* Update the got entry reference counts for the section being removed. */ 12195 12196 static bfd_boolean 12197 elf32_arm_gc_sweep_hook (bfd * abfd, 12198 struct bfd_link_info * info, 12199 asection * sec, 12200 const Elf_Internal_Rela * relocs) 12201 { 12202 Elf_Internal_Shdr *symtab_hdr; 12203 struct elf_link_hash_entry **sym_hashes; 12204 bfd_signed_vma *local_got_refcounts; 12205 const Elf_Internal_Rela *rel, *relend; 12206 struct elf32_arm_link_hash_table * globals; 12207 12208 if (info->relocatable) 12209 return TRUE; 12210 12211 globals = elf32_arm_hash_table (info); 12212 if (globals == NULL) 12213 return FALSE; 12214 12215 elf_section_data (sec)->local_dynrel = NULL; 12216 12217 symtab_hdr = & elf_symtab_hdr (abfd); 12218 sym_hashes = elf_sym_hashes (abfd); 12219 local_got_refcounts = elf_local_got_refcounts (abfd); 12220 12221 check_use_blx (globals); 12222 12223 relend = relocs + sec->reloc_count; 12224 for (rel = relocs; rel < relend; rel++) 12225 { 12226 unsigned long r_symndx; 12227 struct elf_link_hash_entry *h = NULL; 12228 struct elf32_arm_link_hash_entry *eh; 12229 int r_type; 12230 bfd_boolean call_reloc_p; 12231 bfd_boolean may_become_dynamic_p; 12232 bfd_boolean may_need_local_target_p; 12233 union gotplt_union *root_plt; 12234 struct arm_plt_info *arm_plt; 12235 12236 r_symndx = ELF32_R_SYM (rel->r_info); 12237 if (r_symndx >= symtab_hdr->sh_info) 12238 { 12239 h = sym_hashes[r_symndx - symtab_hdr->sh_info]; 12240 while (h->root.type == bfd_link_hash_indirect 12241 || h->root.type == bfd_link_hash_warning) 12242 h = (struct elf_link_hash_entry *) h->root.u.i.link; 12243 } 12244 eh = (struct elf32_arm_link_hash_entry *) h; 12245 12246 call_reloc_p = FALSE; 12247 may_become_dynamic_p = FALSE; 12248 may_need_local_target_p = FALSE; 12249 12250 r_type = ELF32_R_TYPE (rel->r_info); 12251 r_type = arm_real_reloc_type (globals, r_type); 12252 switch (r_type) 12253 { 12254 case R_ARM_GOT32: 12255 case R_ARM_GOT_PREL: 12256 case R_ARM_TLS_GD32: 12257 case R_ARM_TLS_IE32: 12258 if (h != NULL) 12259 { 12260 if (h->got.refcount > 0) 12261 h->got.refcount -= 1; 12262 } 12263 else if (local_got_refcounts != NULL) 12264 { 12265 if (local_got_refcounts[r_symndx] > 0) 12266 local_got_refcounts[r_symndx] -= 1; 12267 } 12268 break; 12269 12270 case R_ARM_TLS_LDM32: 12271 globals->tls_ldm_got.refcount -= 1; 12272 break; 12273 12274 case R_ARM_PC24: 12275 case R_ARM_PLT32: 12276 case R_ARM_CALL: 12277 case R_ARM_JUMP24: 12278 case R_ARM_PREL31: 12279 case R_ARM_THM_CALL: 12280 case R_ARM_THM_JUMP24: 12281 case R_ARM_THM_JUMP19: 12282 call_reloc_p = TRUE; 12283 may_need_local_target_p = TRUE; 12284 break; 12285 12286 case R_ARM_ABS12: 12287 if (!globals->vxworks_p) 12288 { 12289 may_need_local_target_p = TRUE; 12290 break; 12291 } 12292 /* Fall through. 
*/ 12293 case R_ARM_ABS32: 12294 case R_ARM_ABS32_NOI: 12295 case R_ARM_REL32: 12296 case R_ARM_REL32_NOI: 12297 case R_ARM_MOVW_ABS_NC: 12298 case R_ARM_MOVT_ABS: 12299 case R_ARM_MOVW_PREL_NC: 12300 case R_ARM_MOVT_PREL: 12301 case R_ARM_THM_MOVW_ABS_NC: 12302 case R_ARM_THM_MOVT_ABS: 12303 case R_ARM_THM_MOVW_PREL_NC: 12304 case R_ARM_THM_MOVT_PREL: 12305 /* Should the interworking branches be here also? */ 12306 if ((info->shared || globals->root.is_relocatable_executable) 12307 && (sec->flags & SEC_ALLOC) != 0) 12308 { 12309 if (h == NULL 12310 && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)) 12311 { 12312 call_reloc_p = TRUE; 12313 may_need_local_target_p = TRUE; 12314 } 12315 else 12316 may_become_dynamic_p = TRUE; 12317 } 12318 else 12319 may_need_local_target_p = TRUE; 12320 break; 12321 12322 default: 12323 break; 12324 } 12325 12326 if (may_need_local_target_p 12327 && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt)) 12328 { 12329 /* If PLT refcount book-keeping is wrong and too low, we'll 12330 see a zero value (going to -1) for the root PLT reference 12331 count. */ 12332 if (root_plt->refcount >= 0) 12333 { 12334 BFD_ASSERT (root_plt->refcount != 0); 12335 root_plt->refcount -= 1; 12336 } 12337 else 12338 /* A value of -1 means the symbol has become local, forced 12339 or seeing a hidden definition. Any other negative value 12340 is an error. */ 12341 BFD_ASSERT (root_plt->refcount == -1); 12342 12343 if (!call_reloc_p) 12344 arm_plt->noncall_refcount--; 12345 12346 if (r_type == R_ARM_THM_CALL) 12347 arm_plt->maybe_thumb_refcount--; 12348 12349 if (r_type == R_ARM_THM_JUMP24 12350 || r_type == R_ARM_THM_JUMP19) 12351 arm_plt->thumb_refcount--; 12352 } 12353 12354 if (may_become_dynamic_p) 12355 { 12356 struct elf_dyn_relocs **pp; 12357 struct elf_dyn_relocs *p; 12358 12359 if (h != NULL) 12360 pp = &(eh->dyn_relocs); 12361 else 12362 { 12363 Elf_Internal_Sym *isym; 12364 12365 isym = bfd_sym_from_r_symndx (&globals->sym_cache, 12366 abfd, r_symndx); 12367 if (isym == NULL) 12368 return FALSE; 12369 pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym); 12370 if (pp == NULL) 12371 return FALSE; 12372 } 12373 for (; (p = *pp) != NULL; pp = &p->next) 12374 if (p->sec == sec) 12375 { 12376 /* Everything must go for SEC. */ 12377 *pp = p->next; 12378 break; 12379 } 12380 } 12381 } 12382 12383 return TRUE; 12384 } 12385 12386 /* Look through the relocs for a section during the first phase. */ 12387 12388 static bfd_boolean 12389 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, 12390 asection *sec, const Elf_Internal_Rela *relocs) 12391 { 12392 Elf_Internal_Shdr *symtab_hdr; 12393 struct elf_link_hash_entry **sym_hashes; 12394 const Elf_Internal_Rela *rel; 12395 const Elf_Internal_Rela *rel_end; 12396 bfd *dynobj; 12397 asection *sreloc; 12398 struct elf32_arm_link_hash_table *htab; 12399 bfd_boolean call_reloc_p; 12400 bfd_boolean may_become_dynamic_p; 12401 bfd_boolean may_need_local_target_p; 12402 unsigned long nsyms; 12403 12404 if (info->relocatable) 12405 return TRUE; 12406 12407 BFD_ASSERT (is_arm_elf (abfd)); 12408 12409 htab = elf32_arm_hash_table (info); 12410 if (htab == NULL) 12411 return FALSE; 12412 12413 sreloc = NULL; 12414 12415 /* Create dynamic sections for relocatable executables so that we can 12416 copy relocations. */ 12417 if (htab->root.is_relocatable_executable 12418 && ! htab->root.dynamic_sections_created) 12419 { 12420 if (! 
_bfd_elf_link_create_dynamic_sections (abfd, info)) 12421 return FALSE; 12422 } 12423 12424 if (htab->root.dynobj == NULL) 12425 htab->root.dynobj = abfd; 12426 if (!create_ifunc_sections (info)) 12427 return FALSE; 12428 12429 dynobj = htab->root.dynobj; 12430 12431 symtab_hdr = & elf_symtab_hdr (abfd); 12432 sym_hashes = elf_sym_hashes (abfd); 12433 nsyms = NUM_SHDR_ENTRIES (symtab_hdr); 12434 12435 rel_end = relocs + sec->reloc_count; 12436 for (rel = relocs; rel < rel_end; rel++) 12437 { 12438 Elf_Internal_Sym *isym; 12439 struct elf_link_hash_entry *h; 12440 struct elf32_arm_link_hash_entry *eh; 12441 unsigned long r_symndx; 12442 int r_type; 12443 12444 r_symndx = ELF32_R_SYM (rel->r_info); 12445 r_type = ELF32_R_TYPE (rel->r_info); 12446 r_type = arm_real_reloc_type (htab, r_type); 12447 12448 if (r_symndx >= nsyms 12449 /* PR 9934: It is possible to have relocations that do not 12450 refer to symbols, thus it is also possible to have an 12451 object file containing relocations but no symbol table. */ 12452 && (r_symndx > STN_UNDEF || nsyms > 0)) 12453 { 12454 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd, 12455 r_symndx); 12456 return FALSE; 12457 } 12458 12459 h = NULL; 12460 isym = NULL; 12461 if (nsyms > 0) 12462 { 12463 if (r_symndx < symtab_hdr->sh_info) 12464 { 12465 /* A local symbol. */ 12466 isym = bfd_sym_from_r_symndx (&htab->sym_cache, 12467 abfd, r_symndx); 12468 if (isym == NULL) 12469 return FALSE; 12470 } 12471 else 12472 { 12473 h = sym_hashes[r_symndx - symtab_hdr->sh_info]; 12474 while (h->root.type == bfd_link_hash_indirect 12475 || h->root.type == bfd_link_hash_warning) 12476 h = (struct elf_link_hash_entry *) h->root.u.i.link; 12477 } 12478 } 12479 12480 eh = (struct elf32_arm_link_hash_entry *) h; 12481 12482 call_reloc_p = FALSE; 12483 may_become_dynamic_p = FALSE; 12484 may_need_local_target_p = FALSE; 12485 12486 /* Could be done earlier, if h were already available. */ 12487 r_type = elf32_arm_tls_transition (info, r_type, h); 12488 switch (r_type) 12489 { 12490 case R_ARM_GOT32: 12491 case R_ARM_GOT_PREL: 12492 case R_ARM_TLS_GD32: 12493 case R_ARM_TLS_IE32: 12494 case R_ARM_TLS_GOTDESC: 12495 case R_ARM_TLS_DESCSEQ: 12496 case R_ARM_THM_TLS_DESCSEQ: 12497 case R_ARM_TLS_CALL: 12498 case R_ARM_THM_TLS_CALL: 12499 /* This symbol requires a global offset table entry. */ 12500 { 12501 int tls_type, old_tls_type; 12502 12503 switch (r_type) 12504 { 12505 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break; 12506 12507 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break; 12508 12509 case R_ARM_TLS_GOTDESC: 12510 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL: 12511 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ: 12512 tls_type = GOT_TLS_GDESC; break; 12513 12514 default: tls_type = GOT_NORMAL; break; 12515 } 12516 12517 if (h != NULL) 12518 { 12519 h->got.refcount++; 12520 old_tls_type = elf32_arm_hash_entry (h)->tls_type; 12521 } 12522 else 12523 { 12524 /* This is a global offset table entry for a local symbol. */ 12525 if (!elf32_arm_allocate_local_sym_info (abfd)) 12526 return FALSE; 12527 elf_local_got_refcounts (abfd)[r_symndx] += 1; 12528 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx]; 12529 } 12530 12531 /* If a variable is accessed with both tls methods, two 12532 slots may be created. 
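For instance, a symbol referenced through both R_ARM_TLS_GD32 and R_ARM_TLS_GOTDESC ends up with both the GOT_TLS_GD and GOT_TLS_GDESC bits set here, and allocate_dynrelocs_for_symbol later reserves GOT space for each form.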
*/ 12533 if (GOT_TLS_GD_ANY_P (old_tls_type) 12534 && GOT_TLS_GD_ANY_P (tls_type)) 12535 tls_type |= old_tls_type; 12536 12537 /* We will already have issued an error message if there 12538 is a TLS/non-TLS mismatch, based on the symbol 12539 type. So just combine any TLS types needed. */ 12540 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL 12541 && tls_type != GOT_NORMAL) 12542 tls_type |= old_tls_type; 12543 12544 /* If the symbol is accessed in both IE and GDESC 12545 method, we're able to relax. Turn off the GDESC flag, 12546 without messing up with any other kind of tls types 12547 that may be involved */ 12548 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC)) 12549 tls_type &= ~GOT_TLS_GDESC; 12550 12551 if (old_tls_type != tls_type) 12552 { 12553 if (h != NULL) 12554 elf32_arm_hash_entry (h)->tls_type = tls_type; 12555 else 12556 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type; 12557 } 12558 } 12559 /* Fall through. */ 12560 12561 case R_ARM_TLS_LDM32: 12562 if (r_type == R_ARM_TLS_LDM32) 12563 htab->tls_ldm_got.refcount++; 12564 /* Fall through. */ 12565 12566 case R_ARM_GOTOFF32: 12567 case R_ARM_GOTPC: 12568 if (htab->root.sgot == NULL 12569 && !create_got_section (htab->root.dynobj, info)) 12570 return FALSE; 12571 break; 12572 12573 case R_ARM_PC24: 12574 case R_ARM_PLT32: 12575 case R_ARM_CALL: 12576 case R_ARM_JUMP24: 12577 case R_ARM_PREL31: 12578 case R_ARM_THM_CALL: 12579 case R_ARM_THM_JUMP24: 12580 case R_ARM_THM_JUMP19: 12581 call_reloc_p = TRUE; 12582 may_need_local_target_p = TRUE; 12583 break; 12584 12585 case R_ARM_ABS12: 12586 /* VxWorks uses dynamic R_ARM_ABS12 relocations for 12587 ldr __GOTT_INDEX__ offsets. */ 12588 if (!htab->vxworks_p) 12589 { 12590 may_need_local_target_p = TRUE; 12591 break; 12592 } 12593 /* Fall through. */ 12594 12595 case R_ARM_MOVW_ABS_NC: 12596 case R_ARM_MOVT_ABS: 12597 case R_ARM_THM_MOVW_ABS_NC: 12598 case R_ARM_THM_MOVT_ABS: 12599 if (info->shared) 12600 { 12601 (*_bfd_error_handler) 12602 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"), 12603 abfd, elf32_arm_howto_table_1[r_type].name, 12604 (h) ? h->root.root.string : "a local symbol"); 12605 bfd_set_error (bfd_error_bad_value); 12606 return FALSE; 12607 } 12608 12609 /* Fall through. */ 12610 case R_ARM_ABS32: 12611 case R_ARM_ABS32_NOI: 12612 case R_ARM_REL32: 12613 case R_ARM_REL32_NOI: 12614 case R_ARM_MOVW_PREL_NC: 12615 case R_ARM_MOVT_PREL: 12616 case R_ARM_THM_MOVW_PREL_NC: 12617 case R_ARM_THM_MOVT_PREL: 12618 12619 /* Should the interworking branches be listed here? */ 12620 if ((info->shared || htab->root.is_relocatable_executable) 12621 && (sec->flags & SEC_ALLOC) != 0) 12622 { 12623 if (h == NULL 12624 && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)) 12625 { 12626 /* In shared libraries and relocatable executables, 12627 we treat local relative references as calls; 12628 see the related SYMBOL_CALLS_LOCAL code in 12629 allocate_dynrelocs. */ 12630 call_reloc_p = TRUE; 12631 may_need_local_target_p = TRUE; 12632 } 12633 else 12634 /* We are creating a shared library or relocatable 12635 executable, and this is a reloc against a global symbol, 12636 or a non-PC-relative reloc against a local symbol. 12637 We may need to copy the reloc into the output. */ 12638 may_become_dynamic_p = TRUE; 12639 } 12640 else 12641 may_need_local_target_p = TRUE; 12642 break; 12643 12644 /* This relocation describes the C++ object vtable hierarchy. 
12645 Reconstruct it for later use during GC. */ 12646 case R_ARM_GNU_VTINHERIT: 12647 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset)) 12648 return FALSE; 12649 break; 12650 12651 /* This relocation describes which C++ vtable entries are actually 12652 used. Record for later use during GC. */ 12653 case R_ARM_GNU_VTENTRY: 12654 BFD_ASSERT (h != NULL); 12655 if (h != NULL 12656 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset)) 12657 return FALSE; 12658 break; 12659 } 12660 12661 if (h != NULL) 12662 { 12663 if (call_reloc_p) 12664 /* We may need a .plt entry if the function this reloc 12665 refers to is in a different object, regardless of the 12666 symbol's type. We can't tell for sure yet, because 12667 something later might force the symbol local. */ 12668 h->needs_plt = 1; 12669 else if (may_need_local_target_p) 12670 /* If this reloc is in a read-only section, we might 12671 need a copy reloc. We can't check reliably at this 12672 stage whether the section is read-only, as input 12673 sections have not yet been mapped to output sections. 12674 Tentatively set the flag for now, and correct in 12675 adjust_dynamic_symbol. */ 12676 h->non_got_ref = 1; 12677 } 12678 12679 if (may_need_local_target_p 12680 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)) 12681 { 12682 union gotplt_union *root_plt; 12683 struct arm_plt_info *arm_plt; 12684 struct arm_local_iplt_info *local_iplt; 12685 12686 if (h != NULL) 12687 { 12688 root_plt = &h->plt; 12689 arm_plt = &eh->plt; 12690 } 12691 else 12692 { 12693 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx); 12694 if (local_iplt == NULL) 12695 return FALSE; 12696 root_plt = &local_iplt->root; 12697 arm_plt = &local_iplt->arm; 12698 } 12699 12700 /* If the symbol is a function that doesn't bind locally, 12701 this relocation will need a PLT entry. */ 12702 if (root_plt->refcount != -1) 12703 root_plt->refcount += 1; 12704 12705 if (!call_reloc_p) 12706 arm_plt->noncall_refcount++; 12707 12708 /* It's too early to use htab->use_blx here, so we have to 12709 record possible blx references separately from 12710 relocs that definitely need a thumb stub. */ 12711 12712 if (r_type == R_ARM_THM_CALL) 12713 arm_plt->maybe_thumb_refcount += 1; 12714 12715 if (r_type == R_ARM_THM_JUMP24 12716 || r_type == R_ARM_THM_JUMP19) 12717 arm_plt->thumb_refcount += 1; 12718 } 12719 12720 if (may_become_dynamic_p) 12721 { 12722 struct elf_dyn_relocs *p, **head; 12723 12724 /* Create a reloc section in dynobj. */ 12725 if (sreloc == NULL) 12726 { 12727 sreloc = _bfd_elf_make_dynamic_reloc_section 12728 (sec, dynobj, 2, abfd, ! htab->use_rel); 12729 12730 if (sreloc == NULL) 12731 return FALSE; 12732 12733 /* BPABI objects never have dynamic relocations mapped. */ 12734 if (htab->symbian_p) 12735 { 12736 flagword flags; 12737 12738 flags = bfd_get_section_flags (dynobj, sreloc); 12739 flags &= ~(SEC_LOAD | SEC_ALLOC); 12740 bfd_set_section_flags (dynobj, sreloc, flags); 12741 } 12742 } 12743 12744 /* If this is a global symbol, count the number of 12745 relocations we need for this symbol. 
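For a local symbol the count is kept instead on the per-symbol list returned by elf32_arm_get_local_dynreloc_list.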
*/ 12746 if (h != NULL) 12747 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs; 12748 else 12749 { 12750 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym); 12751 if (head == NULL) 12752 return FALSE; 12753 } 12754 12755 p = *head; 12756 if (p == NULL || p->sec != sec) 12757 { 12758 bfd_size_type amt = sizeof *p; 12759 12760 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt); 12761 if (p == NULL) 12762 return FALSE; 12763 p->next = *head; 12764 *head = p; 12765 p->sec = sec; 12766 p->count = 0; 12767 p->pc_count = 0; 12768 } 12769 12770 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI) 12771 p->pc_count += 1; 12772 p->count += 1; 12773 } 12774 } 12775 12776 return TRUE; 12777 } 12778 12779 /* Unwinding tables are not referenced directly. This pass marks them as 12780 required if the corresponding code section is marked. */ 12781 12782 static bfd_boolean 12783 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info, 12784 elf_gc_mark_hook_fn gc_mark_hook) 12785 { 12786 bfd *sub; 12787 Elf_Internal_Shdr **elf_shdrp; 12788 bfd_boolean again; 12789 12790 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook); 12791 12792 /* Marking EH data may cause additional code sections to be marked, 12793 requiring multiple passes. */ 12794 again = TRUE; 12795 while (again) 12796 { 12797 again = FALSE; 12798 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next) 12799 { 12800 asection *o; 12801 12802 if (! is_arm_elf (sub)) 12803 continue; 12804 12805 elf_shdrp = elf_elfsections (sub); 12806 for (o = sub->sections; o != NULL; o = o->next) 12807 { 12808 Elf_Internal_Shdr *hdr; 12809 12810 hdr = &elf_section_data (o)->this_hdr; 12811 if (hdr->sh_type == SHT_ARM_EXIDX 12812 && hdr->sh_link 12813 && hdr->sh_link < elf_numsections (sub) 12814 && !o->gc_mark 12815 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark) 12816 { 12817 again = TRUE; 12818 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook)) 12819 return FALSE; 12820 } 12821 } 12822 } 12823 } 12824 12825 return TRUE; 12826 } 12827 12828 /* Treat mapping symbols as special target symbols. */ 12829 12830 static bfd_boolean 12831 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym) 12832 { 12833 return bfd_is_arm_special_symbol_name (sym->name, 12834 BFD_ARM_SPECIAL_SYM_TYPE_ANY); 12835 } 12836 12837 /* This is a copy of elf_find_function() from elf.c except that 12838 ARM mapping symbols are ignored when looking for function names 12839 and STT_ARM_TFUNC is considered to be a function type. */ 12840 12841 static bfd_boolean 12842 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED, 12843 asection * section, 12844 asymbol ** symbols, 12845 bfd_vma offset, 12846 const char ** filename_ptr, 12847 const char ** functionname_ptr) 12848 { 12849 const char * filename = NULL; 12850 asymbol * func = NULL; 12851 bfd_vma low_func = 0; 12852 asymbol ** p; 12853 12854 for (p = symbols; *p != NULL; p++) 12855 { 12856 elf_symbol_type *q; 12857 12858 q = (elf_symbol_type *) *p; 12859 12860 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info)) 12861 { 12862 default: 12863 break; 12864 case STT_FILE: 12865 filename = bfd_asymbol_name (&q->symbol); 12866 break; 12867 case STT_FUNC: 12868 case STT_ARM_TFUNC: 12869 case STT_NOTYPE: 12870 /* Skip mapping symbols. */ 12871 if ((q->symbol.flags & BSF_LOCAL) 12872 && bfd_is_arm_special_symbol_name (q->symbol.name, 12873 BFD_ARM_SPECIAL_SYM_TYPE_ANY)) 12874 continue; 12875 /* Fall through.
*/ 12876 if (bfd_get_section (&q->symbol) == section 12877 && q->symbol.value >= low_func 12878 && q->symbol.value <= offset) 12879 { 12880 func = (asymbol *) q; 12881 low_func = q->symbol.value; 12882 } 12883 break; 12884 } 12885 } 12886 12887 if (func == NULL) 12888 return FALSE; 12889 12890 if (filename_ptr) 12891 *filename_ptr = filename; 12892 if (functionname_ptr) 12893 *functionname_ptr = bfd_asymbol_name (func); 12894 12895 return TRUE; 12896 } 12897 12898 12899 /* Find the nearest line to a particular section and offset, for error 12900 reporting. This code is a duplicate of the code in elf.c, except 12901 that it uses arm_elf_find_function. */ 12902 12903 static bfd_boolean 12904 elf32_arm_find_nearest_line (bfd * abfd, 12905 asection * section, 12906 asymbol ** symbols, 12907 bfd_vma offset, 12908 const char ** filename_ptr, 12909 const char ** functionname_ptr, 12910 unsigned int * line_ptr) 12911 { 12912 bfd_boolean found = FALSE; 12913 12914 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */ 12915 12916 if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections, 12917 section, symbols, offset, 12918 filename_ptr, functionname_ptr, 12919 line_ptr, NULL, 0, 12920 & elf_tdata (abfd)->dwarf2_find_line_info)) 12921 { 12922 if (!*functionname_ptr) 12923 arm_elf_find_function (abfd, section, symbols, offset, 12924 *filename_ptr ? NULL : filename_ptr, 12925 functionname_ptr); 12926 12927 return TRUE; 12928 } 12929 12930 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset, 12931 & found, filename_ptr, 12932 functionname_ptr, line_ptr, 12933 & elf_tdata (abfd)->line_info)) 12934 return FALSE; 12935 12936 if (found && (*functionname_ptr || *line_ptr)) 12937 return TRUE; 12938 12939 if (symbols == NULL) 12940 return FALSE; 12941 12942 if (! arm_elf_find_function (abfd, section, symbols, offset, 12943 filename_ptr, functionname_ptr)) 12944 return FALSE; 12945 12946 *line_ptr = 0; 12947 return TRUE; 12948 } 12949 12950 static bfd_boolean 12951 elf32_arm_find_inliner_info (bfd * abfd, 12952 const char ** filename_ptr, 12953 const char ** functionname_ptr, 12954 unsigned int * line_ptr) 12955 { 12956 bfd_boolean found; 12957 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr, 12958 functionname_ptr, line_ptr, 12959 & elf_tdata (abfd)->dwarf2_find_line_info); 12960 return found; 12961 } 12962 12963 /* Adjust a symbol defined by a dynamic object and referenced by a 12964 regular object. The current definition is in some section of the 12965 dynamic object, but we're not including those sections. We have to 12966 change the definition to something the rest of the link can 12967 understand. */ 12968 12969 static bfd_boolean 12970 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info, 12971 struct elf_link_hash_entry * h) 12972 { 12973 bfd * dynobj; 12974 asection * s; 12975 struct elf32_arm_link_hash_entry * eh; 12976 struct elf32_arm_link_hash_table *globals; 12977 12978 globals = elf32_arm_hash_table (info); 12979 if (globals == NULL) 12980 return FALSE; 12981 12982 dynobj = elf_hash_table (info)->dynobj; 12983 12984 /* Make sure we know what is going on here. */ 12985 BFD_ASSERT (dynobj != NULL 12986 && (h->needs_plt 12987 || h->type == STT_GNU_IFUNC 12988 || h->u.weakdef != NULL 12989 || (h->def_dynamic 12990 && h->ref_regular 12991 && !h->def_regular))); 12992 12993 eh = (struct elf32_arm_link_hash_entry *) h; 12994 12995 /* If this is a function, put it in the procedure linkage table. 
We 12996 will fill in the contents of the procedure linkage table later, 12997 when we know the address of the .got section. */ 12998 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt) 12999 { 13000 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the 13001 symbol binds locally. */ 13002 if (h->plt.refcount <= 0 13003 || (h->type != STT_GNU_IFUNC 13004 && (SYMBOL_CALLS_LOCAL (info, h) 13005 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT 13006 && h->root.type == bfd_link_hash_undefweak)))) 13007 { 13008 /* This case can occur if we saw a PLT32 reloc in an input 13009 file, but the symbol was never referred to by a dynamic 13010 object, or if all references were garbage collected. In 13011 such a case, we don't actually need to build a procedure 13012 linkage table, and we can just do a PC24 reloc instead. */ 13013 h->plt.offset = (bfd_vma) -1; 13014 eh->plt.thumb_refcount = 0; 13015 eh->plt.maybe_thumb_refcount = 0; 13016 eh->plt.noncall_refcount = 0; 13017 h->needs_plt = 0; 13018 } 13019 13020 return TRUE; 13021 } 13022 else 13023 { 13024 /* It's possible that we incorrectly decided a .plt reloc was 13025 needed for an R_ARM_PC24 or similar reloc to a non-function sym 13026 in check_relocs. We can't decide accurately between function 13027 and non-function syms in check-relocs; Objects loaded later in 13028 the link may change h->type. So fix it now. */ 13029 h->plt.offset = (bfd_vma) -1; 13030 eh->plt.thumb_refcount = 0; 13031 eh->plt.maybe_thumb_refcount = 0; 13032 eh->plt.noncall_refcount = 0; 13033 } 13034 13035 /* If this is a weak symbol, and there is a real definition, the 13036 processor independent code will have arranged for us to see the 13037 real definition first, and we can just use the same value. */ 13038 if (h->u.weakdef != NULL) 13039 { 13040 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined 13041 || h->u.weakdef->root.type == bfd_link_hash_defweak); 13042 h->root.u.def.section = h->u.weakdef->root.u.def.section; 13043 h->root.u.def.value = h->u.weakdef->root.u.def.value; 13044 return TRUE; 13045 } 13046 13047 /* If there are no non-GOT references, we do not need a copy 13048 relocation. */ 13049 if (!h->non_got_ref) 13050 return TRUE; 13051 13052 /* This is a reference to a symbol defined by a dynamic object which 13053 is not a function. */ 13054 13055 /* If we are creating a shared library, we must presume that the 13056 only references to the symbol are via the global offset table. 13057 For such cases we need not do anything here; the relocations will 13058 be handled correctly by relocate_section. Relocatable executables 13059 can reference data in shared objects directly, so we don't need to 13060 do anything here. */ 13061 if (info->shared || globals->root.is_relocatable_executable) 13062 return TRUE; 13063 13064 /* We must allocate the symbol in our .dynbss section, which will 13065 become part of the .bss section of the executable. There will be 13066 an entry for this symbol in the .dynsym section. The dynamic 13067 object will contain position independent code, so all references 13068 from the dynamic object to this symbol will go through the global 13069 offset table. The dynamic linker will use the .dynsym entry to 13070 determine the address it must put in the global offset table, so 13071 both the dynamic object and the regular object will refer to the 13072 same memory location for the variable. 
*/
13073 s = bfd_get_linker_section (dynobj, ".dynbss");
13074 BFD_ASSERT (s != NULL);
13075
13076 /* We must generate an R_ARM_COPY reloc to tell the dynamic linker to
13077 copy the initial value out of the dynamic object and into the
13078 runtime process image. We need to remember the offset into the
13079 .rel(a).bss section we are going to use. */
13080 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
13081 {
13082 asection *srel;
13083
13084 srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
13085 elf32_arm_allocate_dynrelocs (info, srel, 1);
13086 h->needs_copy = 1;
13087 }
13088
13089 return _bfd_elf_adjust_dynamic_copy (h, s);
13090 }
13091
13092 /* Allocate space in .plt, .got and associated reloc sections for
13093 dynamic relocs. */
13094
13095 static bfd_boolean
13096 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
13097 {
13098 struct bfd_link_info *info;
13099 struct elf32_arm_link_hash_table *htab;
13100 struct elf32_arm_link_hash_entry *eh;
13101 struct elf_dyn_relocs *p;
13102
13103 if (h->root.type == bfd_link_hash_indirect)
13104 return TRUE;
13105
13106 eh = (struct elf32_arm_link_hash_entry *) h;
13107
13108 info = (struct bfd_link_info *) inf;
13109 htab = elf32_arm_hash_table (info);
13110 if (htab == NULL)
13111 return FALSE;
13112
13113 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
13114 && h->plt.refcount > 0)
13115 {
13116 /* Make sure this symbol is output as a dynamic symbol.
13117 Undefined weak syms won't yet be marked as dynamic. */
13118 if (h->dynindx == -1
13119 && !h->forced_local)
13120 {
13121 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13122 return FALSE;
13123 }
13124
13125 /* If the call in the PLT entry binds locally, the associated
13126 GOT entry should use an R_ARM_IRELATIVE relocation instead of
13127 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
13128 than the .plt section. */
13129 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
13130 {
13131 eh->is_iplt = 1;
13132 if (eh->plt.noncall_refcount == 0
13133 && SYMBOL_REFERENCES_LOCAL (info, h))
13134 /* All non-call references can be resolved directly.
13135 This means that they can (and in some cases, must)
13136 resolve directly to the run-time target, rather than
13137 to the PLT. That in turn means that any .got entry
13138 would be equal to the .igot.plt entry, so there's
13139 no point having both. */
13140 h->got.refcount = 0;
13141 }
13142
13143 if (info->shared
13144 || eh->is_iplt
13145 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
13146 {
13147 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
13148
13149 /* If this symbol is not defined in a regular file, and we are
13150 not generating a shared library, then set the symbol to this
13151 location in the .plt. This is required to make function
13152 pointers compare as equal between the normal executable and
13153 the shared library. */
13154 if (! info->shared
13155 && !h->def_regular)
13156 {
13157 h->root.u.def.section = htab->root.splt;
13158 h->root.u.def.value = h->plt.offset;
13159
13160 /* Make sure the function is not marked as Thumb, in case
13161 it is the target of an ABS32 relocation, which will
13162 point to the PLT entry. */
13163 h->target_internal = ST_BRANCH_TO_ARM;
13164 }
13165
13166 htab->next_tls_desc_index++;
13167
13168 /* VxWorks executables have a second set of relocations for
13169 each PLT entry.
They go in a separate relocation section, 13170 which is processed by the kernel loader. */ 13171 if (htab->vxworks_p && !info->shared) 13172 { 13173 /* There is a relocation for the initial PLT entry: 13174 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */ 13175 if (h->plt.offset == htab->plt_header_size) 13176 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1); 13177 13178 /* There are two extra relocations for each subsequent 13179 PLT entry: an R_ARM_32 relocation for the GOT entry, 13180 and an R_ARM_32 relocation for the PLT entry. */ 13181 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2); 13182 } 13183 } 13184 else 13185 { 13186 h->plt.offset = (bfd_vma) -1; 13187 h->needs_plt = 0; 13188 } 13189 } 13190 else 13191 { 13192 h->plt.offset = (bfd_vma) -1; 13193 h->needs_plt = 0; 13194 } 13195 13196 eh = (struct elf32_arm_link_hash_entry *) h; 13197 eh->tlsdesc_got = (bfd_vma) -1; 13198 13199 if (h->got.refcount > 0) 13200 { 13201 asection *s; 13202 bfd_boolean dyn; 13203 int tls_type = elf32_arm_hash_entry (h)->tls_type; 13204 int indx; 13205 13206 /* Make sure this symbol is output as a dynamic symbol. 13207 Undefined weak syms won't yet be marked as dynamic. */ 13208 if (h->dynindx == -1 13209 && !h->forced_local) 13210 { 13211 if (! bfd_elf_link_record_dynamic_symbol (info, h)) 13212 return FALSE; 13213 } 13214 13215 if (!htab->symbian_p) 13216 { 13217 s = htab->root.sgot; 13218 h->got.offset = s->size; 13219 13220 if (tls_type == GOT_UNKNOWN) 13221 abort (); 13222 13223 if (tls_type == GOT_NORMAL) 13224 /* Non-TLS symbols need one GOT slot. */ 13225 s->size += 4; 13226 else 13227 { 13228 if (tls_type & GOT_TLS_GDESC) 13229 { 13230 /* R_ARM_TLS_DESC needs 2 GOT slots. */ 13231 eh->tlsdesc_got 13232 = (htab->root.sgotplt->size 13233 - elf32_arm_compute_jump_table_size (htab)); 13234 htab->root.sgotplt->size += 8; 13235 h->got.offset = (bfd_vma) -2; 13236 /* plt.got_offset needs to know there's a TLS_DESC 13237 reloc in the middle of .got.plt. */ 13238 htab->num_tls_desc++; 13239 } 13240 13241 if (tls_type & GOT_TLS_GD) 13242 { 13243 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If 13244 the symbol is both GD and GDESC, got.offset may 13245 have been overwritten. */ 13246 h->got.offset = s->size; 13247 s->size += 8; 13248 } 13249 13250 if (tls_type & GOT_TLS_IE) 13251 /* R_ARM_TLS_IE32 needs one GOT slot. */ 13252 s->size += 4; 13253 } 13254 13255 dyn = htab->root.dynamic_sections_created; 13256 13257 indx = 0; 13258 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h) 13259 && (!info->shared 13260 || !SYMBOL_REFERENCES_LOCAL (info, h))) 13261 indx = h->dynindx; 13262 13263 if (tls_type != GOT_NORMAL 13264 && (info->shared || indx != 0) 13265 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 13266 || h->root.type != bfd_link_hash_undefweak)) 13267 { 13268 if (tls_type & GOT_TLS_IE) 13269 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1); 13270 13271 if (tls_type & GOT_TLS_GD) 13272 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1); 13273 13274 if (tls_type & GOT_TLS_GDESC) 13275 { 13276 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1); 13277 /* GDESC needs a trampoline to jump to. */ 13278 htab->tls_trampoline = -1; 13279 } 13280 13281 /* Only GD needs it. GDESC just emits one relocation per 13282 2 entries. 
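In other words, a GD symbol with a dynamic index needs one .rel(a).got relocation per GOT slot, whereas GDESC covers its pair of .got.plt slots with the single R_ARM_TLS_DESC relocation reserved in .rel(a).plt above.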
*/
13283 if ((tls_type & GOT_TLS_GD) && indx != 0)
13284 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13285 }
13286 else if (!SYMBOL_REFERENCES_LOCAL (info, h))
13287 {
13288 if (htab->root.dynamic_sections_created)
13289 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
13290 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13291 }
13292 else if (h->type == STT_GNU_IFUNC
13293 && eh->plt.noncall_refcount == 0)
13294 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
13295 they all resolve dynamically instead. Reserve room for the
13296 GOT entry's R_ARM_IRELATIVE relocation. */
13297 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
13298 else if (info->shared)
13299 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
13300 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13301 }
13302 }
13303 else
13304 h->got.offset = (bfd_vma) -1;
13305
13306 /* Allocate stubs for exported Thumb functions on v4t. */
13307 if (!htab->use_blx && h->dynindx != -1
13308 && h->def_regular
13309 && h->target_internal == ST_BRANCH_TO_THUMB
13310 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
13311 {
13312 struct elf_link_hash_entry * th;
13313 struct bfd_link_hash_entry * bh;
13314 struct elf_link_hash_entry * myh;
13315 char name[1024];
13316 asection *s;
13317 bh = NULL;
13318 /* Create a new symbol to register the real location of the function. */
13319 s = h->root.u.def.section;
13320 sprintf (name, "__real_%s", h->root.root.string);
13321 _bfd_generic_link_add_one_symbol (info, s->owner,
13322 name, BSF_GLOBAL, s,
13323 h->root.u.def.value,
13324 NULL, TRUE, FALSE, &bh);
13325
13326 myh = (struct elf_link_hash_entry *) bh;
13327 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
13328 myh->forced_local = 1;
13329 myh->target_internal = ST_BRANCH_TO_THUMB;
13330 eh->export_glue = myh;
13331 th = record_arm_to_thumb_glue (info, h);
13332 /* Point the symbol at the stub. */
13333 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
13334 h->target_internal = ST_BRANCH_TO_ARM;
13335 h->root.u.def.section = th->root.u.def.section;
13336 h->root.u.def.value = th->root.u.def.value & ~1;
13337 }
13338
13339 if (eh->dyn_relocs == NULL)
13340 return TRUE;
13341
13342 /* In the shared -Bsymbolic case, discard space allocated for
13343 dynamic pc-relative relocs against symbols which turn out to be
13344 defined in regular objects. For the normal shared case, discard
13345 space for pc-relative relocs that have become local due to symbol
13346 visibility changes. */
13347
13348 if (info->shared || htab->root.is_relocatable_executable)
13349 {
13350 /* The only relocs that use pc_count are R_ARM_REL32 and
13351 R_ARM_REL32_NOI, which will appear on something like
13352 ".long foo - .". We want calls to protected symbols to resolve
13353 directly to the function rather than going via the plt. If people
13354 want function pointer comparisons to work as expected then they
13355 should avoid writing assembly like ".long foo - .".
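Accordingly, the loop below drops the pc_count portion of the reserved dynamic relocs for any symbol that is known to resolve locally.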
*/ 13356 if (SYMBOL_CALLS_LOCAL (info, h)) 13357 { 13358 struct elf_dyn_relocs **pp; 13359 13360 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; ) 13361 { 13362 p->count -= p->pc_count; 13363 p->pc_count = 0; 13364 if (p->count == 0) 13365 *pp = p->next; 13366 else 13367 pp = &p->next; 13368 } 13369 } 13370 13371 if (htab->vxworks_p) 13372 { 13373 struct elf_dyn_relocs **pp; 13374 13375 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; ) 13376 { 13377 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0) 13378 *pp = p->next; 13379 else 13380 pp = &p->next; 13381 } 13382 } 13383 13384 /* Also discard relocs on undefined weak syms with non-default 13385 visibility. */ 13386 if (eh->dyn_relocs != NULL 13387 && h->root.type == bfd_link_hash_undefweak) 13388 { 13389 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT) 13390 eh->dyn_relocs = NULL; 13391 13392 /* Make sure undefined weak symbols are output as a dynamic 13393 symbol in PIEs. */ 13394 else if (h->dynindx == -1 13395 && !h->forced_local) 13396 { 13397 if (! bfd_elf_link_record_dynamic_symbol (info, h)) 13398 return FALSE; 13399 } 13400 } 13401 13402 else if (htab->root.is_relocatable_executable && h->dynindx == -1 13403 && h->root.type == bfd_link_hash_new) 13404 { 13405 /* Output absolute symbols so that we can create relocations 13406 against them. For normal symbols we output a relocation 13407 against the section that contains them. */ 13408 if (! bfd_elf_link_record_dynamic_symbol (info, h)) 13409 return FALSE; 13410 } 13411 13412 } 13413 else 13414 { 13415 /* For the non-shared case, discard space for relocs against 13416 symbols which turn out to need copy relocs or are not 13417 dynamic. */ 13418 13419 if (!h->non_got_ref 13420 && ((h->def_dynamic 13421 && !h->def_regular) 13422 || (htab->root.dynamic_sections_created 13423 && (h->root.type == bfd_link_hash_undefweak 13424 || h->root.type == bfd_link_hash_undefined)))) 13425 { 13426 /* Make sure this symbol is output as a dynamic symbol. 13427 Undefined weak syms won't yet be marked as dynamic. */ 13428 if (h->dynindx == -1 13429 && !h->forced_local) 13430 { 13431 if (! bfd_elf_link_record_dynamic_symbol (info, h)) 13432 return FALSE; 13433 } 13434 13435 /* If that succeeded, we know we'll be keeping all the 13436 relocs. */ 13437 if (h->dynindx != -1) 13438 goto keep; 13439 } 13440 13441 eh->dyn_relocs = NULL; 13442 13443 keep: ; 13444 } 13445 13446 /* Finally, allocate space. */ 13447 for (p = eh->dyn_relocs; p != NULL; p = p->next) 13448 { 13449 asection *sreloc = elf_section_data (p->sec)->sreloc; 13450 if (h->type == STT_GNU_IFUNC 13451 && eh->plt.noncall_refcount == 0 13452 && SYMBOL_REFERENCES_LOCAL (info, h)) 13453 elf32_arm_allocate_irelocs (info, sreloc, p->count); 13454 else 13455 elf32_arm_allocate_dynrelocs (info, sreloc, p->count); 13456 } 13457 13458 return TRUE; 13459 } 13460 13461 /* Find any dynamic relocs that apply to read-only sections. 
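This is used as an elf_link_hash_traverse callback: it sets DF_TEXTREL when such a reloc is seen and returns FALSE merely to cut the traversal short, not to report an error.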
*/ 13462 13463 static bfd_boolean 13464 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf) 13465 { 13466 struct elf32_arm_link_hash_entry * eh; 13467 struct elf_dyn_relocs * p; 13468 13469 eh = (struct elf32_arm_link_hash_entry *) h; 13470 for (p = eh->dyn_relocs; p != NULL; p = p->next) 13471 { 13472 asection *s = p->sec; 13473 13474 if (s != NULL && (s->flags & SEC_READONLY) != 0) 13475 { 13476 struct bfd_link_info *info = (struct bfd_link_info *) inf; 13477 13478 if (info->warn_shared_textrel) 13479 (*_bfd_error_handler) 13480 (_("warning: dynamic relocation in readonly section `%s'"), 13481 h->root.root.string); 13482 info->flags |= DF_TEXTREL; 13483 13484 /* Not an error, just cut short the traversal. */ 13485 return FALSE; 13486 } 13487 } 13488 return TRUE; 13489 } 13490 13491 void 13492 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info, 13493 int byteswap_code) 13494 { 13495 struct elf32_arm_link_hash_table *globals; 13496 13497 globals = elf32_arm_hash_table (info); 13498 if (globals == NULL) 13499 return; 13500 13501 globals->byteswap_code = byteswap_code; 13502 } 13503 13504 /* Set the sizes of the dynamic sections. */ 13505 13506 static bfd_boolean 13507 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED, 13508 struct bfd_link_info * info) 13509 { 13510 bfd * dynobj; 13511 asection * s; 13512 bfd_boolean plt; 13513 bfd_boolean relocs; 13514 bfd *ibfd; 13515 struct elf32_arm_link_hash_table *htab; 13516 13517 htab = elf32_arm_hash_table (info); 13518 if (htab == NULL) 13519 return FALSE; 13520 13521 dynobj = elf_hash_table (info)->dynobj; 13522 BFD_ASSERT (dynobj != NULL); 13523 check_use_blx (htab); 13524 13525 if (elf_hash_table (info)->dynamic_sections_created) 13526 { 13527 /* Set the contents of the .interp section to the interpreter. */ 13528 if (info->executable) 13529 { 13530 s = bfd_get_linker_section (dynobj, ".interp"); 13531 BFD_ASSERT (s != NULL); 13532 s->size = sizeof ELF_DYNAMIC_INTERPRETER; 13533 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER; 13534 } 13535 } 13536 13537 /* Set up .got offsets for local syms, and space for local dynamic 13538 relocs. */ 13539 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next) 13540 { 13541 bfd_signed_vma *local_got; 13542 bfd_signed_vma *end_local_got; 13543 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt; 13544 char *local_tls_type; 13545 bfd_vma *local_tlsdesc_gotent; 13546 bfd_size_type locsymcount; 13547 Elf_Internal_Shdr *symtab_hdr; 13548 asection *srel; 13549 bfd_boolean is_vxworks = htab->vxworks_p; 13550 unsigned int symndx; 13551 13552 if (! is_arm_elf (ibfd)) 13553 continue; 13554 13555 for (s = ibfd->sections; s != NULL; s = s->next) 13556 { 13557 struct elf_dyn_relocs *p; 13558 13559 for (p = (struct elf_dyn_relocs *) 13560 elf_section_data (s)->local_dynrel; p != NULL; p = p->next) 13561 { 13562 if (!bfd_is_abs_section (p->sec) 13563 && bfd_is_abs_section (p->sec->output_section)) 13564 { 13565 /* Input section has been discarded, either because 13566 it is a copy of a linkonce section or due to 13567 linker script /DISCARD/, so we'll be discarding 13568 the relocs too. */ 13569 } 13570 else if (is_vxworks 13571 && strcmp (p->sec->output_section->name, 13572 ".tls_vars") == 0) 13573 { 13574 /* Relocations in vxworks .tls_vars sections are 13575 handled specially by the loader. 
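No space is reserved for them here; the matching relocs against global symbols were already discarded in allocate_dynrelocs_for_symbol.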
*/ 13576 } 13577 else if (p->count != 0) 13578 { 13579 srel = elf_section_data (p->sec)->sreloc; 13580 elf32_arm_allocate_dynrelocs (info, srel, p->count); 13581 if ((p->sec->output_section->flags & SEC_READONLY) != 0) 13582 info->flags |= DF_TEXTREL; 13583 } 13584 } 13585 } 13586 13587 local_got = elf_local_got_refcounts (ibfd); 13588 if (!local_got) 13589 continue; 13590 13591 symtab_hdr = & elf_symtab_hdr (ibfd); 13592 locsymcount = symtab_hdr->sh_info; 13593 end_local_got = local_got + locsymcount; 13594 local_iplt_ptr = elf32_arm_local_iplt (ibfd); 13595 local_tls_type = elf32_arm_local_got_tls_type (ibfd); 13596 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd); 13597 symndx = 0; 13598 s = htab->root.sgot; 13599 srel = htab->root.srelgot; 13600 for (; local_got < end_local_got; 13601 ++local_got, ++local_iplt_ptr, ++local_tls_type, 13602 ++local_tlsdesc_gotent, ++symndx) 13603 { 13604 *local_tlsdesc_gotent = (bfd_vma) -1; 13605 local_iplt = *local_iplt_ptr; 13606 if (local_iplt != NULL) 13607 { 13608 struct elf_dyn_relocs *p; 13609 13610 if (local_iplt->root.refcount > 0) 13611 { 13612 elf32_arm_allocate_plt_entry (info, TRUE, 13613 &local_iplt->root, 13614 &local_iplt->arm); 13615 if (local_iplt->arm.noncall_refcount == 0) 13616 /* All references to the PLT are calls, so all 13617 non-call references can resolve directly to the 13618 run-time target. This means that the .got entry 13619 would be the same as the .igot.plt entry, so there's 13620 no point creating both. */ 13621 *local_got = 0; 13622 } 13623 else 13624 { 13625 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0); 13626 local_iplt->root.offset = (bfd_vma) -1; 13627 } 13628 13629 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next) 13630 { 13631 asection *psrel; 13632 13633 psrel = elf_section_data (p->sec)->sreloc; 13634 if (local_iplt->arm.noncall_refcount == 0) 13635 elf32_arm_allocate_irelocs (info, psrel, p->count); 13636 else 13637 elf32_arm_allocate_dynrelocs (info, psrel, p->count); 13638 } 13639 } 13640 if (*local_got > 0) 13641 { 13642 Elf_Internal_Sym *isym; 13643 13644 *local_got = s->size; 13645 if (*local_tls_type & GOT_TLS_GD) 13646 /* TLS_GD relocs need an 8-byte structure in the GOT. */ 13647 s->size += 8; 13648 if (*local_tls_type & GOT_TLS_GDESC) 13649 { 13650 *local_tlsdesc_gotent = htab->root.sgotplt->size 13651 - elf32_arm_compute_jump_table_size (htab); 13652 htab->root.sgotplt->size += 8; 13653 *local_got = (bfd_vma) -2; 13654 /* plt.got_offset needs to know there's a TLS_DESC 13655 reloc in the middle of .got.plt. */ 13656 htab->num_tls_desc++; 13657 } 13658 if (*local_tls_type & GOT_TLS_IE) 13659 s->size += 4; 13660 13661 if (*local_tls_type & GOT_NORMAL) 13662 { 13663 /* If the symbol is both GD and GDESC, *local_got 13664 may have been overwritten. */ 13665 *local_got = s->size; 13666 s->size += 4; 13667 } 13668 13669 isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx); 13670 if (isym == NULL) 13671 return FALSE; 13672 13673 /* If all references to an STT_GNU_IFUNC PLT are calls, 13674 then all non-call references, including this GOT entry, 13675 resolve directly to the run-time target. 
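The GOT entry is then covered by an R_ARM_IRELATIVE relocation, which the dynamic linker resolves by calling the IFUNC resolver, rather than by an ordinary dynamic relocation.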
*/ 13676 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC 13677 && (local_iplt == NULL 13678 || local_iplt->arm.noncall_refcount == 0)) 13679 elf32_arm_allocate_irelocs (info, srel, 1); 13680 else if ((info->shared && !(*local_tls_type & GOT_TLS_GDESC)) 13681 || *local_tls_type & GOT_TLS_GD) 13682 elf32_arm_allocate_dynrelocs (info, srel, 1); 13683 13684 if (info->shared && *local_tls_type & GOT_TLS_GDESC) 13685 { 13686 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1); 13687 htab->tls_trampoline = -1; 13688 } 13689 } 13690 else 13691 *local_got = (bfd_vma) -1; 13692 } 13693 } 13694 13695 if (htab->tls_ldm_got.refcount > 0) 13696 { 13697 /* Allocate two GOT entries and one dynamic relocation (if necessary) 13698 for R_ARM_TLS_LDM32 relocations. */ 13699 htab->tls_ldm_got.offset = htab->root.sgot->size; 13700 htab->root.sgot->size += 8; 13701 if (info->shared) 13702 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1); 13703 } 13704 else 13705 htab->tls_ldm_got.offset = -1; 13706 13707 /* Allocate global sym .plt and .got entries, and space for global 13708 sym dynamic relocs. */ 13709 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info); 13710 13711 /* Here we rummage through the found bfds to collect glue information. */ 13712 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next) 13713 { 13714 if (! is_arm_elf (ibfd)) 13715 continue; 13716 13717 /* Initialise mapping tables for code/data. */ 13718 bfd_elf32_arm_init_maps (ibfd); 13719 13720 if (!bfd_elf32_arm_process_before_allocation (ibfd, info) 13721 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)) 13722 /* xgettext:c-format */ 13723 _bfd_error_handler (_("Errors encountered processing file %s"), 13724 ibfd->filename); 13725 } 13726 13727 /* Allocate space for the glue sections now that we've sized them. */ 13728 bfd_elf32_arm_allocate_interworking_sections (info); 13729 13730 /* For every jump slot reserved in the sgotplt, reloc_count is 13731 incremented. However, when we reserve space for TLS descriptors, 13732 it's not incremented, so in order to compute the space reserved 13733 for them, it suffices to multiply the reloc count by the jump 13734 slot size. */ 13735 if (htab->root.srelplt) 13736 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab); 13737 13738 if (htab->tls_trampoline) 13739 { 13740 if (htab->root.splt->size == 0) 13741 htab->root.splt->size += htab->plt_header_size; 13742 13743 htab->tls_trampoline = htab->root.splt->size; 13744 htab->root.splt->size += htab->plt_entry_size; 13745 13746 /* If we're not using lazy TLS relocations, don't generate the 13747 PLT and GOT entries they require. */ 13748 if (!(info->flags & DF_BIND_NOW)) 13749 { 13750 htab->dt_tlsdesc_got = htab->root.sgot->size; 13751 htab->root.sgot->size += 4; 13752 13753 htab->dt_tlsdesc_plt = htab->root.splt->size; 13754 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline); 13755 } 13756 } 13757 13758 /* The check_relocs and adjust_dynamic_symbol entry points have 13759 determined the sizes of the various dynamic sections. Allocate 13760 memory for them. */ 13761 plt = FALSE; 13762 relocs = FALSE; 13763 for (s = dynobj->sections; s != NULL; s = s->next) 13764 { 13765 const char * name; 13766 13767 if ((s->flags & SEC_LINKER_CREATED) == 0) 13768 continue; 13769 13770 /* It's OK to base decisions on the section name, because none 13771 of the dynobj section names depend upon the input files. 
*/ 13772 name = bfd_get_section_name (dynobj, s); 13773 13774 if (s == htab->root.splt) 13775 { 13776 /* Remember whether there is a PLT. */ 13777 plt = s->size != 0; 13778 } 13779 else if (CONST_STRNEQ (name, ".rel")) 13780 { 13781 if (s->size != 0) 13782 { 13783 /* Remember whether there are any reloc sections other 13784 than .rel(a).plt and .rela.plt.unloaded. */ 13785 if (s != htab->root.srelplt && s != htab->srelplt2) 13786 relocs = TRUE; 13787 13788 /* We use the reloc_count field as a counter if we need 13789 to copy relocs into the output file. */ 13790 s->reloc_count = 0; 13791 } 13792 } 13793 else if (s != htab->root.sgot 13794 && s != htab->root.sgotplt 13795 && s != htab->root.iplt 13796 && s != htab->root.igotplt 13797 && s != htab->sdynbss) 13798 { 13799 /* It's not one of our sections, so don't allocate space. */ 13800 continue; 13801 } 13802 13803 if (s->size == 0) 13804 { 13805 /* If we don't need this section, strip it from the 13806 output file. This is mostly to handle .rel(a).bss and 13807 .rel(a).plt. We must create both sections in 13808 create_dynamic_sections, because they must be created 13809 before the linker maps input sections to output 13810 sections. The linker does that before 13811 adjust_dynamic_symbol is called, and it is that 13812 function which decides whether anything needs to go 13813 into these sections. */ 13814 s->flags |= SEC_EXCLUDE; 13815 continue; 13816 } 13817 13818 if ((s->flags & SEC_HAS_CONTENTS) == 0) 13819 continue; 13820 13821 /* Allocate memory for the section contents. */ 13822 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size); 13823 if (s->contents == NULL) 13824 return FALSE; 13825 } 13826 13827 if (elf_hash_table (info)->dynamic_sections_created) 13828 { 13829 /* Add some entries to the .dynamic section. We fill in the 13830 values later, in elf32_arm_finish_dynamic_sections, but we 13831 must add the entries now so that we get the correct size for 13832 the .dynamic section. The DT_DEBUG entry is filled in by the 13833 dynamic linker and used by the debugger. */ 13834 #define add_dynamic_entry(TAG, VAL) \ 13835 _bfd_elf_add_dynamic_entry (info, TAG, VAL) 13836 13837 if (info->executable) 13838 { 13839 if (!add_dynamic_entry (DT_DEBUG, 0)) 13840 return FALSE; 13841 } 13842 13843 if (plt) 13844 { 13845 if ( !add_dynamic_entry (DT_PLTGOT, 0) 13846 || !add_dynamic_entry (DT_PLTRELSZ, 0) 13847 || !add_dynamic_entry (DT_PLTREL, 13848 htab->use_rel ? DT_REL : DT_RELA) 13849 || !add_dynamic_entry (DT_JMPREL, 0)) 13850 return FALSE; 13851 13852 if (htab->dt_tlsdesc_plt && 13853 (!add_dynamic_entry (DT_TLSDESC_PLT,0) 13854 || !add_dynamic_entry (DT_TLSDESC_GOT,0))) 13855 return FALSE; 13856 } 13857 13858 if (relocs) 13859 { 13860 if (htab->use_rel) 13861 { 13862 if (!add_dynamic_entry (DT_REL, 0) 13863 || !add_dynamic_entry (DT_RELSZ, 0) 13864 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab))) 13865 return FALSE; 13866 } 13867 else 13868 { 13869 if (!add_dynamic_entry (DT_RELA, 0) 13870 || !add_dynamic_entry (DT_RELASZ, 0) 13871 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab))) 13872 return FALSE; 13873 } 13874 } 13875 13876 /* If any dynamic relocs apply to a read-only section, 13877 then we need a DT_TEXTREL entry. 
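The traversal below lets elf32_arm_readonly_dynrelocs set DF_TEXTREL in info->flags; the test that follows then adds the tag.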
*/ 13878 if ((info->flags & DF_TEXTREL) == 0) 13879 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs, 13880 info); 13881 13882 if ((info->flags & DF_TEXTREL) != 0) 13883 { 13884 if (!add_dynamic_entry (DT_TEXTREL, 0)) 13885 return FALSE; 13886 } 13887 if (htab->vxworks_p 13888 && !elf_vxworks_add_dynamic_entries (output_bfd, info)) 13889 return FALSE; 13890 } 13891 #undef add_dynamic_entry 13892 13893 return TRUE; 13894 } 13895 13896 /* Size sections even though they're not dynamic. We use it to setup 13897 _TLS_MODULE_BASE_, if needed. */ 13898 13899 static bfd_boolean 13900 elf32_arm_always_size_sections (bfd *output_bfd, 13901 struct bfd_link_info *info) 13902 { 13903 asection *tls_sec; 13904 13905 if (info->relocatable) 13906 return TRUE; 13907 13908 tls_sec = elf_hash_table (info)->tls_sec; 13909 13910 if (tls_sec) 13911 { 13912 struct elf_link_hash_entry *tlsbase; 13913 13914 tlsbase = elf_link_hash_lookup 13915 (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE); 13916 13917 if (tlsbase) 13918 { 13919 struct bfd_link_hash_entry *bh = NULL; 13920 const struct elf_backend_data *bed 13921 = get_elf_backend_data (output_bfd); 13922 13923 if (!(_bfd_generic_link_add_one_symbol 13924 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL, 13925 tls_sec, 0, NULL, FALSE, 13926 bed->collect, &bh))) 13927 return FALSE; 13928 13929 tlsbase->type = STT_TLS; 13930 tlsbase = (struct elf_link_hash_entry *)bh; 13931 tlsbase->def_regular = 1; 13932 tlsbase->other = STV_HIDDEN; 13933 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE); 13934 } 13935 } 13936 return TRUE; 13937 } 13938 13939 /* Finish up dynamic symbol handling. We set the contents of various 13940 dynamic sections here. */ 13941 13942 static bfd_boolean 13943 elf32_arm_finish_dynamic_symbol (bfd * output_bfd, 13944 struct bfd_link_info * info, 13945 struct elf_link_hash_entry * h, 13946 Elf_Internal_Sym * sym) 13947 { 13948 struct elf32_arm_link_hash_table *htab; 13949 struct elf32_arm_link_hash_entry *eh; 13950 13951 htab = elf32_arm_hash_table (info); 13952 if (htab == NULL) 13953 return FALSE; 13954 13955 eh = (struct elf32_arm_link_hash_entry *) h; 13956 13957 if (h->plt.offset != (bfd_vma) -1) 13958 { 13959 if (!eh->is_iplt) 13960 { 13961 BFD_ASSERT (h->dynindx != -1); 13962 elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt, 13963 h->dynindx, 0); 13964 } 13965 13966 if (!h->def_regular) 13967 { 13968 /* Mark the symbol as undefined, rather than as defined in 13969 the .plt section. Leave the value alone. */ 13970 sym->st_shndx = SHN_UNDEF; 13971 /* If the symbol is weak, we do need to clear the value. 13972 Otherwise, the PLT entry would provide a definition for 13973 the symbol even if the symbol wasn't defined anywhere, 13974 and so the symbol would never be NULL. */ 13975 if (!h->ref_regular_nonweak) 13976 sym->st_value = 0; 13977 } 13978 else if (eh->is_iplt && eh->plt.noncall_refcount != 0) 13979 { 13980 /* At least one non-call relocation references this .iplt entry, 13981 so the .iplt entry is the function's canonical address. 
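Redirect the dynamic symbol's type, section and value to that .iplt entry so that every user of the symbol sees the same canonical address.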
*/ 13982 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC); 13983 sym->st_target_internal = ST_BRANCH_TO_ARM; 13984 sym->st_shndx = (_bfd_elf_section_from_bfd_section 13985 (output_bfd, htab->root.iplt->output_section)); 13986 sym->st_value = (h->plt.offset 13987 + htab->root.iplt->output_section->vma 13988 + htab->root.iplt->output_offset); 13989 } 13990 } 13991 13992 if (h->needs_copy) 13993 { 13994 asection * s; 13995 Elf_Internal_Rela rel; 13996 13997 /* This symbol needs a copy reloc. Set it up. */ 13998 BFD_ASSERT (h->dynindx != -1 13999 && (h->root.type == bfd_link_hash_defined 14000 || h->root.type == bfd_link_hash_defweak)); 14001 14002 s = htab->srelbss; 14003 BFD_ASSERT (s != NULL); 14004 14005 rel.r_addend = 0; 14006 rel.r_offset = (h->root.u.def.value 14007 + h->root.u.def.section->output_section->vma 14008 + h->root.u.def.section->output_offset); 14009 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY); 14010 elf32_arm_add_dynreloc (output_bfd, info, s, &rel); 14011 } 14012 14013 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks, 14014 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative 14015 to the ".got" section. */ 14016 if (strcmp (h->root.root.string, "_DYNAMIC") == 0 14017 || (!htab->vxworks_p && h == htab->root.hgot)) 14018 sym->st_shndx = SHN_ABS; 14019 14020 return TRUE; 14021 } 14022 14023 static void 14024 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd, 14025 void *contents, 14026 const unsigned long *template, unsigned count) 14027 { 14028 unsigned ix; 14029 14030 for (ix = 0; ix != count; ix++) 14031 { 14032 unsigned long insn = template[ix]; 14033 14034 /* Emit mov pc,rx if bx is not permitted. */ 14035 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10) 14036 insn = (insn & 0xf000000f) | 0x01a0f000; 14037 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4); 14038 } 14039 } 14040 14041 /* Finish up the dynamic sections. */ 14042 14043 static bfd_boolean 14044 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info) 14045 { 14046 bfd * dynobj; 14047 asection * sgot; 14048 asection * sdyn; 14049 struct elf32_arm_link_hash_table *htab; 14050 14051 htab = elf32_arm_hash_table (info); 14052 if (htab == NULL) 14053 return FALSE; 14054 14055 dynobj = elf_hash_table (info)->dynobj; 14056 14057 sgot = htab->root.sgotplt; 14058 /* A broken linker script might have discarded the dynamic sections. 14059 Catch this here so that we do not seg-fault later on. 
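Returning FALSE here turns the problem into an ordinary link failure.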
*/ 14060 if (sgot != NULL && bfd_is_abs_section (sgot->output_section)) 14061 return FALSE; 14062 sdyn = bfd_get_linker_section (dynobj, ".dynamic"); 14063 14064 if (elf_hash_table (info)->dynamic_sections_created) 14065 { 14066 asection *splt; 14067 Elf32_External_Dyn *dyncon, *dynconend; 14068 14069 splt = htab->root.splt; 14070 BFD_ASSERT (splt != NULL && sdyn != NULL); 14071 BFD_ASSERT (htab->symbian_p || sgot != NULL); 14072 14073 dyncon = (Elf32_External_Dyn *) sdyn->contents; 14074 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size); 14075 14076 for (; dyncon < dynconend; dyncon++) 14077 { 14078 Elf_Internal_Dyn dyn; 14079 const char * name; 14080 asection * s; 14081 14082 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn); 14083 14084 switch (dyn.d_tag) 14085 { 14086 unsigned int type; 14087 14088 default: 14089 if (htab->vxworks_p 14090 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn)) 14091 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 14092 break; 14093 14094 case DT_HASH: 14095 name = ".hash"; 14096 goto get_vma_if_bpabi; 14097 case DT_STRTAB: 14098 name = ".dynstr"; 14099 goto get_vma_if_bpabi; 14100 case DT_SYMTAB: 14101 name = ".dynsym"; 14102 goto get_vma_if_bpabi; 14103 case DT_VERSYM: 14104 name = ".gnu.version"; 14105 goto get_vma_if_bpabi; 14106 case DT_VERDEF: 14107 name = ".gnu.version_d"; 14108 goto get_vma_if_bpabi; 14109 case DT_VERNEED: 14110 name = ".gnu.version_r"; 14111 goto get_vma_if_bpabi; 14112 14113 case DT_PLTGOT: 14114 name = ".got"; 14115 goto get_vma; 14116 case DT_JMPREL: 14117 name = RELOC_SECTION (htab, ".plt"); 14118 get_vma: 14119 s = bfd_get_section_by_name (output_bfd, name); 14120 if (s == NULL) 14121 { 14122 /* PR ld/14397: Issue an error message if a required section is missing. */ 14123 (*_bfd_error_handler) 14124 (_("error: required section '%s' not found in the linker script"), name); 14125 bfd_set_error (bfd_error_invalid_operation); 14126 return FALSE; 14127 } 14128 if (!htab->symbian_p) 14129 dyn.d_un.d_ptr = s->vma; 14130 else 14131 /* In the BPABI, tags in the PT_DYNAMIC section point 14132 at the file offset, not the memory address, for the 14133 convenience of the post linker. */ 14134 dyn.d_un.d_ptr = s->filepos; 14135 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 14136 break; 14137 14138 get_vma_if_bpabi: 14139 if (htab->symbian_p) 14140 goto get_vma; 14141 break; 14142 14143 case DT_PLTRELSZ: 14144 s = htab->root.srelplt; 14145 BFD_ASSERT (s != NULL); 14146 dyn.d_un.d_val = s->size; 14147 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 14148 break; 14149 14150 case DT_RELSZ: 14151 case DT_RELASZ: 14152 if (!htab->symbian_p) 14153 { 14154 /* My reading of the SVR4 ABI indicates that the 14155 procedure linkage table relocs (DT_JMPREL) should be 14156 included in the overall relocs (DT_REL). This is 14157 what Solaris does. However, UnixWare can not handle 14158 that case. Therefore, we override the DT_RELSZ entry 14159 here to make it not include the JMPREL relocs. Since 14160 the linker script arranges for .rel(a).plt to follow all 14161 other relocation sections, we don't have to worry 14162 about changing the DT_REL entry. */ 14163 s = htab->root.srelplt; 14164 if (s != NULL) 14165 dyn.d_un.d_val -= s->size; 14166 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon); 14167 break; 14168 } 14169 /* Fall through. */ 14170 14171 case DT_REL: 14172 case DT_RELA: 14173 /* In the BPABI, the DT_REL tag must point at the file 14174 offset, not the VMA, of the first relocation 14175 section. 
So, we use code similar to that in
14176 elflink.c, but do not check for SHF_ALLOC on the
14177 relocation section, since relocation sections are
14178 never allocated under the BPABI. The comments above
14179 about UnixWare notwithstanding, we include all of the
14180 relocations here. */
14181 if (htab->symbian_p)
14182 {
14183 unsigned int i;
14184 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
14185 ? SHT_REL : SHT_RELA);
14186 dyn.d_un.d_val = 0;
14187 for (i = 1; i < elf_numsections (output_bfd); i++)
14188 {
14189 Elf_Internal_Shdr *hdr
14190 = elf_elfsections (output_bfd)[i];
14191 if (hdr->sh_type == type)
14192 {
14193 if (dyn.d_tag == DT_RELSZ
14194 || dyn.d_tag == DT_RELASZ)
14195 dyn.d_un.d_val += hdr->sh_size;
14196 else if ((ufile_ptr) hdr->sh_offset
14197 <= dyn.d_un.d_val - 1)
14198 dyn.d_un.d_val = hdr->sh_offset;
14199 }
14200 }
14201 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14202 }
14203 break;
14204
14205 case DT_TLSDESC_PLT:
14206 s = htab->root.splt;
14207 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
14208 + htab->dt_tlsdesc_plt);
14209 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14210 break;
14211
14212 case DT_TLSDESC_GOT:
14213 s = htab->root.sgot;
14214 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
14215 + htab->dt_tlsdesc_got);
14216 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14217 break;
14218
14219 /* Set the bottom bit of DT_INIT/FINI if the
14220 corresponding function is Thumb. */
14221 case DT_INIT:
14222 name = info->init_function;
14223 goto get_sym;
14224 case DT_FINI:
14225 name = info->fini_function;
14226 get_sym:
14227 /* If it wasn't set by elf_bfd_final_link
14228 then there is nothing to adjust. */
14229 if (dyn.d_un.d_val != 0)
14230 {
14231 struct elf_link_hash_entry * eh;
14232
14233 eh = elf_link_hash_lookup (elf_hash_table (info), name,
14234 FALSE, FALSE, TRUE);
14235 if (eh != NULL && eh->target_internal == ST_BRANCH_TO_THUMB)
14236 {
14237 dyn.d_un.d_val |= 1;
14238 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14239 }
14240 }
14241 break;
14242 }
14243 }
14244
14245 /* Fill in the first entry in the procedure linkage table. */
14246 if (splt->size > 0 && htab->plt_header_size)
14247 {
14248 const bfd_vma *plt0_entry;
14249 bfd_vma got_address, plt_address, got_displacement;
14250
14251 /* Calculate the addresses of the GOT and PLT. */
14252 got_address = sgot->output_section->vma + sgot->output_offset;
14253 plt_address = splt->output_section->vma + splt->output_offset;
14254
14255 if (htab->vxworks_p)
14256 {
14257 /* The VxWorks GOT is relocated by the dynamic linker.
14258 Therefore, we must emit relocations rather than simply
14259 computing the values now. */
14260 Elf_Internal_Rela rel;
14261
14262 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
14263 put_arm_insn (htab, output_bfd, plt0_entry[0],
14264 splt->contents + 0);
14265 put_arm_insn (htab, output_bfd, plt0_entry[1],
14266 splt->contents + 4);
14267 put_arm_insn (htab, output_bfd, plt0_entry[2],
14268 splt->contents + 8);
14269 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
14270
14271 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.
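It occupies the first slot of .rel(a).plt.unloaded; the per-PLT-entry relocations corrected at the end of this function follow it.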
*/ 14272 rel.r_offset = plt_address + 12; 14273 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32); 14274 rel.r_addend = 0; 14275 SWAP_RELOC_OUT (htab) (output_bfd, &rel, 14276 htab->srelplt2->contents); 14277 } 14278 else if (htab->nacl_p) 14279 { 14280 unsigned int i; 14281 14282 got_displacement = got_address + 8 - (plt_address + 16); 14283 14284 put_arm_insn (htab, output_bfd, 14285 elf32_arm_nacl_plt0_entry[0] 14286 | arm_movw_immediate (got_displacement), 14287 splt->contents + 0); 14288 put_arm_insn (htab, output_bfd, 14289 elf32_arm_nacl_plt0_entry[1] 14290 | arm_movt_immediate (got_displacement), 14291 splt->contents + 4); 14292 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i) 14293 put_arm_insn (htab, output_bfd, 14294 elf32_arm_nacl_plt0_entry[i], 14295 splt->contents + (i * 4)); 14296 } 14297 else 14298 { 14299 got_displacement = got_address - (plt_address + 16); 14300 14301 plt0_entry = elf32_arm_plt0_entry; 14302 put_arm_insn (htab, output_bfd, plt0_entry[0], 14303 splt->contents + 0); 14304 put_arm_insn (htab, output_bfd, plt0_entry[1], 14305 splt->contents + 4); 14306 put_arm_insn (htab, output_bfd, plt0_entry[2], 14307 splt->contents + 8); 14308 put_arm_insn (htab, output_bfd, plt0_entry[3], 14309 splt->contents + 12); 14310 14311 #ifdef FOUR_WORD_PLT 14312 /* The displacement value goes in the otherwise-unused 14313 last word of the second entry. */ 14314 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28); 14315 #else 14316 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16); 14317 #endif 14318 } 14319 } 14320 14321 /* UnixWare sets the entsize of .plt to 4, although that doesn't 14322 really seem like the right value. */ 14323 if (splt->output_section->owner == output_bfd) 14324 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4; 14325 14326 if (htab->dt_tlsdesc_plt) 14327 { 14328 bfd_vma got_address 14329 = sgot->output_section->vma + sgot->output_offset; 14330 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma 14331 + htab->root.sgot->output_offset); 14332 bfd_vma plt_address 14333 = splt->output_section->vma + splt->output_offset; 14334 14335 arm_put_trampoline (htab, output_bfd, 14336 splt->contents + htab->dt_tlsdesc_plt, 14337 dl_tlsdesc_lazy_trampoline, 6); 14338 14339 bfd_put_32 (output_bfd, 14340 gotplt_address + htab->dt_tlsdesc_got 14341 - (plt_address + htab->dt_tlsdesc_plt) 14342 - dl_tlsdesc_lazy_trampoline[6], 14343 splt->contents + htab->dt_tlsdesc_plt + 24); 14344 bfd_put_32 (output_bfd, 14345 got_address - (plt_address + htab->dt_tlsdesc_plt) 14346 - dl_tlsdesc_lazy_trampoline[7], 14347 splt->contents + htab->dt_tlsdesc_plt + 24 + 4); 14348 } 14349 14350 if (htab->tls_trampoline) 14351 { 14352 arm_put_trampoline (htab, output_bfd, 14353 splt->contents + htab->tls_trampoline, 14354 tls_trampoline, 3); 14355 #ifdef FOUR_WORD_PLT 14356 bfd_put_32 (output_bfd, 0x00000000, 14357 splt->contents + htab->tls_trampoline + 12); 14358 #endif 14359 } 14360 14361 if (htab->vxworks_p && !info->shared && htab->root.splt->size > 0) 14362 { 14363 /* Correct the .rel(a).plt.unloaded relocations. They will have 14364 incorrect symbol indexes. 
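Each PLT entry contributed two of them; only the symbol index in r_info is wrong, so the loop below rewrites it to refer to the GOT and PLT symbols recorded in the hash table.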
*/ 14365 int num_plts; 14366 unsigned char *p; 14367 14368 num_plts = ((htab->root.splt->size - htab->plt_header_size) 14369 / htab->plt_entry_size); 14370 p = htab->srelplt2->contents + RELOC_SIZE (htab); 14371 14372 for (; num_plts; num_plts--) 14373 { 14374 Elf_Internal_Rela rel; 14375 14376 SWAP_RELOC_IN (htab) (output_bfd, p, &rel); 14377 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32); 14378 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p); 14379 p += RELOC_SIZE (htab); 14380 14381 SWAP_RELOC_IN (htab) (output_bfd, p, &rel); 14382 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32); 14383 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p); 14384 p += RELOC_SIZE (htab); 14385 } 14386 } 14387 } 14388 14389 /* Fill in the first three entries in the global offset table. */ 14390 if (sgot) 14391 { 14392 if (sgot->size > 0) 14393 { 14394 if (sdyn == NULL) 14395 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents); 14396 else 14397 bfd_put_32 (output_bfd, 14398 sdyn->output_section->vma + sdyn->output_offset, 14399 sgot->contents); 14400 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4); 14401 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8); 14402 } 14403 14404 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4; 14405 } 14406 14407 return TRUE; 14408 } 14409 14410 static void 14411 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED) 14412 { 14413 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */ 14414 struct elf32_arm_link_hash_table *globals; 14415 14416 i_ehdrp = elf_elfheader (abfd); 14417 14418 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN) 14419 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM; 14420 else 14421 i_ehdrp->e_ident[EI_OSABI] = 0; 14422 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION; 14423 14424 if (link_info) 14425 { 14426 globals = elf32_arm_hash_table (link_info); 14427 if (globals != NULL && globals->byteswap_code) 14428 i_ehdrp->e_flags |= EF_ARM_BE8; 14429 } 14430 } 14431 14432 static enum elf_reloc_type_class 14433 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela) 14434 { 14435 switch ((int) ELF32_R_TYPE (rela->r_info)) 14436 { 14437 case R_ARM_RELATIVE: 14438 return reloc_class_relative; 14439 case R_ARM_JUMP_SLOT: 14440 return reloc_class_plt; 14441 case R_ARM_COPY: 14442 return reloc_class_copy; 14443 default: 14444 return reloc_class_normal; 14445 } 14446 } 14447 14448 static void 14449 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED) 14450 { 14451 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION); 14452 } 14453 14454 /* Return TRUE if this is an unwinding table entry. */ 14455 14456 static bfd_boolean 14457 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name) 14458 { 14459 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind) 14460 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once)); 14461 } 14462 14463 14464 /* Set the type and flags for an ARM section. We do this by 14465 the section name, which is a hack, but ought to work. */ 14466 14467 static bfd_boolean 14468 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec) 14469 { 14470 const char * name; 14471 14472 name = bfd_get_section_name (abfd, sec); 14473 14474 if (is_arm_elf_unwind_section_name (abfd, name)) 14475 { 14476 hdr->sh_type = SHT_ARM_EXIDX; 14477 hdr->sh_flags |= SHF_LINK_ORDER; 14478 } 14479 return TRUE; 14480 } 14481 14482 /* Handle an ARM specific section when reading an object file. 
This is 14483 called when bfd_section_from_shdr finds a section with an unknown 14484 type. */ 14485 14486 static bfd_boolean 14487 elf32_arm_section_from_shdr (bfd *abfd, 14488 Elf_Internal_Shdr * hdr, 14489 const char *name, 14490 int shindex) 14491 { 14492 /* There ought to be a place to keep ELF backend specific flags, but 14493 at the moment there isn't one. We just keep track of the 14494 sections by their name, instead. Fortunately, the ABI gives 14495 names for all the ARM specific sections, so we will probably get 14496 away with this. */ 14497 switch (hdr->sh_type) 14498 { 14499 case SHT_ARM_EXIDX: 14500 case SHT_ARM_PREEMPTMAP: 14501 case SHT_ARM_ATTRIBUTES: 14502 break; 14503 14504 default: 14505 return FALSE; 14506 } 14507 14508 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex)) 14509 return FALSE; 14510 14511 return TRUE; 14512 } 14513 14514 static _arm_elf_section_data * 14515 get_arm_elf_section_data (asection * sec) 14516 { 14517 if (sec && sec->owner && is_arm_elf (sec->owner)) 14518 return elf32_arm_section_data (sec); 14519 else 14520 return NULL; 14521 } 14522 14523 typedef struct 14524 { 14525 void *flaginfo; 14526 struct bfd_link_info *info; 14527 asection *sec; 14528 int sec_shndx; 14529 int (*func) (void *, const char *, Elf_Internal_Sym *, 14530 asection *, struct elf_link_hash_entry *); 14531 } output_arch_syminfo; 14532 14533 enum map_symbol_type 14534 { 14535 ARM_MAP_ARM, 14536 ARM_MAP_THUMB, 14537 ARM_MAP_DATA 14538 }; 14539 14540 14541 /* Output a single mapping symbol. */ 14542 14543 static bfd_boolean 14544 elf32_arm_output_map_sym (output_arch_syminfo *osi, 14545 enum map_symbol_type type, 14546 bfd_vma offset) 14547 { 14548 static const char *names[3] = {"$a", "$t", "$d"}; 14549 Elf_Internal_Sym sym; 14550 14551 sym.st_value = osi->sec->output_section->vma 14552 + osi->sec->output_offset 14553 + offset; 14554 sym.st_size = 0; 14555 sym.st_other = 0; 14556 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE); 14557 sym.st_shndx = osi->sec_shndx; 14558 sym.st_target_internal = 0; 14559 elf32_arm_section_map_add (osi->sec, names[type][1], offset); 14560 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1; 14561 } 14562 14563 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT. 14564 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. 
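The mapping symbols ($a, $t and $d) let disassemblers and other consumers tell ARM code, Thumb code and literal data apart within the PLT.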
*/
14565
14566 static bfd_boolean
14567 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
14568 bfd_boolean is_iplt_entry_p,
14569 union gotplt_union *root_plt,
14570 struct arm_plt_info *arm_plt)
14571 {
14572 struct elf32_arm_link_hash_table *htab;
14573 bfd_vma addr, plt_header_size;
14574
14575 if (root_plt->offset == (bfd_vma) -1)
14576 return TRUE;
14577
14578 htab = elf32_arm_hash_table (osi->info);
14579 if (htab == NULL)
14580 return FALSE;
14581
14582 if (is_iplt_entry_p)
14583 {
14584 osi->sec = htab->root.iplt;
14585 plt_header_size = 0;
14586 }
14587 else
14588 {
14589 osi->sec = htab->root.splt;
14590 plt_header_size = htab->plt_header_size;
14591 }
14592 osi->sec_shndx = (_bfd_elf_section_from_bfd_section
14593 (osi->info->output_bfd, osi->sec->output_section));
14594
14595 addr = root_plt->offset & -2;
14596 if (htab->symbian_p)
14597 {
14598 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14599 return FALSE;
14600 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
14601 return FALSE;
14602 }
14603 else if (htab->vxworks_p)
14604 {
14605 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14606 return FALSE;
14607 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
14608 return FALSE;
14609 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
14610 return FALSE;
14611 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
14612 return FALSE;
14613 }
14614 else if (htab->nacl_p)
14615 {
14616 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14617 return FALSE;
14618 }
14619 else
14620 {
14621 bfd_boolean thumb_stub_p;
14622
14623 thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
14624 if (thumb_stub_p)
14625 {
14626 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
14627 return FALSE;
14628 }
14629 #ifdef FOUR_WORD_PLT
14630 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14631 return FALSE;
14632 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
14633 return FALSE;
14634 #else
14635 /* A three-word PLT with no Thumb thunk contains only ARM code,
14636 so we only need to output a mapping symbol for the first PLT entry and
14637 for entries with Thumb thunks. */
14638 if (thumb_stub_p || addr == plt_header_size)
14639 {
14640 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14641 return FALSE;
14642 }
14643 #endif
14644 }
14645
14646 return TRUE;
14647 }
14648
14649 /* Output mapping symbols for PLT entries associated with H. */
14650
14651 static bfd_boolean
14652 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
14653 {
14654 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
14655 struct elf32_arm_link_hash_entry *eh;
14656
14657 if (h->root.type == bfd_link_hash_indirect)
14658 return TRUE;
14659
14660 if (h->root.type == bfd_link_hash_warning)
14661 /* When warning symbols are created, they **replace** the "real"
14662 entry in the hash table, thus we never get to see the real
14663 symbol in a hash traversal. So look at it now. */
14664 h = (struct elf_link_hash_entry *) h->root.u.i.link;
14665
14666 eh = (struct elf32_arm_link_hash_entry *) h;
14667 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
14668 &h->plt, &eh->plt);
14669 }
14670
14671 /* Output a single local symbol for a generated stub.
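The symbol is given type STT_FUNC; for Thumb stubs the caller passes an offset with the low bit already set.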
*/ 14672 14673 static bfd_boolean 14674 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name, 14675 bfd_vma offset, bfd_vma size) 14676 { 14677 Elf_Internal_Sym sym; 14678 14679 sym.st_value = osi->sec->output_section->vma 14680 + osi->sec->output_offset 14681 + offset; 14682 sym.st_size = size; 14683 sym.st_other = 0; 14684 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC); 14685 sym.st_shndx = osi->sec_shndx; 14686 sym.st_target_internal = 0; 14687 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1; 14688 } 14689 14690 static bfd_boolean 14691 arm_map_one_stub (struct bfd_hash_entry * gen_entry, 14692 void * in_arg) 14693 { 14694 struct elf32_arm_stub_hash_entry *stub_entry; 14695 asection *stub_sec; 14696 bfd_vma addr; 14697 char *stub_name; 14698 output_arch_syminfo *osi; 14699 const insn_sequence *template_sequence; 14700 enum stub_insn_type prev_type; 14701 int size; 14702 int i; 14703 enum map_symbol_type sym_type; 14704 14705 /* Massage our args to the form they really have. */ 14706 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry; 14707 osi = (output_arch_syminfo *) in_arg; 14708 14709 stub_sec = stub_entry->stub_sec; 14710 14711 /* Ensure this stub is attached to the current section being 14712 processed. */ 14713 if (stub_sec != osi->sec) 14714 return TRUE; 14715 14716 addr = (bfd_vma) stub_entry->stub_offset; 14717 stub_name = stub_entry->output_name; 14718 14719 template_sequence = stub_entry->stub_template; 14720 switch (template_sequence[0].type) 14721 { 14722 case ARM_TYPE: 14723 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size)) 14724 return FALSE; 14725 break; 14726 case THUMB16_TYPE: 14727 case THUMB32_TYPE: 14728 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1, 14729 stub_entry->stub_size)) 14730 return FALSE; 14731 break; 14732 default: 14733 BFD_FAIL (); 14734 return 0; 14735 } 14736 14737 prev_type = DATA_TYPE; 14738 size = 0; 14739 for (i = 0; i < stub_entry->stub_template_size; i++) 14740 { 14741 switch (template_sequence[i].type) 14742 { 14743 case ARM_TYPE: 14744 sym_type = ARM_MAP_ARM; 14745 break; 14746 14747 case THUMB16_TYPE: 14748 case THUMB32_TYPE: 14749 sym_type = ARM_MAP_THUMB; 14750 break; 14751 14752 case DATA_TYPE: 14753 sym_type = ARM_MAP_DATA; 14754 break; 14755 14756 default: 14757 BFD_FAIL (); 14758 return FALSE; 14759 } 14760 14761 if (template_sequence[i].type != prev_type) 14762 { 14763 prev_type = template_sequence[i].type; 14764 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size)) 14765 return FALSE; 14766 } 14767 14768 switch (template_sequence[i].type) 14769 { 14770 case ARM_TYPE: 14771 case THUMB32_TYPE: 14772 size += 4; 14773 break; 14774 14775 case THUMB16_TYPE: 14776 size += 2; 14777 break; 14778 14779 case DATA_TYPE: 14780 size += 4; 14781 break; 14782 14783 default: 14784 BFD_FAIL (); 14785 return FALSE; 14786 } 14787 } 14788 14789 return TRUE; 14790 } 14791 14792 /* Output mapping symbols for linker generated sections, 14793 and for those data-only sections that do not have a 14794 $d. 
*/ 14795 14796 static bfd_boolean 14797 elf32_arm_output_arch_local_syms (bfd *output_bfd, 14798 struct bfd_link_info *info, 14799 void *flaginfo, 14800 int (*func) (void *, const char *, 14801 Elf_Internal_Sym *, 14802 asection *, 14803 struct elf_link_hash_entry *)) 14804 { 14805 output_arch_syminfo osi; 14806 struct elf32_arm_link_hash_table *htab; 14807 bfd_vma offset; 14808 bfd_size_type size; 14809 bfd *input_bfd; 14810 14811 htab = elf32_arm_hash_table (info); 14812 if (htab == NULL) 14813 return FALSE; 14814 14815 check_use_blx (htab); 14816 14817 osi.flaginfo = flaginfo; 14818 osi.info = info; 14819 osi.func = func; 14820 14821 /* Add a $d mapping symbol to data-only sections that 14822 don't have any mapping symbol. This may result in (harmless) redundant 14823 mapping symbols. */ 14824 for (input_bfd = info->input_bfds; 14825 input_bfd != NULL; 14826 input_bfd = input_bfd->link_next) 14827 { 14828 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS) 14829 for (osi.sec = input_bfd->sections; 14830 osi.sec != NULL; 14831 osi.sec = osi.sec->next) 14832 { 14833 if (osi.sec->output_section != NULL 14834 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE)) 14835 != 0) 14836 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED)) 14837 == SEC_HAS_CONTENTS 14838 && get_arm_elf_section_data (osi.sec) != NULL 14839 && get_arm_elf_section_data (osi.sec)->mapcount == 0 14840 && osi.sec->size > 0 14841 && (osi.sec->flags & SEC_EXCLUDE) == 0) 14842 { 14843 osi.sec_shndx = _bfd_elf_section_from_bfd_section 14844 (output_bfd, osi.sec->output_section); 14845 if (osi.sec_shndx != (int)SHN_BAD) 14846 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0); 14847 } 14848 } 14849 } 14850 14851 /* ARM->Thumb glue. */ 14852 if (htab->arm_glue_size > 0) 14853 { 14854 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner, 14855 ARM2THUMB_GLUE_SECTION_NAME); 14856 14857 osi.sec_shndx = _bfd_elf_section_from_bfd_section 14858 (output_bfd, osi.sec->output_section); 14859 if (info->shared || htab->root.is_relocatable_executable 14860 || htab->pic_veneer) 14861 size = ARM2THUMB_PIC_GLUE_SIZE; 14862 else if (htab->use_blx) 14863 size = ARM2THUMB_V5_STATIC_GLUE_SIZE; 14864 else 14865 size = ARM2THUMB_STATIC_GLUE_SIZE; 14866 14867 for (offset = 0; offset < htab->arm_glue_size; offset += size) 14868 { 14869 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset); 14870 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4); 14871 } 14872 } 14873 14874 /* Thumb->ARM glue. */ 14875 if (htab->thumb_glue_size > 0) 14876 { 14877 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner, 14878 THUMB2ARM_GLUE_SECTION_NAME); 14879 14880 osi.sec_shndx = _bfd_elf_section_from_bfd_section 14881 (output_bfd, osi.sec->output_section); 14882 size = THUMB2ARM_GLUE_SIZE; 14883 14884 for (offset = 0; offset < htab->thumb_glue_size; offset += size) 14885 { 14886 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset); 14887 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4); 14888 } 14889 } 14890 14891 /* ARMv4 BX veneers. */ 14892 if (htab->bx_glue_size > 0) 14893 { 14894 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner, 14895 ARM_BX_GLUE_SECTION_NAME); 14896 14897 osi.sec_shndx = _bfd_elf_section_from_bfd_section 14898 (output_bfd, osi.sec->output_section); 14899 14900 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0); 14901 } 14902 14903 /* Long calls stubs. 
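Walk every section of the stub BFD, skip those whose names lack STUB_SUFFIX, and let arm_map_one_stub emit the mapping symbols for each stub attached to the section.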
*/ 14904 if (htab->stub_bfd && htab->stub_bfd->sections) 14905 { 14906 asection* stub_sec; 14907 14908 for (stub_sec = htab->stub_bfd->sections; 14909 stub_sec != NULL; 14910 stub_sec = stub_sec->next) 14911 { 14912 /* Ignore non-stub sections. */ 14913 if (!strstr (stub_sec->name, STUB_SUFFIX)) 14914 continue; 14915 14916 osi.sec = stub_sec; 14917 14918 osi.sec_shndx = _bfd_elf_section_from_bfd_section 14919 (output_bfd, osi.sec->output_section); 14920 14921 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi); 14922 } 14923 } 14924 14925 /* Finally, output mapping symbols for the PLT. */ 14926 if (htab->root.splt && htab->root.splt->size > 0) 14927 { 14928 osi.sec = htab->root.splt; 14929 osi.sec_shndx = (_bfd_elf_section_from_bfd_section 14930 (output_bfd, osi.sec->output_section)); 14931 14932 /* Output mapping symbols for the plt header. SymbianOS does not have a 14933 plt header. */ 14934 if (htab->vxworks_p) 14935 { 14936 /* VxWorks shared libraries have no PLT header. */ 14937 if (!info->shared) 14938 { 14939 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0)) 14940 return FALSE; 14941 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12)) 14942 return FALSE; 14943 } 14944 } 14945 else if (htab->nacl_p) 14946 { 14947 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0)) 14948 return FALSE; 14949 } 14950 else if (!htab->symbian_p) 14951 { 14952 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0)) 14953 return FALSE; 14954 #ifndef FOUR_WORD_PLT 14955 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16)) 14956 return FALSE; 14957 #endif 14958 } 14959 } 14960 if ((htab->root.splt && htab->root.splt->size > 0) 14961 || (htab->root.iplt && htab->root.iplt->size > 0)) 14962 { 14963 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi); 14964 for (input_bfd = info->input_bfds; 14965 input_bfd != NULL; 14966 input_bfd = input_bfd->link_next) 14967 { 14968 struct arm_local_iplt_info **local_iplt; 14969 unsigned int i, num_syms; 14970 14971 local_iplt = elf32_arm_local_iplt (input_bfd); 14972 if (local_iplt != NULL) 14973 { 14974 num_syms = elf_symtab_hdr (input_bfd).sh_info; 14975 for (i = 0; i < num_syms; i++) 14976 if (local_iplt[i] != NULL 14977 && !elf32_arm_output_plt_map_1 (&osi, TRUE, 14978 &local_iplt[i]->root, 14979 &local_iplt[i]->arm)) 14980 return FALSE; 14981 } 14982 } 14983 } 14984 if (htab->dt_tlsdesc_plt != 0) 14985 { 14986 /* Mapping symbols for the lazy tls trampoline. */ 14987 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt)) 14988 return FALSE; 14989 14990 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 14991 htab->dt_tlsdesc_plt + 24)) 14992 return FALSE; 14993 } 14994 if (htab->tls_trampoline != 0) 14995 { 14996 /* Mapping symbols for the tls trampoline. */ 14997 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline)) 14998 return FALSE; 14999 #ifdef FOUR_WORD_PLT 15000 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 15001 htab->tls_trampoline + 12)) 15002 return FALSE; 15003 #endif 15004 } 15005 15006 return TRUE; 15007 } 15008 15009 /* Allocate target specific section data. 
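
   A minimal sketch (illustrative only) of how the data allocated here
   is consumed later in this file: the mapping symbols recorded in it
   drive the byteswapping pass in elf32_arm_write_section, roughly

       _arm_elf_section_data *sdata = get_arm_elf_section_data (sec);
       if (sdata != NULL && sdata->mapcount > 0)
         qsort (sdata->map, sdata->mapcount, sizeof (*sdata->map),
                elf32_arm_compare_mapping);

   which mirrors what elf32_arm_write_section does with the recorded
   map once the section contents are written.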
*/ 15010 15011 static bfd_boolean 15012 elf32_arm_new_section_hook (bfd *abfd, asection *sec) 15013 { 15014 if (!sec->used_by_bfd) 15015 { 15016 _arm_elf_section_data *sdata; 15017 bfd_size_type amt = sizeof (*sdata); 15018 15019 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt); 15020 if (sdata == NULL) 15021 return FALSE; 15022 sec->used_by_bfd = sdata; 15023 } 15024 15025 return _bfd_elf_new_section_hook (abfd, sec); 15026 } 15027 15028 15029 /* Used to order a list of mapping symbols by address. */ 15030 15031 static int 15032 elf32_arm_compare_mapping (const void * a, const void * b) 15033 { 15034 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a; 15035 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b; 15036 15037 if (amap->vma > bmap->vma) 15038 return 1; 15039 else if (amap->vma < bmap->vma) 15040 return -1; 15041 else if (amap->type > bmap->type) 15042 /* Ensure results do not depend on the host qsort for objects with 15043 multiple mapping symbols at the same address by sorting on type 15044 after vma. */ 15045 return 1; 15046 else if (amap->type < bmap->type) 15047 return -1; 15048 else 15049 return 0; 15050 } 15051 15052 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */ 15053 15054 static unsigned long 15055 offset_prel31 (unsigned long addr, bfd_vma offset) 15056 { 15057 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful); 15058 } 15059 15060 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31 15061 relocations. */ 15062 15063 static void 15064 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset) 15065 { 15066 unsigned long first_word = bfd_get_32 (output_bfd, from); 15067 unsigned long second_word = bfd_get_32 (output_bfd, from + 4); 15068 15069 /* High bit of first word is supposed to be zero. */ 15070 if ((first_word & 0x80000000ul) == 0) 15071 first_word = offset_prel31 (first_word, offset); 15072 15073 /* If the high bit of the first word is clear, and the bit pattern is not 0x1 15074 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */ 15075 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0)) 15076 second_word = offset_prel31 (second_word, offset); 15077 15078 bfd_put_32 (output_bfd, first_word, to); 15079 bfd_put_32 (output_bfd, second_word, to + 4); 15080 } 15081 15082 /* Data for make_branch_to_a8_stub(). */ 15083 15084 struct a8_branch_to_stub_data 15085 { 15086 asection *writing_section; 15087 bfd_byte *contents; 15088 }; 15089 15090 15091 /* Helper to insert branches to Cortex-A8 erratum stubs in the right 15092 places for a particular section. 
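
   A worked example (illustrative arithmetic, not taken from any
   particular input): the Thumb-2 branch assembled below encodes the
   byte offset as S:I1:I2:imm10:imm11:'0', with J1 = (not I1) xor S
   and J2 = (not I2) xor S.  For an unconditional B with
   branch_offset = 0x1000 this gives imm11 = 0, imm10 = 1,
   S = I1 = I2 = 0 and hence J1 = J2 = 1, so the base opcode
   0xf0009000 becomes 0xf001b800, emitted as the halfwords 0xf001
   and then 0xb800.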
*/ 15093 15094 static bfd_boolean 15095 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry, 15096 void *in_arg) 15097 { 15098 struct elf32_arm_stub_hash_entry *stub_entry; 15099 struct a8_branch_to_stub_data *data; 15100 bfd_byte *contents; 15101 unsigned long branch_insn; 15102 bfd_vma veneered_insn_loc, veneer_entry_loc; 15103 bfd_signed_vma branch_offset; 15104 bfd *abfd; 15105 unsigned int target; 15106 15107 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry; 15108 data = (struct a8_branch_to_stub_data *) in_arg; 15109 15110 if (stub_entry->target_section != data->writing_section 15111 || stub_entry->stub_type < arm_stub_a8_veneer_lwm) 15112 return TRUE; 15113 15114 contents = data->contents; 15115 15116 veneered_insn_loc = stub_entry->target_section->output_section->vma 15117 + stub_entry->target_section->output_offset 15118 + stub_entry->target_value; 15119 15120 veneer_entry_loc = stub_entry->stub_sec->output_section->vma 15121 + stub_entry->stub_sec->output_offset 15122 + stub_entry->stub_offset; 15123 15124 if (stub_entry->stub_type == arm_stub_a8_veneer_blx) 15125 veneered_insn_loc &= ~3u; 15126 15127 branch_offset = veneer_entry_loc - veneered_insn_loc - 4; 15128 15129 abfd = stub_entry->target_section->owner; 15130 target = stub_entry->target_value; 15131 15132 /* We attempt to avoid this condition by setting stubs_always_after_branch 15133 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround. 15134 This check is just to be on the safe side... */ 15135 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff)) 15136 { 15137 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is " 15138 "allocated in unsafe location"), abfd); 15139 return FALSE; 15140 } 15141 15142 switch (stub_entry->stub_type) 15143 { 15144 case arm_stub_a8_veneer_b: 15145 case arm_stub_a8_veneer_b_cond: 15146 branch_insn = 0xf0009000; 15147 goto jump24; 15148 15149 case arm_stub_a8_veneer_blx: 15150 branch_insn = 0xf000e800; 15151 goto jump24; 15152 15153 case arm_stub_a8_veneer_bl: 15154 { 15155 unsigned int i1, j1, i2, j2, s; 15156 15157 branch_insn = 0xf000d000; 15158 15159 jump24: 15160 if (branch_offset < -16777216 || branch_offset > 16777214) 15161 { 15162 /* There's not much we can do apart from complain if this 15163 happens. */ 15164 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out " 15165 "of range (input file too large)"), abfd); 15166 return FALSE; 15167 } 15168 15169 /* i1 = not(j1 eor s), so: 15170 not i1 = j1 eor s 15171 j1 = (not i1) eor s. */ 15172 15173 branch_insn |= (branch_offset >> 1) & 0x7ff; 15174 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16; 15175 i2 = (branch_offset >> 22) & 1; 15176 i1 = (branch_offset >> 23) & 1; 15177 s = (branch_offset >> 24) & 1; 15178 j1 = (!i1) ^ s; 15179 j2 = (!i2) ^ s; 15180 branch_insn |= j2 << 11; 15181 branch_insn |= j1 << 13; 15182 branch_insn |= s << 26; 15183 } 15184 break; 15185 15186 default: 15187 BFD_FAIL (); 15188 return FALSE; 15189 } 15190 15191 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]); 15192 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]); 15193 15194 return TRUE; 15195 } 15196 15197 /* Do code byteswapping. Return FALSE afterwards so that the section is 15198 written out as normal. 
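
   When byteswap_code is set, the final loop in this function walks
   the sorted mapping symbols and reverses the endianness of code one
   region at a time.  Sketching the effect on the byte sequence
   01 02 03 04 (illustrative values only):

       $a (ARM) region     04 03 02 01   32-bit words reversed
       $t (Thumb) region   02 01 04 03   16-bit halfwords reversed
       $d (data) region    01 02 03 04   left untouched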
*/ 15199 15200 static bfd_boolean 15201 elf32_arm_write_section (bfd *output_bfd, 15202 struct bfd_link_info *link_info, 15203 asection *sec, 15204 bfd_byte *contents) 15205 { 15206 unsigned int mapcount, errcount; 15207 _arm_elf_section_data *arm_data; 15208 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); 15209 elf32_arm_section_map *map; 15210 elf32_vfp11_erratum_list *errnode; 15211 bfd_vma ptr; 15212 bfd_vma end; 15213 bfd_vma offset = sec->output_section->vma + sec->output_offset; 15214 bfd_byte tmp; 15215 unsigned int i; 15216 15217 if (globals == NULL) 15218 return FALSE; 15219 15220 /* If this section has not been allocated an _arm_elf_section_data 15221 structure then we cannot record anything. */ 15222 arm_data = get_arm_elf_section_data (sec); 15223 if (arm_data == NULL) 15224 return FALSE; 15225 15226 mapcount = arm_data->mapcount; 15227 map = arm_data->map; 15228 errcount = arm_data->erratumcount; 15229 15230 if (errcount != 0) 15231 { 15232 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0; 15233 15234 for (errnode = arm_data->erratumlist; errnode != 0; 15235 errnode = errnode->next) 15236 { 15237 bfd_vma target = errnode->vma - offset; 15238 15239 switch (errnode->type) 15240 { 15241 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER: 15242 { 15243 bfd_vma branch_to_veneer; 15244 /* Original condition code of instruction, plus bit mask for 15245 ARM B instruction. */ 15246 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000) 15247 | 0x0a000000; 15248 15249 /* The instruction is before the label. */ 15250 target -= 4; 15251 15252 /* Above offset included in -4 below. */ 15253 branch_to_veneer = errnode->u.b.veneer->vma 15254 - errnode->vma - 4; 15255 15256 if ((signed) branch_to_veneer < -(1 << 25) 15257 || (signed) branch_to_veneer >= (1 << 25)) 15258 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of " 15259 "range"), output_bfd); 15260 15261 insn |= (branch_to_veneer >> 2) & 0xffffff; 15262 contents[endianflip ^ target] = insn & 0xff; 15263 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff; 15264 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff; 15265 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff; 15266 } 15267 break; 15268 15269 case VFP11_ERRATUM_ARM_VENEER: 15270 { 15271 bfd_vma branch_from_veneer; 15272 unsigned int insn; 15273 15274 /* Take size of veneer into account. */ 15275 branch_from_veneer = errnode->u.v.branch->vma 15276 - errnode->vma - 12; 15277 15278 if ((signed) branch_from_veneer < -(1 << 25) 15279 || (signed) branch_from_veneer >= (1 << 25)) 15280 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of " 15281 "range"), output_bfd); 15282 15283 /* Original instruction. */ 15284 insn = errnode->u.v.branch->u.b.vfp_insn; 15285 contents[endianflip ^ target] = insn & 0xff; 15286 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff; 15287 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff; 15288 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff; 15289 15290 /* Branch back to insn after original insn. 
*/ 15291 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff); 15292 contents[endianflip ^ (target + 4)] = insn & 0xff; 15293 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff; 15294 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff; 15295 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff; 15296 } 15297 break; 15298 15299 default: 15300 abort (); 15301 } 15302 } 15303 } 15304 15305 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX) 15306 { 15307 arm_unwind_table_edit *edit_node 15308 = arm_data->u.exidx.unwind_edit_list; 15309 /* Now, sec->size is the size of the section we will write. The original 15310 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND 15311 markers) was sec->rawsize. (This isn't the case if we perform no 15312 edits, then rawsize will be zero and we should use size). */ 15313 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size); 15314 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size; 15315 unsigned int in_index, out_index; 15316 bfd_vma add_to_offsets = 0; 15317 15318 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;) 15319 { 15320 if (edit_node) 15321 { 15322 unsigned int edit_index = edit_node->index; 15323 15324 if (in_index < edit_index && in_index * 8 < input_size) 15325 { 15326 copy_exidx_entry (output_bfd, edited_contents + out_index * 8, 15327 contents + in_index * 8, add_to_offsets); 15328 out_index++; 15329 in_index++; 15330 } 15331 else if (in_index == edit_index 15332 || (in_index * 8 >= input_size 15333 && edit_index == UINT_MAX)) 15334 { 15335 switch (edit_node->type) 15336 { 15337 case DELETE_EXIDX_ENTRY: 15338 in_index++; 15339 add_to_offsets += 8; 15340 break; 15341 15342 case INSERT_EXIDX_CANTUNWIND_AT_END: 15343 { 15344 asection *text_sec = edit_node->linked_section; 15345 bfd_vma text_offset = text_sec->output_section->vma 15346 + text_sec->output_offset 15347 + text_sec->size; 15348 bfd_vma exidx_offset = offset + out_index * 8; 15349 unsigned long prel31_offset; 15350 15351 /* Note: this is meant to be equivalent to an 15352 R_ARM_PREL31 relocation. These synthetic 15353 EXIDX_CANTUNWIND markers are not relocated by the 15354 usual BFD method. */ 15355 prel31_offset = (text_offset - exidx_offset) 15356 & 0x7ffffffful; 15357 15358 /* First address we can't unwind. */ 15359 bfd_put_32 (output_bfd, prel31_offset, 15360 &edited_contents[out_index * 8]); 15361 15362 /* Code for EXIDX_CANTUNWIND. */ 15363 bfd_put_32 (output_bfd, 0x1, 15364 &edited_contents[out_index * 8 + 4]); 15365 15366 out_index++; 15367 add_to_offsets -= 8; 15368 } 15369 break; 15370 } 15371 15372 edit_node = edit_node->next; 15373 } 15374 } 15375 else 15376 { 15377 /* No more edits, copy remaining entries verbatim. */ 15378 copy_exidx_entry (output_bfd, edited_contents + out_index * 8, 15379 contents + in_index * 8, add_to_offsets); 15380 out_index++; 15381 in_index++; 15382 } 15383 } 15384 15385 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD)) 15386 bfd_set_section_contents (output_bfd, sec->output_section, 15387 edited_contents, 15388 (file_ptr) sec->output_offset, sec->size); 15389 15390 return TRUE; 15391 } 15392 15393 /* Fix code to point to Cortex-A8 erratum stubs. 
*/ 15394 if (globals->fix_cortex_a8) 15395 { 15396 struct a8_branch_to_stub_data data; 15397 15398 data.writing_section = sec; 15399 data.contents = contents; 15400 15401 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub, 15402 &data); 15403 } 15404 15405 if (mapcount == 0) 15406 return FALSE; 15407 15408 if (globals->byteswap_code) 15409 { 15410 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping); 15411 15412 ptr = map[0].vma; 15413 for (i = 0; i < mapcount; i++) 15414 { 15415 if (i == mapcount - 1) 15416 end = sec->size; 15417 else 15418 end = map[i + 1].vma; 15419 15420 switch (map[i].type) 15421 { 15422 case 'a': 15423 /* Byte swap code words. */ 15424 while (ptr + 3 < end) 15425 { 15426 tmp = contents[ptr]; 15427 contents[ptr] = contents[ptr + 3]; 15428 contents[ptr + 3] = tmp; 15429 tmp = contents[ptr + 1]; 15430 contents[ptr + 1] = contents[ptr + 2]; 15431 contents[ptr + 2] = tmp; 15432 ptr += 4; 15433 } 15434 break; 15435 15436 case 't': 15437 /* Byte swap code halfwords. */ 15438 while (ptr + 1 < end) 15439 { 15440 tmp = contents[ptr]; 15441 contents[ptr] = contents[ptr + 1]; 15442 contents[ptr + 1] = tmp; 15443 ptr += 2; 15444 } 15445 break; 15446 15447 case 'd': 15448 /* Leave data alone. */ 15449 break; 15450 } 15451 ptr = end; 15452 } 15453 } 15454 15455 free (map); 15456 arm_data->mapcount = -1; 15457 arm_data->mapsize = 0; 15458 arm_data->map = NULL; 15459 15460 return FALSE; 15461 } 15462 15463 /* Mangle thumb function symbols as we read them in. */ 15464 15465 static bfd_boolean 15466 elf32_arm_swap_symbol_in (bfd * abfd, 15467 const void *psrc, 15468 const void *pshn, 15469 Elf_Internal_Sym *dst) 15470 { 15471 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst)) 15472 return FALSE; 15473 15474 /* New EABI objects mark thumb function symbols by setting the low bit of 15475 the address. */ 15476 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC 15477 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC) 15478 { 15479 if (dst->st_value & 1) 15480 { 15481 dst->st_value &= ~(bfd_vma) 1; 15482 dst->st_target_internal = ST_BRANCH_TO_THUMB; 15483 } 15484 else 15485 dst->st_target_internal = ST_BRANCH_TO_ARM; 15486 } 15487 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC) 15488 { 15489 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC); 15490 dst->st_target_internal = ST_BRANCH_TO_THUMB; 15491 } 15492 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION) 15493 dst->st_target_internal = ST_BRANCH_LONG; 15494 else 15495 dst->st_target_internal = ST_BRANCH_UNKNOWN; 15496 15497 return TRUE; 15498 } 15499 15500 15501 /* Mangle thumb function symbols as we write them out. */ 15502 15503 static void 15504 elf32_arm_swap_symbol_out (bfd *abfd, 15505 const Elf_Internal_Sym *src, 15506 void *cdst, 15507 void *shndx) 15508 { 15509 Elf_Internal_Sym newsym; 15510 15511 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit 15512 of the address set, as per the new EABI. We do this unconditionally 15513 because objcopy does not set the elf header flags until after 15514 it writes out the symbol table. */ 15515 if (src->st_target_internal == ST_BRANCH_TO_THUMB) 15516 { 15517 newsym = *src; 15518 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC) 15519 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC); 15520 if (newsym.st_shndx != SHN_UNDEF) 15521 { 15522 /* Do this only for defined symbols. 
At link time, the static
	     linker will simulate the work of the dynamic linker in
	     resolving symbols and will carry over the thumbness of found
	     symbols to the output symbol table.  It's not clear how it
	     happens, but the thumbness of undefined symbols can well be
	     different at runtime, and writing '1' for them will be
	     confusing for users and possibly for the dynamic linker
	     itself.  */
	  newsym.st_value |= 1;
	}

      src = &newsym;
    }
  bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
}

/* Add the PT_ARM_EXIDX program header.  */

static bfd_boolean
elf32_arm_modify_segment_map (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  struct elf_segment_map *m;
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    {
      /* If there is already a PT_ARM_EXIDX header, then we do not
	 want to add another one.  This situation arises when running
	 "strip"; the input binary already has the header.  */
      m = elf_tdata (abfd)->segment_map;
      while (m && m->p_type != PT_ARM_EXIDX)
	m = m->next;
      if (!m)
	{
	  m = (struct elf_segment_map *)
	      bfd_zalloc (abfd, sizeof (struct elf_segment_map));
	  if (m == NULL)
	    return FALSE;
	  m->p_type = PT_ARM_EXIDX;
	  m->count = 1;
	  m->sections[0] = sec;

	  m->next = elf_tdata (abfd)->segment_map;
	  elf_tdata (abfd)->segment_map = m;
	}
    }

  return TRUE;
}

/* We may add a PT_ARM_EXIDX program header.  */

static int
elf32_arm_additional_program_headers (bfd *abfd,
				      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    return 1;
  else
    return 0;
}

/* Hook called by the linker routine which adds symbols from an object
   file.  */

static bfd_boolean
elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
			   Elf_Internal_Sym *sym, const char **namep,
			   flagword *flagsp, asection **secp, bfd_vma *valp)
{
  if ((abfd->flags & DYNAMIC) == 0
      && (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
	  || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE))
    elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;

  if (elf32_arm_hash_table (info)->vxworks_p
      && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
				       flagsp, secp, valp))
    return FALSE;

  return TRUE;
}

/* We use this to override swap_symbol_in and swap_symbol_out.
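
   A concrete (illustrative) round trip through the two hooks above:
   a defined Thumb function whose on-disk st_value is 0x8001 is read
   in by elf32_arm_swap_symbol_in as st_value 0x8000 with
   st_target_internal set to ST_BRANCH_TO_THUMB, and is written back
   out by elf32_arm_swap_symbol_out as an STT_FUNC symbol with the low
   bit restored, i.e. st_value 0x8001 again.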
*/ 15611 const struct elf_size_info elf32_arm_size_info = 15612 { 15613 sizeof (Elf32_External_Ehdr), 15614 sizeof (Elf32_External_Phdr), 15615 sizeof (Elf32_External_Shdr), 15616 sizeof (Elf32_External_Rel), 15617 sizeof (Elf32_External_Rela), 15618 sizeof (Elf32_External_Sym), 15619 sizeof (Elf32_External_Dyn), 15620 sizeof (Elf_External_Note), 15621 4, 15622 1, 15623 32, 2, 15624 ELFCLASS32, EV_CURRENT, 15625 bfd_elf32_write_out_phdrs, 15626 bfd_elf32_write_shdrs_and_ehdr, 15627 bfd_elf32_checksum_contents, 15628 bfd_elf32_write_relocs, 15629 elf32_arm_swap_symbol_in, 15630 elf32_arm_swap_symbol_out, 15631 bfd_elf32_slurp_reloc_table, 15632 bfd_elf32_slurp_symbol_table, 15633 bfd_elf32_swap_dyn_in, 15634 bfd_elf32_swap_dyn_out, 15635 bfd_elf32_swap_reloc_in, 15636 bfd_elf32_swap_reloc_out, 15637 bfd_elf32_swap_reloca_in, 15638 bfd_elf32_swap_reloca_out 15639 }; 15640 15641 #define ELF_ARCH bfd_arch_arm 15642 #define ELF_TARGET_ID ARM_ELF_DATA 15643 #define ELF_MACHINE_CODE EM_ARM 15644 #ifdef __QNXTARGET__ 15645 #define ELF_MAXPAGESIZE 0x1000 15646 #else 15647 #define ELF_MAXPAGESIZE 0x10000 15648 #endif 15649 #define ELF_MINPAGESIZE 0x1000 15650 #define ELF_COMMONPAGESIZE 0x1000 15651 15652 #define bfd_elf32_mkobject elf32_arm_mkobject 15653 15654 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data 15655 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data 15656 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags 15657 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data 15658 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create 15659 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free 15660 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup 15661 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup 15662 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line 15663 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info 15664 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook 15665 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol 15666 #define bfd_elf32_bfd_final_link elf32_arm_final_link 15667 15668 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type 15669 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook 15670 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections 15671 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook 15672 #define elf_backend_check_relocs elf32_arm_check_relocs 15673 #define elf_backend_relocate_section elf32_arm_relocate_section 15674 #define elf_backend_write_section elf32_arm_write_section 15675 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol 15676 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections 15677 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol 15678 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections 15679 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections 15680 #define elf_backend_always_size_sections elf32_arm_always_size_sections 15681 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections 15682 #define elf_backend_post_process_headers elf32_arm_post_process_headers 15683 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class 15684 #define elf_backend_object_p elf32_arm_object_p 15685 #define elf_backend_fake_sections 
elf32_arm_fake_sections 15686 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr 15687 #define elf_backend_final_write_processing elf32_arm_final_write_processing 15688 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol 15689 #define elf_backend_size_info elf32_arm_size_info 15690 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map 15691 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers 15692 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms 15693 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing 15694 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook 15695 15696 #define elf_backend_can_refcount 1 15697 #define elf_backend_can_gc_sections 1 15698 #define elf_backend_plt_readonly 1 15699 #define elf_backend_want_got_plt 1 15700 #define elf_backend_want_plt_sym 0 15701 #define elf_backend_may_use_rel_p 1 15702 #define elf_backend_may_use_rela_p 0 15703 #define elf_backend_default_use_rela_p 0 15704 15705 #define elf_backend_got_header_size 12 15706 15707 #undef elf_backend_obj_attrs_vendor 15708 #define elf_backend_obj_attrs_vendor "aeabi" 15709 #undef elf_backend_obj_attrs_section 15710 #define elf_backend_obj_attrs_section ".ARM.attributes" 15711 #undef elf_backend_obj_attrs_arg_type 15712 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type 15713 #undef elf_backend_obj_attrs_section_type 15714 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES 15715 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order 15716 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown 15717 15718 #include "elf32-target.h" 15719 15720 /* Native Client targets. */ 15721 15722 #undef TARGET_LITTLE_SYM 15723 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_nacl_vec 15724 #undef TARGET_LITTLE_NAME 15725 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl" 15726 #undef TARGET_BIG_SYM 15727 #define TARGET_BIG_SYM bfd_elf32_bigarm_nacl_vec 15728 #undef TARGET_BIG_NAME 15729 #define TARGET_BIG_NAME "elf32-bigarm-nacl" 15730 15731 /* Like elf32_arm_link_hash_table_create -- but overrides 15732 appropriately for NaCl. */ 15733 15734 static struct bfd_link_hash_table * 15735 elf32_arm_nacl_link_hash_table_create (bfd *abfd) 15736 { 15737 struct bfd_link_hash_table *ret; 15738 15739 ret = elf32_arm_link_hash_table_create (abfd); 15740 if (ret) 15741 { 15742 struct elf32_arm_link_hash_table *htab 15743 = (struct elf32_arm_link_hash_table *) ret; 15744 15745 htab->nacl_p = 1; 15746 15747 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry); 15748 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry); 15749 } 15750 return ret; 15751 } 15752 15753 /* Since NaCl doesn't use the ARM-specific unwind format, we don't 15754 really need to use elf32_arm_modify_segment_map. But we do it 15755 anyway just to reduce gratuitous differences with the stock ARM backend. 
*/ 15756 15757 static bfd_boolean 15758 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info) 15759 { 15760 return (elf32_arm_modify_segment_map (abfd, info) 15761 && nacl_modify_segment_map (abfd, info)); 15762 } 15763 15764 #undef elf32_bed 15765 #define elf32_bed elf32_arm_nacl_bed 15766 #undef bfd_elf32_bfd_link_hash_table_create 15767 #define bfd_elf32_bfd_link_hash_table_create \ 15768 elf32_arm_nacl_link_hash_table_create 15769 #undef elf_backend_plt_alignment 15770 #define elf_backend_plt_alignment 4 15771 #undef elf_backend_modify_segment_map 15772 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map 15773 #undef elf_backend_modify_program_headers 15774 #define elf_backend_modify_program_headers nacl_modify_program_headers 15775 15776 #include "elf32-target.h" 15777 15778 /* Reset to defaults. */ 15779 #undef elf_backend_plt_alignment 15780 #undef elf_backend_modify_segment_map 15781 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map 15782 #undef elf_backend_modify_program_headers 15783 15784 /* VxWorks Targets. */ 15785 15786 #undef TARGET_LITTLE_SYM 15787 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec 15788 #undef TARGET_LITTLE_NAME 15789 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks" 15790 #undef TARGET_BIG_SYM 15791 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec 15792 #undef TARGET_BIG_NAME 15793 #define TARGET_BIG_NAME "elf32-bigarm-vxworks" 15794 15795 /* Like elf32_arm_link_hash_table_create -- but overrides 15796 appropriately for VxWorks. */ 15797 15798 static struct bfd_link_hash_table * 15799 elf32_arm_vxworks_link_hash_table_create (bfd *abfd) 15800 { 15801 struct bfd_link_hash_table *ret; 15802 15803 ret = elf32_arm_link_hash_table_create (abfd); 15804 if (ret) 15805 { 15806 struct elf32_arm_link_hash_table *htab 15807 = (struct elf32_arm_link_hash_table *) ret; 15808 htab->use_rel = 0; 15809 htab->vxworks_p = 1; 15810 } 15811 return ret; 15812 } 15813 15814 static void 15815 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker) 15816 { 15817 elf32_arm_final_write_processing (abfd, linker); 15818 elf_vxworks_final_write_processing (abfd, linker); 15819 } 15820 15821 #undef elf32_bed 15822 #define elf32_bed elf32_arm_vxworks_bed 15823 15824 #undef bfd_elf32_bfd_link_hash_table_create 15825 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create 15826 #undef elf_backend_final_write_processing 15827 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing 15828 #undef elf_backend_emit_relocs 15829 #define elf_backend_emit_relocs elf_vxworks_emit_relocs 15830 15831 #undef elf_backend_may_use_rel_p 15832 #define elf_backend_may_use_rel_p 0 15833 #undef elf_backend_may_use_rela_p 15834 #define elf_backend_may_use_rela_p 1 15835 #undef elf_backend_default_use_rela_p 15836 #define elf_backend_default_use_rela_p 1 15837 #undef elf_backend_want_plt_sym 15838 #define elf_backend_want_plt_sym 1 15839 #undef ELF_MAXPAGESIZE 15840 #define ELF_MAXPAGESIZE 0x1000 15841 15842 #include "elf32-target.h" 15843 15844 15845 /* Merge backend specific data from an object file to the output 15846 object file when linking. */ 15847 15848 static bfd_boolean 15849 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd) 15850 { 15851 flagword out_flags; 15852 flagword in_flags; 15853 bfd_boolean flags_compatible = TRUE; 15854 asection *sec; 15855 15856 /* Check if we have the same endianness. */ 15857 if (! 
_bfd_generic_verify_endian_match (ibfd, obfd))
    return FALSE;

  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %B is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which, surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_get_section_flags (ibfd, sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      null_input_bfd = FALSE;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.
*/ 15954 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags), 15955 EF_ARM_EABI_VERSION (out_flags))) 15956 { 15957 _bfd_error_handler 15958 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"), 15959 ibfd, obfd, 15960 (in_flags & EF_ARM_EABIMASK) >> 24, 15961 (out_flags & EF_ARM_EABIMASK) >> 24); 15962 return FALSE; 15963 } 15964 15965 /* Not sure what needs to be checked for EABI versions >= 1. */ 15966 /* VxWorks libraries do not use these flags. */ 15967 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed 15968 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed 15969 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN) 15970 { 15971 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26)) 15972 { 15973 _bfd_error_handler 15974 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"), 15975 ibfd, obfd, 15976 in_flags & EF_ARM_APCS_26 ? 26 : 32, 15977 out_flags & EF_ARM_APCS_26 ? 26 : 32); 15978 flags_compatible = FALSE; 15979 } 15980 15981 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT)) 15982 { 15983 if (in_flags & EF_ARM_APCS_FLOAT) 15984 _bfd_error_handler 15985 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"), 15986 ibfd, obfd); 15987 else 15988 _bfd_error_handler 15989 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"), 15990 ibfd, obfd); 15991 15992 flags_compatible = FALSE; 15993 } 15994 15995 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT)) 15996 { 15997 if (in_flags & EF_ARM_VFP_FLOAT) 15998 _bfd_error_handler 15999 (_("error: %B uses VFP instructions, whereas %B does not"), 16000 ibfd, obfd); 16001 else 16002 _bfd_error_handler 16003 (_("error: %B uses FPA instructions, whereas %B does not"), 16004 ibfd, obfd); 16005 16006 flags_compatible = FALSE; 16007 } 16008 16009 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT)) 16010 { 16011 if (in_flags & EF_ARM_MAVERICK_FLOAT) 16012 _bfd_error_handler 16013 (_("error: %B uses Maverick instructions, whereas %B does not"), 16014 ibfd, obfd); 16015 else 16016 _bfd_error_handler 16017 (_("error: %B does not use Maverick instructions, whereas %B does"), 16018 ibfd, obfd); 16019 16020 flags_compatible = FALSE; 16021 } 16022 16023 #ifdef EF_ARM_SOFT_FLOAT 16024 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT)) 16025 { 16026 /* We can allow interworking between code that is VFP format 16027 layout, and uses either soft float or integer regs for 16028 passing floating point arguments and results. We already 16029 know that the APCS_FLOAT flags match; similarly for VFP 16030 flags. */ 16031 if ((in_flags & EF_ARM_APCS_FLOAT) != 0 16032 || (in_flags & EF_ARM_VFP_FLOAT) == 0) 16033 { 16034 if (in_flags & EF_ARM_SOFT_FLOAT) 16035 _bfd_error_handler 16036 (_("error: %B uses software FP, whereas %B uses hardware FP"), 16037 ibfd, obfd); 16038 else 16039 _bfd_error_handler 16040 (_("error: %B uses hardware FP, whereas %B uses software FP"), 16041 ibfd, obfd); 16042 16043 flags_compatible = FALSE; 16044 } 16045 } 16046 #endif 16047 16048 /* Interworking mismatch is only a warning. 
*/ 16049 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK)) 16050 { 16051 if (in_flags & EF_ARM_INTERWORK) 16052 { 16053 _bfd_error_handler 16054 (_("Warning: %B supports interworking, whereas %B does not"), 16055 ibfd, obfd); 16056 } 16057 else 16058 { 16059 _bfd_error_handler 16060 (_("Warning: %B does not support interworking, whereas %B does"), 16061 ibfd, obfd); 16062 } 16063 } 16064 } 16065 16066 return flags_compatible; 16067 } 16068 16069 16070 /* Symbian OS Targets. */ 16071 16072 #undef TARGET_LITTLE_SYM 16073 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec 16074 #undef TARGET_LITTLE_NAME 16075 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian" 16076 #undef TARGET_BIG_SYM 16077 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec 16078 #undef TARGET_BIG_NAME 16079 #define TARGET_BIG_NAME "elf32-bigarm-symbian" 16080 16081 /* Like elf32_arm_link_hash_table_create -- but overrides 16082 appropriately for Symbian OS. */ 16083 16084 static struct bfd_link_hash_table * 16085 elf32_arm_symbian_link_hash_table_create (bfd *abfd) 16086 { 16087 struct bfd_link_hash_table *ret; 16088 16089 ret = elf32_arm_link_hash_table_create (abfd); 16090 if (ret) 16091 { 16092 struct elf32_arm_link_hash_table *htab 16093 = (struct elf32_arm_link_hash_table *)ret; 16094 /* There is no PLT header for Symbian OS. */ 16095 htab->plt_header_size = 0; 16096 /* The PLT entries are each one instruction and one word. */ 16097 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry); 16098 htab->symbian_p = 1; 16099 /* Symbian uses armv5t or above, so use_blx is always true. */ 16100 htab->use_blx = 1; 16101 htab->root.is_relocatable_executable = 1; 16102 } 16103 return ret; 16104 } 16105 16106 static const struct bfd_elf_special_section 16107 elf32_arm_symbian_special_sections[] = 16108 { 16109 /* In a BPABI executable, the dynamic linking sections do not go in 16110 the loadable read-only segment. The post-linker may wish to 16111 refer to these sections, but they are not part of the final 16112 program image. */ 16113 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 }, 16114 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 }, 16115 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 }, 16116 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 }, 16117 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 }, 16118 /* These sections do not need to be writable as the SymbianOS 16119 postlinker will arrange things so that no dynamic relocation is 16120 required. */ 16121 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC }, 16122 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC }, 16123 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC }, 16124 { NULL, 0, 0, 0, 0 } 16125 }; 16126 16127 static void 16128 elf32_arm_symbian_begin_write_processing (bfd *abfd, 16129 struct bfd_link_info *link_info) 16130 { 16131 /* BPABI objects are never loaded directly by an OS kernel; they are 16132 processed by a postlinker first, into an OS-specific format. If 16133 the D_PAGED bit is set on the file, BFD will align segments on 16134 page boundaries, so that an OS can directly map the file. With 16135 BPABI objects, that just results in wasted space. In addition, 16136 because we clear the D_PAGED bit, map_sections_to_segments will 16137 recognize that the program headers should not be mapped into any 16138 loadable segment. 
*/ 16139 abfd->flags &= ~D_PAGED; 16140 elf32_arm_begin_write_processing (abfd, link_info); 16141 } 16142 16143 static bfd_boolean 16144 elf32_arm_symbian_modify_segment_map (bfd *abfd, 16145 struct bfd_link_info *info) 16146 { 16147 struct elf_segment_map *m; 16148 asection *dynsec; 16149 16150 /* BPABI shared libraries and executables should have a PT_DYNAMIC 16151 segment. However, because the .dynamic section is not marked 16152 with SEC_LOAD, the generic ELF code will not create such a 16153 segment. */ 16154 dynsec = bfd_get_section_by_name (abfd, ".dynamic"); 16155 if (dynsec) 16156 { 16157 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next) 16158 if (m->p_type == PT_DYNAMIC) 16159 break; 16160 16161 if (m == NULL) 16162 { 16163 m = _bfd_elf_make_dynamic_segment (abfd, dynsec); 16164 m->next = elf_tdata (abfd)->segment_map; 16165 elf_tdata (abfd)->segment_map = m; 16166 } 16167 } 16168 16169 /* Also call the generic arm routine. */ 16170 return elf32_arm_modify_segment_map (abfd, info); 16171 } 16172 16173 /* Return address for Ith PLT stub in section PLT, for relocation REL 16174 or (bfd_vma) -1 if it should not be included. */ 16175 16176 static bfd_vma 16177 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt, 16178 const arelent *rel ATTRIBUTE_UNUSED) 16179 { 16180 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i; 16181 } 16182 16183 16184 #undef elf32_bed 16185 #define elf32_bed elf32_arm_symbian_bed 16186 16187 /* The dynamic sections are not allocated on SymbianOS; the postlinker 16188 will process them and then discard them. */ 16189 #undef ELF_DYNAMIC_SEC_FLAGS 16190 #define ELF_DYNAMIC_SEC_FLAGS \ 16191 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED) 16192 16193 #undef elf_backend_emit_relocs 16194 16195 #undef bfd_elf32_bfd_link_hash_table_create 16196 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create 16197 #undef elf_backend_special_sections 16198 #define elf_backend_special_sections elf32_arm_symbian_special_sections 16199 #undef elf_backend_begin_write_processing 16200 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing 16201 #undef elf_backend_final_write_processing 16202 #define elf_backend_final_write_processing elf32_arm_final_write_processing 16203 16204 #undef elf_backend_modify_segment_map 16205 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map 16206 16207 /* There is no .got section for BPABI objects, and hence no header. */ 16208 #undef elf_backend_got_header_size 16209 #define elf_backend_got_header_size 0 16210 16211 /* Similarly, there is no .got.plt section. */ 16212 #undef elf_backend_want_got_plt 16213 #define elf_backend_want_got_plt 0 16214 16215 #undef elf_backend_plt_sym_val 16216 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val 16217 16218 #undef elf_backend_may_use_rel_p 16219 #define elf_backend_may_use_rel_p 1 16220 #undef elf_backend_may_use_rela_p 16221 #define elf_backend_may_use_rela_p 0 16222 #undef elf_backend_default_use_rela_p 16223 #define elf_backend_default_use_rela_p 0 16224 #undef elf_backend_want_plt_sym 16225 #define elf_backend_want_plt_sym 0 16226 #undef ELF_MAXPAGESIZE 16227 #define ELF_MAXPAGESIZE 0x8000 16228 16229 #include "elf32-target.h" 16230