1 //===- PPC64.cpp ----------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "InputFiles.h"
10 #include "OutputSections.h"
11 #include "SymbolTable.h"
12 #include "Symbols.h"
13 #include "SyntheticSections.h"
14 #include "Target.h"
15 #include "Thunks.h"
16 #include "lld/Common/CommonLinkerContext.h"
17 #include "llvm/Support/Endian.h"
18 
19 using namespace llvm;
20 using namespace llvm::object;
21 using namespace llvm::support::endian;
22 using namespace llvm::ELF;
23 using namespace lld;
24 using namespace lld::elf;
25 
26 constexpr uint64_t ppc64TocOffset = 0x8000;
27 constexpr uint64_t dynamicThreadPointerOffset = 0x8000;
28 
29 namespace {
30 // Opcodes (X-form: extended opcode, bits 21-30; D-form: primary opcode,
31 // bits 0-5) of instructions that can be used in the initial exec TLS sequence.
32 enum XFormOpcd {
33   LBZX = 87,
34   LHZX = 279,
35   LWZX = 23,
36   LDX = 21,
37   STBX = 215,
38   STHX = 407,
39   STWX = 151,
40   STDX = 149,
41   LHAX = 343,
42   LWAX = 341,
43   LFSX = 535,
44   LFDX = 599,
45   STFSX = 663,
46   STFDX = 727,
47   ADD = 266,
48 };
49 
50 enum DFormOpcd {
51   LBZ = 34,
52   LBZU = 35,
53   LHZ = 40,
54   LHZU = 41,
55   LHAU = 43,
56   LWZ = 32,
57   LWZU = 33,
58   LFSU = 49,
59   LFDU = 51,
60   STB = 38,
61   STBU = 39,
62   STH = 44,
63   STHU = 45,
64   STW = 36,
65   STWU = 37,
66   STFSU = 53,
67   STFDU = 55,
68   LHA = 42,
69   LFS = 48,
70   LFD = 50,
71   STFS = 52,
72   STFD = 54,
73   ADDI = 14
74 };
75 
76 enum DSFormOpcd {
77   LD = 58,
78   LWA = 58,
79   STD = 62
80 };
81 
82 constexpr uint32_t NOP = 0x60000000;
83 
84 enum class PPCLegacyInsn : uint32_t {
85   NOINSN = 0,
86   // Loads.
87   LBZ = 0x88000000,
88   LHZ = 0xa0000000,
89   LWZ = 0x80000000,
90   LHA = 0xa8000000,
91   LWA = 0xe8000002,
92   LD = 0xe8000000,
93   LFS = 0xC0000000,
94   LXSSP = 0xe4000003,
95   LFD = 0xc8000000,
96   LXSD = 0xe4000002,
97   LXV = 0xf4000001,
98   LXVP = 0x18000000,
99 
100   // Stores.
101   STB = 0x98000000,
102   STH = 0xb0000000,
103   STW = 0x90000000,
104   STD = 0xf8000000,
105   STFS = 0xd0000000,
106   STXSSP = 0xf4000003,
107   STFD = 0xd8000000,
108   STXSD = 0xf4000002,
109   STXV = 0xf4000005,
110   STXVP = 0x18000001
111 };
112 enum class PPCPrefixedInsn : uint64_t {
113   NOINSN = 0,
114   PREFIX_MLS = 0x0610000000000000,
115   PREFIX_8LS = 0x0410000000000000,
116 
117   // Loads.
118   PLBZ = PREFIX_MLS,
119   PLHZ = PREFIX_MLS,
120   PLWZ = PREFIX_MLS,
121   PLHA = PREFIX_MLS,
122   PLWA = PREFIX_8LS | 0xa4000000,
123   PLD = PREFIX_8LS | 0xe4000000,
124   PLFS = PREFIX_MLS,
125   PLXSSP = PREFIX_8LS | 0xac000000,
126   PLFD = PREFIX_MLS,
127   PLXSD = PREFIX_8LS | 0xa8000000,
128   PLXV = PREFIX_8LS | 0xc8000000,
129   PLXVP = PREFIX_8LS | 0xe8000000,
130 
131   // Stores.
132   PSTB = PREFIX_MLS,
133   PSTH = PREFIX_MLS,
134   PSTW = PREFIX_MLS,
135   PSTD = PREFIX_8LS | 0xf4000000,
136   PSTFS = PREFIX_MLS,
137   PSTXSSP = PREFIX_8LS | 0xbc000000,
138   PSTFD = PREFIX_MLS,
139   PSTXSD = PREFIX_8LS | 0xb8000000,
140   PSTXV = PREFIX_8LS | 0xd8000000,
141   PSTXVP = PREFIX_8LS | 0xf8000000
142 };
143 
144 static bool checkPPCLegacyInsn(uint32_t encoding) {
145   PPCLegacyInsn insn = static_cast<PPCLegacyInsn>(encoding);
146   if (insn == PPCLegacyInsn::NOINSN)
147     return false;
148 #define PCREL_OPT(Legacy, PCRel, InsnMask)                                     \
149   if (insn == PPCLegacyInsn::Legacy)                                           \
150     return true;
151 #include "PPCInsns.def"
152 #undef PCREL_OPT
153   return false;
154 }
155 
156 // Masks to apply to legacy instructions when converting them to prefixed,
157 // pc-relative versions. For the most part, the primary opcode is shared
158 // between the legacy instruction and the suffix of its prefixed version.
159 // However, there are some instances where that isn't the case (DS-form and
160 // DQ-form instructions).
161 enum class LegacyToPrefixMask : uint64_t {
162   NOMASK = 0x0,
163   OPC_AND_RST = 0xffe00000, // Primary opc (0-5) and R[ST] (6-10).
164   ONLY_RST = 0x3e00000,     // [RS]T (6-10).
165   ST_STX28_TO5 =
166       0x8000000003e00000, // S/T (6-10) - The [S/T]X bit moves from 28 to 5.
167 };
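// For example, lwz and its prefixed form plwz share the same suffix primary
// opcode (PLWZ above is just PREFIX_MLS), so the legacy opcode can be kept,
// whereas pld's suffix opcode (0xe4000000) differs from ld's (0xe8000000), so
// the legacy primary opcode must be masked out and only R[ST] is kept.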
168 
169 class PPC64 final : public TargetInfo {
170 public:
171   PPC64(Ctx &);
172   int getTlsGdRelaxSkip(RelType type) const override;
173   uint32_t calcEFlags() const override;
174   RelExpr getRelExpr(RelType type, const Symbol &s,
175                      const uint8_t *loc) const override;
176   RelType getDynRel(RelType type) const override;
177   int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
178   void writePltHeader(uint8_t *buf) const override;
179   void writePlt(uint8_t *buf, const Symbol &sym,
180                 uint64_t pltEntryAddr) const override;
181   void writeIplt(uint8_t *buf, const Symbol &sym,
182                  uint64_t pltEntryAddr) const override;
183   void relocate(uint8_t *loc, const Relocation &rel,
184                 uint64_t val) const override;
185   void writeGotHeader(uint8_t *buf) const override;
186   bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
187                   uint64_t branchAddr, const Symbol &s,
188                   int64_t a) const override;
189   uint32_t getThunkSectionSpacing() const override;
190   bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
191   RelExpr adjustTlsExpr(RelType type, RelExpr expr) const override;
192   RelExpr adjustGotPcExpr(RelType type, int64_t addend,
193                           const uint8_t *loc) const override;
194   void relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const;
195   void relocateAlloc(InputSectionBase &sec, uint8_t *buf) const override;
196 
197   bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
198                                         uint8_t stOther) const override;
199 
200 private:
201   void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
202   void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
203   void relaxTlsLdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
204   void relaxTlsIeToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
205 };
206 } // namespace
207 
208 uint64_t elf::getPPC64TocBase(Ctx &ctx) {
209   // The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The
210   // TOC starts where the first of these sections starts. We always create a
211   // .got when we see a relocation that uses it, so for us the start is always
212   // the .got.
213   uint64_t tocVA = ctx.in.got->getVA();
214 
215   // Per the ppc64-elf-linux ABI, the TOC base is the TOC value plus 0x8000,
216   // thus permitting a full 64 Kbyte segment. Note that the glibc startup
217   // code (crt1.o) assumes that you can get from the TOC base to the
218   // start of the .toc section with only a single (signed) 16-bit relocation.
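  // For example, if .got starts at 0x10030000, the TOC base is 0x10038000 and
  // a signed 16-bit offset from it reaches the 64 KiB starting at the .got.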
219   return tocVA + ppc64TocOffset;
220 }
221 
222 unsigned elf::getPPC64GlobalEntryToLocalEntryOffset(Ctx &ctx, uint8_t stOther) {
223   // The offset is encoded into the 3 most significant bits of the st_other
224   // field, with some special values described in section 3.4.1 of the ABI:
225   // 0   --> Zero offset between the GEP and LEP, and the function does NOT use
226   //         the TOC pointer (r2). r2 will hold the same value on returning from
227   //         the function as it did on entering the function.
228   // 1   --> Zero offset between the GEP and LEP, and r2 should be treated as a
229   //         caller-saved register for all callers.
230   // 2-6 --> The binary logarithm of the offset, e.g.:
231   //         2 --> 2^2 = 4 bytes -->  1 instruction.
232   //         6 --> 2^6 = 64 bytes --> 16 instructions.
233   // 7   --> Reserved.
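  //
  // For example, st_other = 0x60 encodes the value 3 in those bits, i.e. a
  // 2^3 = 8 byte (2 instruction) offset between the GEP and the LEP.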
234   uint8_t gepToLep = (stOther >> 5) & 7;
235   if (gepToLep < 2)
236     return 0;
237 
238   // The value encoded in the st_other bits is the
239   // log-base-2(offset).
240   if (gepToLep < 7)
241     return 1 << gepToLep;
242 
243   ErrAlways(ctx)
244       << "reserved value of 7 in the 3 most-significant-bits of st_other";
245   return 0;
246 }
247 
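// A prefixed instruction occupies 8 bytes: the 4-byte prefix word is placed at
// the lower address, followed by the 4-byte suffix word, each stored in the
// target's endianness. The insn value keeps the prefix in its upper 32 bits
// (see PPCPrefixedInsn above), so on little-endian targets the two halves are
// swapped before the 64-bit store so that the prefix still lands first.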
248 void elf::writePrefixedInst(Ctx &ctx, uint8_t *loc, uint64_t insn) {
249   insn = ctx.arg.isLE ? insn << 32 | insn >> 32 : insn;
250   write64(ctx, loc, insn);
251 }
252 
253 static bool addOptional(Ctx &ctx, StringRef name, uint64_t value,
254                         std::vector<Defined *> &defined) {
255   Symbol *sym = ctx.symtab->find(name);
256   if (!sym || sym->isDefined())
257     return false;
258   sym->resolve(ctx, Defined{ctx, ctx.internalFile, StringRef(), STB_GLOBAL,
259                             STV_HIDDEN, STT_FUNC, value,
260                             /*size=*/0, /*section=*/nullptr});
261   defined.push_back(cast<Defined>(sym));
262   return true;
263 }
264 
265 // If from is 14, write ${prefix}14: firstInsn; ${prefix}15:
266 // firstInsn+0x200008; ...; ${prefix}31: firstInsn+(31-14)*0x200008; $tail
267 // The labels are defined only if they exist in the symbol table.
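// For the ld/std encodings used below, each 0x200008 step bumps the target
// register field (bits 6-10) by one (+0x200000) and the DS displacement by 8
// bytes (+0x8), matching the consecutive save/restore stack slots.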
268 static void writeSequence(Ctx &ctx, const char *prefix, int from,
269                           uint32_t firstInsn, ArrayRef<uint32_t> tail) {
270   std::vector<Defined *> defined;
271   char name[16];
272   int first;
273   const size_t size = 32 - from + tail.size();
274   MutableArrayRef<uint32_t> buf(ctx.bAlloc.Allocate<uint32_t>(size), size);
275   uint32_t *ptr = buf.data();
276   for (int r = from; r < 32; ++r) {
277     format("%s%d", prefix, r).snprint(name, sizeof(name));
278     if (addOptional(ctx, name, 4 * (r - from), defined) && defined.size() == 1)
279       first = r - from;
280     write32(ctx, ptr++, firstInsn + 0x200008 * (r - from));
281   }
282   for (uint32_t insn : tail)
283     write32(ctx, ptr++, insn);
284   assert(ptr == &*buf.end());
285 
286   if (defined.empty())
287     return;
288   // The full section content has the extent of [begin, end). We drop unused
289   // instructions and write [first,end).
290   auto *sec = make<InputSection>(
291       ctx.internalFile, ".text", SHT_PROGBITS, SHF_ALLOC, /*addralign=*/4,
292       /*entsize=*/0,
293       ArrayRef(reinterpret_cast<uint8_t *>(buf.data() + first),
294                4 * (buf.size() - first)));
295   ctx.inputSections.push_back(sec);
296   for (Defined *sym : defined) {
297     sym->section = sec;
298     sym->value -= 4 * first;
299   }
300 }
301 
302 // Implements some save and restore functions as described by ELF V2 ABI to be
303 // compatible with GCC. With GCC -Os, when the number of call-saved registers
304 // exceeds a certain threshold, GCC generates _savegpr0_* _restgpr0_* calls and
305 // expects the linker to define them. See
306 // https://sourceware.org/pipermail/binutils/2002-February/017444.html and
307 // https://sourceware.org/pipermail/binutils/2004-August/036765.html . This is
308 // weird because libgcc.a would be the natural place. The linker generation
309 // approach has the advantage that the linker can generate multiple copies to
310 // avoid long branch thunks. However, we don't consider the advantage
311 // significant enough to complicate our trunk implementation, so we take the
312 // simple approach and synthesize .text sections providing the implementation.
313 void elf::addPPC64SaveRestore(Ctx &ctx) {
314   constexpr uint32_t blr = 0x4e800020, mtlr_0 = 0x7c0803a6;
315 
316   // _restgpr0_14: ld 14, -144(1); _restgpr0_15: ld 15, -136(1); ...
317   // Tail: ld 0, 16(1); mtlr 0; blr
318   writeSequence(ctx, "_restgpr0_", 14, 0xe9c1ff70, {0xe8010010, mtlr_0, blr});
319   // _restgpr1_14: ld 14, -144(12); _restgpr1_15: ld 15, -136(12); ...
320   // Tail: blr
321   writeSequence(ctx, "_restgpr1_", 14, 0xe9ccff70, {blr});
322   // _savegpr0_14: std 14, -144(1); _savegpr0_15: std 15, -136(1); ...
323   // Tail: std 0, 16(1); blr
324   writeSequence(ctx, "_savegpr0_", 14, 0xf9c1ff70, {0xf8010010, blr});
325   // _savegpr1_14: std 14, -144(12); _savegpr1_15: std 15, -136(12); ...
326   // Tail: blr
327   writeSequence(ctx, "_savegpr1_", 14, 0xf9ccff70, {blr});
328 }
329 
330 // Find the R_PPC64_ADDR64 in .rela.toc with matching offset.
331 template <typename ELFT>
332 static std::pair<Defined *, int64_t>
333 getRelaTocSymAndAddend(InputSectionBase *tocSec, uint64_t offset) {
334   // .rela.toc contains exclusively R_PPC64_ADDR64 relocations sorted by
335   // r_offset: 0, 8, 16, etc. For a given Offset, Offset / 8 gives us the
336   // relocation index in most cases.
337   //
338   // In rare cases a TOC entry may store a constant that doesn't need an
339   // R_PPC64_ADDR64, so the corresponding r_offset is missing. offset / 8 then
340   // points to a relocation with a larger r_offset; do a linear probe backwards.
341   // Constants are extremely uncommon in .toc and the extra number of array
342   // accesses can be seen as a small constant.
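  // For example, if the entry at offset 8 holds a constant, the relocations
  // may have r_offset 0, 16, 24, ...; a lookup of offset 16 starts at index 2
  // (r_offset 24) and walks back one step to the match at index 1.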
343   ArrayRef<typename ELFT::Rela> relas =
344       tocSec->template relsOrRelas<ELFT>().relas;
345   if (relas.empty())
346     return {};
347   uint64_t index = std::min<uint64_t>(offset / 8, relas.size() - 1);
348   for (;;) {
349     if (relas[index].r_offset == offset) {
350       Symbol &sym = tocSec->file->getRelocTargetSym(relas[index]);
351       return {dyn_cast<Defined>(&sym), getAddend<ELFT>(relas[index])};
352     }
353     if (relas[index].r_offset < offset || index == 0)
354       break;
355     --index;
356   }
357   return {};
358 }
359 
360 // When accessing a symbol defined in another translation unit, compilers
361 // reserve a .toc entry, allocate a local label and generate toc-indirect
362 // instructions:
363 //
364 //   addis 3, 2, .LC0@toc@ha  # R_PPC64_TOC16_HA
365 //   ld    3, .LC0@toc@l(3)   # R_PPC64_TOC16_LO_DS, load the address from a .toc entry
366 //   ld/lwa 3, 0(3)           # load the value from the address
367 //
368 //   .section .toc,"aw",@progbits
369 //   .LC0: .tc var[TC],var
370 //
371 // If var is defined, non-preemptable and addressable with a 32-bit signed
372 // offset from the toc base, the address of var can be computed by adding an
373 // offset to the toc base, saving a load.
374 //
375 //   addis 3,2,var@toc@ha     # this may be relaxed to a nop,
376 //   addi  3,3,var@toc@l      # then this becomes addi 3,2,var@toc
377 //   ld/lwa 3, 0(3)           # load the value from the address
378 //
379 // Returns true if the relaxation is performed.
380 static bool tryRelaxPPC64TocIndirection(Ctx &ctx, const Relocation &rel,
381                                         uint8_t *bufLoc) {
382   assert(ctx.arg.tocOptimize);
383   if (rel.addend < 0)
384     return false;
385 
386   // If the symbol is not the .toc section, this isn't a toc-indirection.
387   Defined *defSym = dyn_cast<Defined>(rel.sym);
388   if (!defSym || !defSym->isSection() || defSym->section->name != ".toc")
389     return false;
390 
391   Defined *d;
392   int64_t addend;
393   auto *tocISB = cast<InputSectionBase>(defSym->section);
394   std::tie(d, addend) =
395       ctx.arg.isLE ? getRelaTocSymAndAddend<ELF64LE>(tocISB, rel.addend)
396                    : getRelaTocSymAndAddend<ELF64BE>(tocISB, rel.addend);
397 
398   // Only non-preemptable defined symbols can be relaxed.
399   if (!d || d->isPreemptible)
400     return false;
401 
402   // R_PPC64_ADDR64 should have created a canonical PLT for the non-preemptable
403   // ifunc and changed its type to STT_FUNC.
404   assert(!d->isGnuIFunc());
405 
406   // Two instructions can materialize a 32-bit signed offset from the toc base.
407   uint64_t tocRelative = d->getVA(ctx, addend) - getPPC64TocBase(ctx);
408   if (!isInt<32>(tocRelative))
409     return false;
410 
411   // Add PPC64TocOffset that will be subtracted by PPC64::relocate().
412   static_cast<const PPC64 &>(*ctx.target)
413       .relaxGot(bufLoc, rel, tocRelative + ppc64TocOffset);
414   return true;
415 }
416 
417 // Relocation masks following the #lo(value), #hi(value), #ha(value),
418 // #higher(value), #highera(value), #highest(value), and #highesta(value)
419 // macros defined in section 4.5.1 (Relocation Types) of the PPC-elf64abi
420 // document.
421 static uint16_t lo(uint64_t v) { return v; }
422 static uint16_t hi(uint64_t v) { return v >> 16; }
423 static uint64_t ha(uint64_t v) { return (v + 0x8000) >> 16; }
424 static uint16_t higher(uint64_t v) { return v >> 32; }
425 static uint16_t highera(uint64_t v) { return (v + 0x8000) >> 32; }
426 static uint16_t highest(uint64_t v) { return v >> 48; }
427 static uint16_t highesta(uint64_t v) { return (v + 0x8000) >> 48; }
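
// For example, for v = 0x12348000: lo(v) = 0x8000, which a D-form instruction
// sign-extends to -0x8000, so ha(v) rounds up to 0x1235 rather than
// hi(v) = 0x1234; (0x1235 << 16) - 0x8000 recovers the original value.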
428 
429 // Extracts the 'PO' field of an instruction encoding.
430 static uint8_t getPrimaryOpCode(uint32_t encoding) { return (encoding >> 26); }
431 
432 static bool isDQFormInstruction(uint32_t encoding) {
433   switch (getPrimaryOpCode(encoding)) {
434   default:
435     return false;
436   case 6: // Power10 paired loads/stores (lxvp, stxvp).
437   case 56:
438     // The only instruction with a primary opcode of 56 is `lq`.
439     return true;
440   case 61:
441     // There are both DS and DQ instruction forms with this primary opcode.
442     // Namely `lxv` and `stxv` are the DQ-forms that use it.
443     // The DS 'XO' bits being set to 01 is restricted to DQ form.
444     return (encoding & 3) == 0x1;
445   }
446 }
447 
448 static bool isDSFormInstruction(PPCLegacyInsn insn) {
449   switch (insn) {
450   default:
451     return false;
452   case PPCLegacyInsn::LWA:
453   case PPCLegacyInsn::LD:
454   case PPCLegacyInsn::LXSD:
455   case PPCLegacyInsn::LXSSP:
456   case PPCLegacyInsn::STD:
457   case PPCLegacyInsn::STXSD:
458   case PPCLegacyInsn::STXSSP:
459     return true;
460   }
461 }
462 
463 static PPCLegacyInsn getPPCLegacyInsn(uint32_t encoding) {
464   uint32_t opc = encoding & 0xfc000000;
465 
466   // If the primary opcode is shared between multiple instructions, we need to
467   // fix it up to match the actual instruction we are after.
468   if ((opc == 0xe4000000 || opc == 0xe8000000 || opc == 0xf4000000 ||
469        opc == 0xf8000000) &&
470       !isDQFormInstruction(encoding))
471     opc = encoding & 0xfc000003;
472   else if (opc == 0xf4000000)
473     opc = encoding & 0xfc000007;
474   else if (opc == 0x18000000)
475     opc = encoding & 0xfc00000f;
476 
477   // If the value is not one of the enumerators in PPCLegacyInsn, we want to
478   // return PPCLegacyInsn::NOINSN.
479   if (!checkPPCLegacyInsn(opc))
480     return PPCLegacyInsn::NOINSN;
481   return static_cast<PPCLegacyInsn>(opc);
482 }
483 
484 static PPCPrefixedInsn getPCRelativeForm(PPCLegacyInsn insn) {
485   switch (insn) {
486 #define PCREL_OPT(Legacy, PCRel, InsnMask)                                     \
487   case PPCLegacyInsn::Legacy:                                                  \
488     return PPCPrefixedInsn::PCRel
489 #include "PPCInsns.def"
490 #undef PCREL_OPT
491   }
492   return PPCPrefixedInsn::NOINSN;
493 }
494 
495 static LegacyToPrefixMask getInsnMask(PPCLegacyInsn insn) {
496   switch (insn) {
497 #define PCREL_OPT(Legacy, PCRel, InsnMask)                                     \
498   case PPCLegacyInsn::Legacy:                                                  \
499     return LegacyToPrefixMask::InsnMask
500 #include "PPCInsns.def"
501 #undef PCREL_OPT
502   }
503   return LegacyToPrefixMask::NOMASK;
504 }
505 static uint64_t getPCRelativeForm(uint32_t encoding) {
506   PPCLegacyInsn origInsn = getPPCLegacyInsn(encoding);
507   PPCPrefixedInsn pcrelInsn = getPCRelativeForm(origInsn);
508   if (pcrelInsn == PPCPrefixedInsn::NOINSN)
509     return UINT64_C(-1);
510   LegacyToPrefixMask origInsnMask = getInsnMask(origInsn);
511   uint64_t pcrelEncoding =
512       (uint64_t)pcrelInsn | (encoding & (uint64_t)origInsnMask);
513 
514   // If the mask requires moving bit 28 to bit 5, do that now.
515   if (origInsnMask == LegacyToPrefixMask::ST_STX28_TO5)
516     pcrelEncoding |= (encoding & 0x8) << 23;
517   return pcrelEncoding;
518 }
519 
520 static bool isInstructionUpdateForm(uint32_t encoding) {
521   switch (getPrimaryOpCode(encoding)) {
522   default:
523     return false;
524   case LBZU:
525   case LHAU:
526   case LHZU:
527   case LWZU:
528   case LFSU:
529   case LFDU:
530   case STBU:
531   case STHU:
532   case STWU:
533   case STFSU:
534   case STFDU:
535     return true;
536     // LWA has the same opcode as LD, and the DS bits are what differentiate
537     // between LD/LDU/LWA.
538   case LD:
539   case STD:
540     return (encoding & 3) == 1;
541   }
542 }
543 
544 // Compute the total displacement between the prefixed instruction that gets
545 // to the start of the data and the load/store instruction that has the offset
546 // into the data structure.
547 // For example:
548 // paddi 3, 0, 1000, 1
549 // lwz 3, 20(3)
550 // Should add up to 1020 for total displacement.
551 static int64_t getTotalDisp(uint64_t prefixedInsn, uint32_t accessInsn) {
552   int64_t disp34 = llvm::SignExtend64(
553       ((prefixedInsn & 0x3ffff00000000) >> 16) | (prefixedInsn & 0xffff), 34);
554   int32_t disp16 = llvm::SignExtend32(accessInsn & 0xffff, 16);
555   // For DS and DQ form instructions, we need to mask out the XO bits.
556   if (isDQFormInstruction(accessInsn))
557     disp16 &= ~0xf;
558   else if (isDSFormInstruction(getPPCLegacyInsn(accessInsn)))
559     disp16 &= ~0x3;
560   return disp34 + disp16;
561 }
562 
563 // There are a number of places where we either want to read or write an
564 // instruction when handling a half16 relocation type. On big-endian the buffer
565 // pointer is pointing into the middle of the word we want to extract, and on
566 // little-endian it is pointing to the start of the word. These 2 helpers are to
567 // simplify reading and writing in that context.
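// For example, the 16-bit immediate of "addis r3, r2, sym@toc@ha" occupies the
// low-order half of the instruction word, i.e. its last 2 bytes on big-endian
// and its first 2 bytes on little-endian; a half16 relocation points at that
// immediate, hence the loc - 2 adjustment on big-endian.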
568 static void writeFromHalf16(Ctx &ctx, uint8_t *loc, uint32_t insn) {
569   write32(ctx, ctx.arg.isLE ? loc : loc - 2, insn);
570 }
571 
572 static uint32_t readFromHalf16(Ctx &ctx, const uint8_t *loc) {
573   return read32(ctx, ctx.arg.isLE ? loc : loc - 2);
574 }
575 
576 static uint64_t readPrefixedInst(Ctx &ctx, const uint8_t *loc) {
577   uint64_t fullInstr = read64(ctx, loc);
578   return ctx.arg.isLE ? (fullInstr << 32 | fullInstr >> 32) : fullInstr;
579 }
580 
581 PPC64::PPC64(Ctx &ctx) : TargetInfo(ctx) {
582   copyRel = R_PPC64_COPY;
583   gotRel = R_PPC64_GLOB_DAT;
584   pltRel = R_PPC64_JMP_SLOT;
585   relativeRel = R_PPC64_RELATIVE;
586   iRelativeRel = R_PPC64_IRELATIVE;
587   symbolicRel = R_PPC64_ADDR64;
588   pltHeaderSize = 60;
589   pltEntrySize = 4;
590   ipltEntrySize = 16; // PPC64PltCallStub::size
591   gotHeaderEntriesNum = 1;
592   gotPltHeaderEntriesNum = 2;
593   needsThunks = true;
594 
595   tlsModuleIndexRel = R_PPC64_DTPMOD64;
596   tlsOffsetRel = R_PPC64_DTPREL64;
597 
598   tlsGotRel = R_PPC64_TPREL64;
599 
600   needsMoreStackNonSplit = false;
601 
602   // We need 64K pages (at least under glibc/Linux, the loader won't
603   // set different permissions on a finer granularity than that).
604   defaultMaxPageSize = 65536;
605 
606   // The PPC64 ELF ABI v1 spec says:
607   //
608   //   It is normally desirable to put segments with different characteristics
609   //   in separate 256 Mbyte portions of the address space, to give the
610   //   operating system full paging flexibility in the 64-bit address space.
611   //
612   // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
613   // use 0x10000000 as the starting address.
614   defaultImageBase = 0x10000000;
615 
616   write32(ctx, trapInstr.data(), 0x7fe00008);
617 }
618 
619 int PPC64::getTlsGdRelaxSkip(RelType type) const {
620   // A __tls_get_addr call instruction is marked with 2 relocations:
621   //
622   //   R_PPC64_TLSGD / R_PPC64_TLSLD: marker relocation
623   //   R_PPC64_REL24: __tls_get_addr
624   //
625   // After the relaxation we no longer call __tls_get_addr and should skip both
626   // relocations to not create a false dependence on __tls_get_addr being
627   // defined.
628   if (type == R_PPC64_TLSGD || type == R_PPC64_TLSLD)
629     return 2;
630   return 1;
631 }
632 
633 static uint32_t getEFlags(InputFile *file) {
634   if (file->ekind == ELF64BEKind)
635     return cast<ObjFile<ELF64BE>>(file)->getObj().getHeader().e_flags;
636   return cast<ObjFile<ELF64LE>>(file)->getObj().getHeader().e_flags;
637 }
638 
639 // This file implements v2 ABI. This function makes sure that all
640 // object files have v2 or an unspecified version as an ABI version.
641 uint32_t PPC64::calcEFlags() const {
642   for (InputFile *f : ctx.objectFiles) {
643     uint32_t flag = getEFlags(f);
644     if (flag == 1)
645       ErrAlways(ctx) << f << ": ABI version 1 is not supported";
646     else if (flag > 2)
647       ErrAlways(ctx) << f << ": unrecognized e_flags: " << flag;
648   }
649   return 2;
650 }
651 
652 void PPC64::relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const {
653   switch (rel.type) {
654   case R_PPC64_TOC16_HA:
655     // Convert "addis reg, 2, .LC0@toc@ha" to "addis reg, 2, var@toc@ha" or "nop".
656     relocate(loc, rel, val);
657     break;
658   case R_PPC64_TOC16_LO_DS: {
659     // Convert "ld reg, .LC0@toc@l(reg)" to "addi reg, reg, var@toc@l" or
660     // "addi reg, 2, var@toc".
661     uint32_t insn = readFromHalf16(ctx, loc);
662     if (getPrimaryOpCode(insn) != LD)
663       ErrAlways(ctx)
664           << "expected a 'ld' for got-indirect to toc-relative relaxing";
665     writeFromHalf16(ctx, loc, (insn & 0x03ffffff) | 0x38000000);
666     relocateNoSym(loc, R_PPC64_TOC16_LO, val);
667     break;
668   }
669   case R_PPC64_GOT_PCREL34: {
670     // Clear the first 8 bits of the prefix and the first 6 bits of the
671     // instruction (the primary opcode).
672     uint64_t insn = readPrefixedInst(ctx, loc);
673     if ((insn & 0xfc000000) != 0xe4000000)
674       ErrAlways(ctx)
675           << "expected a 'pld' for got-indirect to pc-relative relaxing";
676     insn &= ~0xff000000fc000000;
677 
678     // Replace the cleared bits with the values for PADDI (0x600000038000000).
679     insn |= 0x600000038000000;
680     writePrefixedInst(ctx, loc, insn);
681     relocate(loc, rel, val);
682     break;
683   }
684   case R_PPC64_PCREL_OPT: {
685     // We can only relax this if the R_PPC64_GOT_PCREL34 at this offset can
686     // be relaxed. The eligibility for the relaxation needs to be determined
687     // on that relocation since this one does not relocate a symbol.
688     uint64_t insn = readPrefixedInst(ctx, loc);
689     uint32_t accessInsn = read32(ctx, loc + rel.addend);
690     uint64_t pcRelInsn = getPCRelativeForm(accessInsn);
691 
692     // This error is not necessary for correctness but is emitted for now
693     // to ensure we don't miss these opportunities in real code. It can be
694     // removed at a later date.
695     if (pcRelInsn == UINT64_C(-1)) {
696       Err(ctx)
697           << "unrecognized instruction for R_PPC64_PCREL_OPT relaxation: 0x"
698           << utohexstr(accessInsn, true);
699       break;
700     }
701 
702     int64_t totalDisp = getTotalDisp(insn, accessInsn);
703     if (!isInt<34>(totalDisp))
704       break; // Displacement doesn't fit.
705     // Convert the PADDI to the prefixed version of accessInsn and convert
706     // accessInsn to a nop.
707     writePrefixedInst(ctx, loc,
708                       pcRelInsn | ((totalDisp & 0x3ffff0000) << 16) |
709                           (totalDisp & 0xffff));
710     write32(ctx, loc + rel.addend, NOP); // nop accessInsn.
711     break;
712   }
713   default:
714     llvm_unreachable("unexpected relocation type");
715   }
716 }
717 
718 void PPC64::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
719                            uint64_t val) const {
720   // Reference: 3.7.4.2 of the 64-bit ELF V2 abi supplement.
721   // The general dynamic code sequence for a global `x` will look like:
722   // Instruction                    Relocation                Symbol
723   // addis r3, r2, x@got@tlsgd@ha   R_PPC64_GOT_TLSGD16_HA      x
724   // addi  r3, r3, x@got@tlsgd@l    R_PPC64_GOT_TLSGD16_LO      x
725   // bl __tls_get_addr(x@tlsgd)     R_PPC64_TLSGD               x
726   //                                R_PPC64_REL24               __tls_get_addr
727   // nop                            None                       None
728 
729   // Relaxing to local exec entails converting:
730   // addis r3, r2, x@got@tlsgd@ha    into      nop
731   // addi  r3, r3, x@got@tlsgd@l     into      addis r3, r13, x@tprel@ha
732   // bl __tls_get_addr(x@tlsgd)      into      nop
733   // nop                             into      addi r3, r3, x@tprel@l
734 
735   switch (rel.type) {
736   case R_PPC64_GOT_TLSGD16_HA:
737     writeFromHalf16(ctx, loc, NOP);
738     break;
739   case R_PPC64_GOT_TLSGD16:
740   case R_PPC64_GOT_TLSGD16_LO:
741     writeFromHalf16(ctx, loc, 0x3c6d0000); // addis r3, r13
742     relocateNoSym(loc, R_PPC64_TPREL16_HA, val);
743     break;
744   case R_PPC64_GOT_TLSGD_PCREL34:
745     // Relax from paddi r3, 0, x@got@tlsgd@pcrel, 1 to
746     //            paddi r3, r13, x@tprel, 0
747     writePrefixedInst(ctx, loc, 0x06000000386d0000);
748     relocateNoSym(loc, R_PPC64_TPREL34, val);
749     break;
750   case R_PPC64_TLSGD: {
751     // PC Relative Relaxation:
752     // Relax from bl __tls_get_addr@notoc(x@tlsgd) to
753     //            nop
754     // TOC Relaxation:
755     // Relax from bl __tls_get_addr(x@tlsgd)
756     //            nop
757     // to
758     //            nop
759     //            addi r3, r3, x@tprel@l
760     const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
761     if (locAsInt % 4 == 0) {
762       write32(ctx, loc, NOP);            // nop
763       write32(ctx, loc + 4, 0x38630000); // addi r3, r3
764       // Since we are relocating a half16 type relocation and loc + 4 points to
765       // the start of an instruction, we need to advance the buffer by an extra
766       // 2 bytes on BE.
767       relocateNoSym(loc + 4 + (ctx.arg.ekind == ELF64BEKind ? 2 : 0),
768                     R_PPC64_TPREL16_LO, val);
769     } else if (locAsInt % 4 == 1) {
770       write32(ctx, loc - 1, NOP);
771     } else {
772       Err(ctx) << "R_PPC64_TLSGD has unexpected byte alignment";
773     }
774     break;
775   }
776   default:
777     llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
778   }
779 }
780 
781 void PPC64::relaxTlsLdToLe(uint8_t *loc, const Relocation &rel,
782                            uint64_t val) const {
783   // Reference: 3.7.4.3 of the 64-bit ELF V2 abi supplement.
784   // The local dynamic code sequence for a global `x` will look like:
785   // Instruction                    Relocation                Symbol
786   // addis r3, r2, x@got@tlsld@ha   R_PPC64_GOT_TLSLD16_HA      x
787   // addi  r3, r3, x@got@tlsld@l    R_PPC64_GOT_TLSLD16_LO      x
788   // bl __tls_get_addr(x@tlsgd)     R_PPC64_TLSLD               x
789   //                                R_PPC64_REL24               __tls_get_addr
790   // nop                            None                       None
791 
792   // Relaxing to local exec entails converting:
793   // addis r3, r2, x@got@tlsld@ha   into      nop
794   // addi  r3, r3, x@got@tlsld@l    into      addis r3, r13, 0
795   // bl __tls_get_addr(x@tlsgd)     into      nop
796   // nop                            into      addi r3, r3, 4096
797 
798   switch (rel.type) {
799   case R_PPC64_GOT_TLSLD16_HA:
800     writeFromHalf16(ctx, loc, NOP);
801     break;
802   case R_PPC64_GOT_TLSLD16_LO:
803     writeFromHalf16(ctx, loc, 0x3c6d0000); // addis r3, r13, 0
804     break;
805   case R_PPC64_GOT_TLSLD_PCREL34:
806     // Relax from paddi r3, 0, x1@got@tlsld@pcrel, 1 to
807     //            paddi r3, r13, 0x1000, 0
808     writePrefixedInst(ctx, loc, 0x06000000386d1000);
809     break;
810   case R_PPC64_TLSLD: {
811     // PC Relative Relaxation:
812     // Relax from bl __tls_get_addr@notoc(x@tlsld)
813     // to
814     //            nop
815     // TOC Relaxation:
816     // Relax from bl __tls_get_addr(x@tlsld)
817     //            nop
818     // to
819     //            nop
820     //            addi r3, r3, 4096
821     const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
822     if (locAsInt % 4 == 0) {
823       write32(ctx, loc, NOP);
824       write32(ctx, loc + 4, 0x38631000); // addi r3, r3, 4096
825     } else if (locAsInt % 4 == 1) {
826       write32(ctx, loc - 1, NOP);
827     } else {
828       Err(ctx) << "R_PPC64_TLSLD has unexpected byte alignment";
829     }
830     break;
831   }
832   case R_PPC64_DTPREL16:
833   case R_PPC64_DTPREL16_HA:
834   case R_PPC64_DTPREL16_HI:
835   case R_PPC64_DTPREL16_DS:
836   case R_PPC64_DTPREL16_LO:
837   case R_PPC64_DTPREL16_LO_DS:
838   case R_PPC64_DTPREL34:
839     relocate(loc, rel, val);
840     break;
841   default:
842     llvm_unreachable("unsupported relocation for TLS LD to LE relaxation");
843   }
844 }
845 
846 // Map X-Form instructions to their DS-Form counterparts, if applicable.
847 // The full encoding is returned here to distinguish between the different
848 // DS-Form instructions.
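// For example, ldx (X-form extended opcode 21) maps to ld (primary opcode 58),
// i.e. 58 << 26 = 0xe8000000, and lwax additionally sets the DS 'XO' bits to
// 0b10 to select lwa.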
849 unsigned elf::getPPCDSFormOp(unsigned secondaryOp) {
850   switch (secondaryOp) {
851   case LWAX:
852     return (LWA << 26) | 0x2;
853   case LDX:
854     return LD << 26;
855   case STDX:
856     return STD << 26;
857   default:
858     return 0;
859   }
860 }
861 
862 unsigned elf::getPPCDFormOp(unsigned secondaryOp) {
863   switch (secondaryOp) {
864   case LBZX:
865     return LBZ << 26;
866   case LHZX:
867     return LHZ << 26;
868   case LWZX:
869     return LWZ << 26;
870   case STBX:
871     return STB << 26;
872   case STHX:
873     return STH << 26;
874   case STWX:
875     return STW << 26;
876   case LHAX:
877     return LHA << 26;
878   case LFSX:
879     return LFS << 26;
880   case LFDX:
881     return LFD << 26;
882   case STFSX:
883     return STFS << 26;
884   case STFDX:
885     return STFD << 26;
886   case ADD:
887     return ADDI << 26;
888   default:
889     return 0;
890   }
891 }
892 
893 void PPC64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
894                            uint64_t val) const {
895   // The initial exec code sequence for a global `x` will look like:
896   // Instruction                    Relocation                Symbol
897   // addis r9, r2, x@got@tprel@ha   R_PPC64_GOT_TPREL16_HA      x
898   // ld    r9, x@got@tprel@l(r9)    R_PPC64_GOT_TPREL16_LO_DS   x
899   // add r9, r9, x@tls              R_PPC64_TLS                 x
900 
901   // Relaxing to local exec entails converting:
902   // addis r9, r2, x@got@tprel@ha       into        nop
903   // ld r9, x@got@tprel@l(r9)           into        addis r9, r13, x@tprel@ha
904   // add r9, r9, x@tls                  into        addi r9, r9, x@tprel@l
905 
906   // x@tls (R_PPC64_TLS) is a relocation which does not compute anything;
907   // it is replaced with r13 (the thread pointer).
908 
909   // The add instruction in the initial exec sequence has multiple variations
910   // that need to be handled. If we are building an address it will use an add
911   // instruction; if we are accessing memory it will use any of the X-form
912   // indexed load or store instructions.
913 
914   unsigned offset = (ctx.arg.ekind == ELF64BEKind) ? 2 : 0;
915   switch (rel.type) {
916   case R_PPC64_GOT_TPREL16_HA:
917     write32(ctx, loc - offset, NOP);
918     break;
919   case R_PPC64_GOT_TPREL16_LO_DS:
920   case R_PPC64_GOT_TPREL16_DS: {
921     uint32_t regNo = read32(ctx, loc - offset) & 0x03e00000; // bits 6-10
922     write32(ctx, loc - offset, 0x3c0d0000 | regNo);          // addis RegNo, r13
923     relocateNoSym(loc, R_PPC64_TPREL16_HA, val);
924     break;
925   }
926   case R_PPC64_GOT_TPREL_PCREL34: {
927     const uint64_t pldRT = readPrefixedInst(ctx, loc) & 0x0000000003e00000;
928     // paddi RT(from pld), r13, symbol@tprel, 0
929     writePrefixedInst(ctx, loc, 0x06000000380d0000 | pldRT);
930     relocateNoSym(loc, R_PPC64_TPREL34, val);
931     break;
932   }
933   case R_PPC64_TLS: {
934     const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
935     if (locAsInt % 4 == 0) {
936       uint32_t primaryOp = getPrimaryOpCode(read32(ctx, loc));
937       if (primaryOp != 31)
938         ErrAlways(ctx) << "unrecognized instruction for IE to LE R_PPC64_TLS";
939       uint32_t secondaryOp = (read32(ctx, loc) & 0x000007fe) >> 1; // bits 21-30
940       uint32_t dFormOp = getPPCDFormOp(secondaryOp);
941       uint32_t finalReloc;
942       if (dFormOp == 0) { // Expecting a DS-Form instruction.
943         dFormOp = getPPCDSFormOp(secondaryOp);
944         if (dFormOp == 0)
945           ErrAlways(ctx) << "unrecognized instruction for IE to LE R_PPC64_TLS";
946         finalReloc = R_PPC64_TPREL16_LO_DS;
947       } else
948         finalReloc = R_PPC64_TPREL16_LO;
949       write32(ctx, loc, dFormOp | (read32(ctx, loc) & 0x03ff0000));
950       relocateNoSym(loc + offset, finalReloc, val);
951     } else if (locAsInt % 4 == 1) {
952       // If the offset is not 4 byte aligned then we have a PCRel type reloc.
953       // This version of the relocation is offset by one byte from the
954       // instruction it references.
955       uint32_t tlsInstr = read32(ctx, loc - 1);
956       uint32_t primaryOp = getPrimaryOpCode(tlsInstr);
957       if (primaryOp != 31)
958         Err(ctx) << "unrecognized instruction for IE to LE R_PPC64_TLS";
959       uint32_t secondaryOp = (tlsInstr & 0x000007FE) >> 1; // bits 21-30
960       // The add is a special case and should be turned into a nop. The paddi
961       // that comes before it will already have computed the address of the
962       // symbol.
963       if (secondaryOp == 266) {
964         // Check if the add uses the same result register as the input register.
965         uint32_t rt = (tlsInstr & 0x03E00000) >> 21; // bits 6-10
966         uint32_t ra = (tlsInstr & 0x001F0000) >> 16; // bits 11-15
967         if (ra == rt) {
968           write32(ctx, loc - 1, NOP);
969         } else {
970           // mr rt, ra
971           write32(ctx, loc - 1,
972                   0x7C000378 | (rt << 16) | (ra << 21) | (ra << 11));
973         }
974       } else {
975         uint32_t dFormOp = getPPCDFormOp(secondaryOp);
976         if (dFormOp == 0) { // Expecting a DS-Form instruction.
977           dFormOp = getPPCDSFormOp(secondaryOp);
978           if (dFormOp == 0)
979             Err(ctx) << "unrecognized instruction for IE to LE R_PPC64_TLS";
980         }
981         write32(ctx, loc - 1, (dFormOp | (tlsInstr & 0x03ff0000)));
982       }
983     } else {
984       Err(ctx) << "R_PPC64_TLS must be either 4 byte aligned or one byte "
985                   "offset from 4 byte aligned";
986     }
987     break;
988   }
989   default:
990     llvm_unreachable("unknown relocation for IE to LE");
991     break;
992   }
993 }
994 
995 RelExpr PPC64::getRelExpr(RelType type, const Symbol &s,
996                           const uint8_t *loc) const {
997   switch (type) {
998   case R_PPC64_NONE:
999     return R_NONE;
1000   case R_PPC64_ADDR16:
1001   case R_PPC64_ADDR16_DS:
1002   case R_PPC64_ADDR16_HA:
1003   case R_PPC64_ADDR16_HI:
1004   case R_PPC64_ADDR16_HIGH:
1005   case R_PPC64_ADDR16_HIGHER:
1006   case R_PPC64_ADDR16_HIGHERA:
1007   case R_PPC64_ADDR16_HIGHEST:
1008   case R_PPC64_ADDR16_HIGHESTA:
1009   case R_PPC64_ADDR16_LO:
1010   case R_PPC64_ADDR16_LO_DS:
1011   case R_PPC64_ADDR32:
1012   case R_PPC64_ADDR64:
1013     return R_ABS;
1014   case R_PPC64_GOT16:
1015   case R_PPC64_GOT16_DS:
1016   case R_PPC64_GOT16_HA:
1017   case R_PPC64_GOT16_HI:
1018   case R_PPC64_GOT16_LO:
1019   case R_PPC64_GOT16_LO_DS:
1020     return R_GOT_OFF;
1021   case R_PPC64_TOC16:
1022   case R_PPC64_TOC16_DS:
1023   case R_PPC64_TOC16_HI:
1024   case R_PPC64_TOC16_LO:
1025     return R_GOTREL;
1026   case R_PPC64_GOT_PCREL34:
1027   case R_PPC64_GOT_TPREL_PCREL34:
1028   case R_PPC64_PCREL_OPT:
1029     return R_GOT_PC;
1030   case R_PPC64_TOC16_HA:
1031   case R_PPC64_TOC16_LO_DS:
1032     return ctx.arg.tocOptimize ? RE_PPC64_RELAX_TOC : R_GOTREL;
1033   case R_PPC64_TOC:
1034     return RE_PPC64_TOCBASE;
1035   case R_PPC64_REL14:
1036   case R_PPC64_REL24:
1037     return RE_PPC64_CALL_PLT;
1038   case R_PPC64_REL24_NOTOC:
1039     return R_PLT_PC;
1040   case R_PPC64_REL16_LO:
1041   case R_PPC64_REL16_HA:
1042   case R_PPC64_REL16_HI:
1043   case R_PPC64_REL32:
1044   case R_PPC64_REL64:
1045   case R_PPC64_PCREL34:
1046     return R_PC;
1047   case R_PPC64_GOT_TLSGD16:
1048   case R_PPC64_GOT_TLSGD16_HA:
1049   case R_PPC64_GOT_TLSGD16_HI:
1050   case R_PPC64_GOT_TLSGD16_LO:
1051     return R_TLSGD_GOT;
1052   case R_PPC64_GOT_TLSGD_PCREL34:
1053     return R_TLSGD_PC;
1054   case R_PPC64_GOT_TLSLD16:
1055   case R_PPC64_GOT_TLSLD16_HA:
1056   case R_PPC64_GOT_TLSLD16_HI:
1057   case R_PPC64_GOT_TLSLD16_LO:
1058     return R_TLSLD_GOT;
1059   case R_PPC64_GOT_TLSLD_PCREL34:
1060     return R_TLSLD_PC;
1061   case R_PPC64_GOT_TPREL16_HA:
1062   case R_PPC64_GOT_TPREL16_LO_DS:
1063   case R_PPC64_GOT_TPREL16_DS:
1064   case R_PPC64_GOT_TPREL16_HI:
1065     return R_GOT_OFF;
1066   case R_PPC64_GOT_DTPREL16_HA:
1067   case R_PPC64_GOT_DTPREL16_LO_DS:
1068   case R_PPC64_GOT_DTPREL16_DS:
1069   case R_PPC64_GOT_DTPREL16_HI:
1070     return R_TLSLD_GOT_OFF;
1071   case R_PPC64_TPREL16:
1072   case R_PPC64_TPREL16_HA:
1073   case R_PPC64_TPREL16_LO:
1074   case R_PPC64_TPREL16_HI:
1075   case R_PPC64_TPREL16_DS:
1076   case R_PPC64_TPREL16_LO_DS:
1077   case R_PPC64_TPREL16_HIGHER:
1078   case R_PPC64_TPREL16_HIGHERA:
1079   case R_PPC64_TPREL16_HIGHEST:
1080   case R_PPC64_TPREL16_HIGHESTA:
1081   case R_PPC64_TPREL34:
1082     return R_TPREL;
1083   case R_PPC64_DTPREL16:
1084   case R_PPC64_DTPREL16_DS:
1085   case R_PPC64_DTPREL16_HA:
1086   case R_PPC64_DTPREL16_HI:
1087   case R_PPC64_DTPREL16_HIGHER:
1088   case R_PPC64_DTPREL16_HIGHERA:
1089   case R_PPC64_DTPREL16_HIGHEST:
1090   case R_PPC64_DTPREL16_HIGHESTA:
1091   case R_PPC64_DTPREL16_LO:
1092   case R_PPC64_DTPREL16_LO_DS:
1093   case R_PPC64_DTPREL64:
1094   case R_PPC64_DTPREL34:
1095     return R_DTPREL;
1096   case R_PPC64_TLSGD:
1097     return R_TLSDESC_CALL;
1098   case R_PPC64_TLSLD:
1099     return R_TLSLD_HINT;
1100   case R_PPC64_TLS:
1101     return R_TLSIE_HINT;
1102   default:
1103     Err(ctx) << getErrorLoc(ctx, loc) << "unknown relocation (" << type.v
1104              << ") against symbol " << &s;
1105     return R_NONE;
1106   }
1107 }
1108 
1109 RelType PPC64::getDynRel(RelType type) const {
1110   if (type == R_PPC64_ADDR64 || type == R_PPC64_TOC)
1111     return R_PPC64_ADDR64;
1112   return R_PPC64_NONE;
1113 }
1114 
1115 int64_t PPC64::getImplicitAddend(const uint8_t *buf, RelType type) const {
1116   switch (type) {
1117   case R_PPC64_NONE:
1118   case R_PPC64_GLOB_DAT:
1119   case R_PPC64_JMP_SLOT:
1120     return 0;
1121   case R_PPC64_REL32:
1122     return SignExtend64<32>(read32(ctx, buf));
1123   case R_PPC64_ADDR64:
1124   case R_PPC64_REL64:
1125   case R_PPC64_RELATIVE:
1126   case R_PPC64_IRELATIVE:
1127   case R_PPC64_DTPMOD64:
1128   case R_PPC64_DTPREL64:
1129   case R_PPC64_TPREL64:
1130     return read64(ctx, buf);
1131   default:
1132     InternalErr(ctx, buf) << "cannot read addend for relocation " << type;
1133     return 0;
1134   }
1135 }
1136 
1137 void PPC64::writeGotHeader(uint8_t *buf) const {
1138   write64(ctx, buf, getPPC64TocBase(ctx));
1139 }
1140 
1141 void PPC64::writePltHeader(uint8_t *buf) const {
1142   // The generic resolver stub goes first.
1143   write32(ctx, buf + 0, 0x7c0802a6);  // mflr r0
1144   write32(ctx, buf + 4, 0x429f0005);  // bcl  20,4*cr7+so,8 <_glink+0x8>
1145   write32(ctx, buf + 8, 0x7d6802a6);  // mflr r11
1146   write32(ctx, buf + 12, 0x7c0803a6); // mtlr r0
1147   write32(ctx, buf + 16, 0x7d8b6050); // subf r12, r11, r12
1148   write32(ctx, buf + 20, 0x380cffcc); // subi r0,r12,52
1149   write32(ctx, buf + 24, 0x7800f082); // srdi r0,r0,62,2
1150   write32(ctx, buf + 28, 0xe98b002c); // ld   r12,44(r11)
1151   write32(ctx, buf + 32, 0x7d6c5a14); // add  r11,r12,r11
1152   write32(ctx, buf + 36, 0xe98b0000); // ld   r12,0(r11)
1153   write32(ctx, buf + 40, 0xe96b0008); // ld   r11,8(r11)
1154   write32(ctx, buf + 44, 0x7d8903a6); // mtctr   r12
1155   write32(ctx, buf + 48, 0x4e800420); // bctr
1156 
1157   // The 'bcl' instruction will set the link register to the address of the
1158   // following instruction ('mflr r11'). Here we store the offset from that
1159   // instruction to the first entry in the GotPlt section.
1160   int64_t gotPltOffset = ctx.in.gotPlt->getVA() - (ctx.in.plt->getVA() + 8);
1161   write64(ctx, buf + 52, gotPltOffset);
1162 }
1163 
1164 void PPC64::writePlt(uint8_t *buf, const Symbol &sym,
1165                      uint64_t /*pltEntryAddr*/) const {
1166   int32_t offset = pltHeaderSize + sym.getPltIdx(ctx) * pltEntrySize;
1167   // bl __glink_PLTresolve
1168   write32(ctx, buf, 0x48000000 | ((-offset) & 0x03fffffc));
1169 }
1170 
1171 void PPC64::writeIplt(uint8_t *buf, const Symbol &sym,
1172                       uint64_t /*pltEntryAddr*/) const {
1173   writePPC64LoadAndBranch(ctx, buf,
1174                           sym.getGotPltVA(ctx) - getPPC64TocBase(ctx));
1175 }
1176 
1177 static std::pair<RelType, uint64_t> toAddr16Rel(RelType type, uint64_t val) {
1178   // Relocations relative to the toc-base need to be adjusted by the Toc offset.
1179   uint64_t tocBiasedVal = val - ppc64TocOffset;
1180   // Relocations relative to dtv[dtpmod] need to be adjusted by the DTP offset.
1181   uint64_t dtpBiasedVal = val - dynamicThreadPointerOffset;
1182 
1183   switch (type) {
1184   // TOC biased relocation.
1185   case R_PPC64_GOT16:
1186   case R_PPC64_GOT_TLSGD16:
1187   case R_PPC64_GOT_TLSLD16:
1188   case R_PPC64_TOC16:
1189     return {R_PPC64_ADDR16, tocBiasedVal};
1190   case R_PPC64_GOT16_DS:
1191   case R_PPC64_TOC16_DS:
1192   case R_PPC64_GOT_TPREL16_DS:
1193   case R_PPC64_GOT_DTPREL16_DS:
1194     return {R_PPC64_ADDR16_DS, tocBiasedVal};
1195   case R_PPC64_GOT16_HA:
1196   case R_PPC64_GOT_TLSGD16_HA:
1197   case R_PPC64_GOT_TLSLD16_HA:
1198   case R_PPC64_GOT_TPREL16_HA:
1199   case R_PPC64_GOT_DTPREL16_HA:
1200   case R_PPC64_TOC16_HA:
1201     return {R_PPC64_ADDR16_HA, tocBiasedVal};
1202   case R_PPC64_GOT16_HI:
1203   case R_PPC64_GOT_TLSGD16_HI:
1204   case R_PPC64_GOT_TLSLD16_HI:
1205   case R_PPC64_GOT_TPREL16_HI:
1206   case R_PPC64_GOT_DTPREL16_HI:
1207   case R_PPC64_TOC16_HI:
1208     return {R_PPC64_ADDR16_HI, tocBiasedVal};
1209   case R_PPC64_GOT16_LO:
1210   case R_PPC64_GOT_TLSGD16_LO:
1211   case R_PPC64_GOT_TLSLD16_LO:
1212   case R_PPC64_TOC16_LO:
1213     return {R_PPC64_ADDR16_LO, tocBiasedVal};
1214   case R_PPC64_GOT16_LO_DS:
1215   case R_PPC64_TOC16_LO_DS:
1216   case R_PPC64_GOT_TPREL16_LO_DS:
1217   case R_PPC64_GOT_DTPREL16_LO_DS:
1218     return {R_PPC64_ADDR16_LO_DS, tocBiasedVal};
1219 
1220   // Dynamic Thread pointer biased relocation types.
1221   case R_PPC64_DTPREL16:
1222     return {R_PPC64_ADDR16, dtpBiasedVal};
1223   case R_PPC64_DTPREL16_DS:
1224     return {R_PPC64_ADDR16_DS, dtpBiasedVal};
1225   case R_PPC64_DTPREL16_HA:
1226     return {R_PPC64_ADDR16_HA, dtpBiasedVal};
1227   case R_PPC64_DTPREL16_HI:
1228     return {R_PPC64_ADDR16_HI, dtpBiasedVal};
1229   case R_PPC64_DTPREL16_HIGHER:
1230     return {R_PPC64_ADDR16_HIGHER, dtpBiasedVal};
1231   case R_PPC64_DTPREL16_HIGHERA:
1232     return {R_PPC64_ADDR16_HIGHERA, dtpBiasedVal};
1233   case R_PPC64_DTPREL16_HIGHEST:
1234     return {R_PPC64_ADDR16_HIGHEST, dtpBiasedVal};
1235   case R_PPC64_DTPREL16_HIGHESTA:
1236     return {R_PPC64_ADDR16_HIGHESTA, dtpBiasedVal};
1237   case R_PPC64_DTPREL16_LO:
1238     return {R_PPC64_ADDR16_LO, dtpBiasedVal};
1239   case R_PPC64_DTPREL16_LO_DS:
1240     return {R_PPC64_ADDR16_LO_DS, dtpBiasedVal};
1241   case R_PPC64_DTPREL64:
1242     return {R_PPC64_ADDR64, dtpBiasedVal};
1243 
1244   default:
1245     return {type, val};
1246   }
1247 }
1248 
1249 static bool isTocOptType(RelType type) {
1250   switch (type) {
1251   case R_PPC64_GOT16_HA:
1252   case R_PPC64_GOT16_LO_DS:
1253   case R_PPC64_TOC16_HA:
1254   case R_PPC64_TOC16_LO_DS:
1255   case R_PPC64_TOC16_LO:
1256     return true;
1257   default:
1258     return false;
1259   }
1260 }
1261 
1262 void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
1263   RelType type = rel.type;
1264   bool shouldTocOptimize = isTocOptType(type);
1265   // For dynamic thread pointer relative, toc-relative, and got-indirect
1266   // relocations, proceed in terms of the corresponding ADDR16 relocation type.
1267   std::tie(type, val) = toAddr16Rel(type, val);
1268 
1269   switch (type) {
1270   case R_PPC64_ADDR14: {
1271     checkAlignment(ctx, loc, val, 4, rel);
1272     // Preserve the AA/LK bits in the branch instruction
1273     uint8_t aalk = loc[3];
1274     write16(ctx, loc + 2, (aalk & 3) | (val & 0xfffc));
1275     break;
1276   }
1277   case R_PPC64_ADDR16:
1278     checkIntUInt(ctx, loc, val, 16, rel);
1279     write16(ctx, loc, val);
1280     break;
1281   case R_PPC64_ADDR32:
1282     checkIntUInt(ctx, loc, val, 32, rel);
1283     write32(ctx, loc, val);
1284     break;
1285   case R_PPC64_ADDR16_DS:
1286   case R_PPC64_TPREL16_DS: {
1287     checkInt(ctx, loc, val, 16, rel);
1288     // DQ-form instructions use bits 28-31 as part of the instruction encoding
1289     // DS-form instructions only use bits 30-31.
1290     uint16_t mask = isDQFormInstruction(readFromHalf16(ctx, loc)) ? 0xf : 0x3;
1291     checkAlignment(ctx, loc, lo(val), mask + 1, rel);
1292     write16(ctx, loc, (read16(ctx, loc) & mask) | lo(val));
1293   } break;
1294   case R_PPC64_ADDR16_HA:
1295   case R_PPC64_REL16_HA:
1296   case R_PPC64_TPREL16_HA:
1297     if (ctx.arg.tocOptimize && shouldTocOptimize && ha(val) == 0)
1298       writeFromHalf16(ctx, loc, NOP);
1299     else {
1300       checkInt(ctx, loc, val + 0x8000, 32, rel);
1301       write16(ctx, loc, ha(val));
1302     }
1303     break;
1304   case R_PPC64_ADDR16_HI:
1305   case R_PPC64_REL16_HI:
1306   case R_PPC64_TPREL16_HI:
1307     checkInt(ctx, loc, val, 32, rel);
1308     write16(ctx, loc, hi(val));
1309     break;
1310   case R_PPC64_ADDR16_HIGH:
1311     write16(ctx, loc, hi(val));
1312     break;
1313   case R_PPC64_ADDR16_HIGHER:
1314   case R_PPC64_TPREL16_HIGHER:
1315     write16(ctx, loc, higher(val));
1316     break;
1317   case R_PPC64_ADDR16_HIGHERA:
1318   case R_PPC64_TPREL16_HIGHERA:
1319     write16(ctx, loc, highera(val));
1320     break;
1321   case R_PPC64_ADDR16_HIGHEST:
1322   case R_PPC64_TPREL16_HIGHEST:
1323     write16(ctx, loc, highest(val));
1324     break;
1325   case R_PPC64_ADDR16_HIGHESTA:
1326   case R_PPC64_TPREL16_HIGHESTA:
1327     write16(ctx, loc, highesta(val));
1328     break;
1329   case R_PPC64_ADDR16_LO:
1330   case R_PPC64_REL16_LO:
1331   case R_PPC64_TPREL16_LO:
1332     // When the high-adjusted part of a toc relocation evaluates to 0, it is
1333     // changed into a nop. The lo part then needs to be updated to use the
1334     // toc-pointer register r2 as the base register.
1335     if (ctx.arg.tocOptimize && shouldTocOptimize && ha(val) == 0) {
1336       uint32_t insn = readFromHalf16(ctx, loc);
1337       if (isInstructionUpdateForm(insn))
1338         Err(ctx) << getErrorLoc(ctx, loc)
1339                  << "can't toc-optimize an update instruction: 0x"
1340                  << utohexstr(insn, true);
1341       writeFromHalf16(ctx, loc, (insn & 0xffe00000) | 0x00020000 | lo(val));
1342     } else {
1343       write16(ctx, loc, lo(val));
1344     }
1345     break;
1346   case R_PPC64_ADDR16_LO_DS:
1347   case R_PPC64_TPREL16_LO_DS: {
1348     // DQ-form instructions use bits 28-31 as part of the instruction encoding
1349     // DS-form instructions only use bits 30-31.
1350     uint32_t insn = readFromHalf16(ctx, loc);
1351     uint16_t mask = isDQFormInstruction(insn) ? 0xf : 0x3;
1352     checkAlignment(ctx, loc, lo(val), mask + 1, rel);
1353     if (ctx.arg.tocOptimize && shouldTocOptimize && ha(val) == 0) {
1354       // When the high-adjusted part of a toc relocation evaluates to 0, it is
1355       // changed into a nop. The lo part then needs to be updated to use the toc
1356       // pointer register r2 as the base register.
1357       if (isInstructionUpdateForm(insn))
1358         Err(ctx) << getErrorLoc(ctx, loc)
1359                  << "can't toc-optimize an update instruction: 0x"
1360                  << utohexstr(insn, true);
1361       insn &= 0xffe00000 | mask;
1362       writeFromHalf16(ctx, loc, insn | 0x00020000 | lo(val));
1363     } else {
1364       write16(ctx, loc, (read16(ctx, loc) & mask) | lo(val));
1365     }
1366   } break;
1367   case R_PPC64_TPREL16:
1368     checkInt(ctx, loc, val, 16, rel);
1369     write16(ctx, loc, val);
1370     break;
1371   case R_PPC64_REL32:
1372     checkInt(ctx, loc, val, 32, rel);
1373     write32(ctx, loc, val);
1374     break;
1375   case R_PPC64_ADDR64:
1376   case R_PPC64_REL64:
1377   case R_PPC64_TOC:
1378     write64(ctx, loc, val);
1379     break;
1380   case R_PPC64_REL14: {
1381     uint32_t mask = 0x0000FFFC;
1382     checkInt(ctx, loc, val, 16, rel);
1383     checkAlignment(ctx, loc, val, 4, rel);
1384     write32(ctx, loc, (read32(ctx, loc) & ~mask) | (val & mask));
1385     break;
1386   }
1387   case R_PPC64_REL24:
1388   case R_PPC64_REL24_NOTOC: {
1389     uint32_t mask = 0x03FFFFFC;
1390     checkInt(ctx, loc, val, 26, rel);
1391     checkAlignment(ctx, loc, val, 4, rel);
1392     write32(ctx, loc, (read32(ctx, loc) & ~mask) | (val & mask));
1393     break;
1394   }
1395   case R_PPC64_DTPREL64:
1396     write64(ctx, loc, val - dynamicThreadPointerOffset);
1397     break;
1398   case R_PPC64_DTPREL34:
1399     // The Dynamic Thread Vector actually points 0x8000 bytes past the start
1400     // of the TLS block. Therefore, in the case of R_PPC64_DTPREL34 we first
1401     // need to subtract that value and then fall through to the general case.
1402     val -= dynamicThreadPointerOffset;
1403     [[fallthrough]];
1404   case R_PPC64_PCREL34:
1405   case R_PPC64_GOT_PCREL34:
1406   case R_PPC64_GOT_TLSGD_PCREL34:
1407   case R_PPC64_GOT_TLSLD_PCREL34:
1408   case R_PPC64_GOT_TPREL_PCREL34:
1409   case R_PPC64_TPREL34: {
1410     const uint64_t si0Mask = 0x00000003ffff0000;
1411     const uint64_t si1Mask = 0x000000000000ffff;
1412     const uint64_t fullMask = 0x0003ffff0000ffff;
1413     checkInt(ctx, loc, val, 34, rel);
1414 
1415     uint64_t instr = readPrefixedInst(ctx, loc) & ~fullMask;
1416     writePrefixedInst(ctx, loc,
1417                       instr | ((val & si0Mask) << 16) | (val & si1Mask));
1418     break;
1419   }
1420   // A PCREL_OPT relocation that we won't optimize needs no action here.
1421   case R_PPC64_PCREL_OPT:
1422     break;
1423   default:
1424     llvm_unreachable("unknown relocation");
1425   }
1426 }
1427 
1428 bool PPC64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
1429                        uint64_t branchAddr, const Symbol &s, int64_t a) const {
1430   if (type != R_PPC64_REL14 && type != R_PPC64_REL24 &&
1431       type != R_PPC64_REL24_NOTOC)
1432     return false;
1433 
1434   // If a function is in the Plt it needs to be called with a call-stub.
1435   if (s.isInPlt(ctx))
1436     return true;
1437 
1438   // This check looks at the st_other bits of the callee with relocation
1439   // R_PPC64_REL14 or R_PPC64_REL24. If the value is 1, then the callee
1440   // clobbers the TOC and we need an R2 save stub.
1441   if (type != R_PPC64_REL24_NOTOC && (s.stOther >> 5) == 1)
1442     return true;
1443 
1444   if (type == R_PPC64_REL24_NOTOC && (s.stOther >> 5) > 1)
1445     return true;
1446 
1447   // An undefined weak symbol not in a PLT does not need a thunk. If it is
1448   // hidden, its binding has been converted to local, so we just check
1449   // isUndefined() here. An undefined non-weak symbol has been errored.
1450   if (s.isUndefined())
1451     return false;
1452 
1453   // If the offset exceeds the range of the branch type then it will need
1454   // a range-extending thunk.
1455   // See the comment in getRelocTargetVA() about RE_PPC64_CALL.
1456   return !inBranchRange(
1457       type, branchAddr,
1458       s.getVA(ctx, a) + getPPC64GlobalEntryToLocalEntryOffset(ctx, s.stOther));
1459 }
1460 
1461 uint32_t PPC64::getThunkSectionSpacing() const {
1462   // See comment in Arch/ARM.cpp for a more detailed explanation of
1463   // getThunkSectionSpacing(). For PPC64 we pick the constant here based on
1464   // R_PPC64_REL24, which is used by unconditional branch instructions.
1465   // 0x2000000 = (1 << (24 - 1)) * 4
1466   return 0x2000000;
1467 }
1468 
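     // R_PPC64_REL14 conditional branches encode a signed 16-bit byte
     // displacement (+/-32 KiB); R_PPC64_REL24 and R_PPC64_REL24_NOTOC encode
     // a signed 26-bit byte displacement (+/-32 MiB).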
1469 bool PPC64::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
1470   int64_t offset = dst - src;
1471   if (type == R_PPC64_REL14)
1472     return isInt<16>(offset);
1473   if (type == R_PPC64_REL24 || type == R_PPC64_REL24_NOTOC)
1474     return isInt<26>(offset);
1475   llvm_unreachable("unsupported relocation type used in branch");
1476 }
1477 
1478 RelExpr PPC64::adjustTlsExpr(RelType type, RelExpr expr) const {
1479   if (type != R_PPC64_GOT_TLSGD_PCREL34 && expr == R_RELAX_TLS_GD_TO_IE)
1480     return R_RELAX_TLS_GD_TO_IE_GOT_OFF;
1481   if (expr == R_RELAX_TLS_LD_TO_LE)
1482     return R_RELAX_TLS_LD_TO_LE_ABS;
1483   return expr;
1484 }
1485 
1486 RelExpr PPC64::adjustGotPcExpr(RelType type, int64_t addend,
1487                                const uint8_t *loc) const {
1488   if ((type == R_PPC64_GOT_PCREL34 || type == R_PPC64_PCREL_OPT) &&
1489       ctx.arg.pcRelOptimize) {
1490     // It only makes sense to optimize pld since paddi means that the address
1491     // of the object in the GOT is required rather than the object itself.
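         // The check below matches the primary opcode of the suffix word of a
         // pld (0xe4000000, cf. PPCPrefixedInsn::PLD); other prefixed
         // instructions, including paddi, use different suffix opcodes and
         // are left alone.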
1492     if ((readPrefixedInst(ctx, loc) & 0xfc000000) == 0xe4000000)
1493       return RE_PPC64_RELAX_GOT_PC;
1494   }
1495   return R_GOT_PC;
1496 }
1497 
1498 // Reference: 3.7.4.1 of the 64-bit ELF V2 abi supplement.
1499 // The general dynamic code sequence for a global `x` uses 4 instructions.
1500 // Instruction                    Relocation                Symbol
1501 // addis r3, r2, x@got@tlsgd@ha   R_PPC64_GOT_TLSGD16_HA      x
1502 // addi  r3, r3, x@got@tlsgd@l    R_PPC64_GOT_TLSGD16_LO      x
1503 // bl __tls_get_addr(x@tlsgd)     R_PPC64_TLSGD               x
1504 //                                R_PPC64_REL24               __tls_get_addr
1505 // nop                            None                       None
1506 //
1507 // Relaxing to initial-exec entails:
1508 // 1) Convert the addis/addi pair that builds the address of the tls_index
1509 //    struct for 'x' to an addis/ld pair that loads an offset from a got-entry.
1510 // 2) Convert the call to __tls_get_addr to a nop.
1511 // 3) Convert the nop following the call to an add of the loaded offset to the
1512 //    thread pointer.
1513 // Since the nop must directly follow the call, the R_PPC64_TLSGD relocation is
1514 // used as the relaxation hint for both steps 2 and 3.
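     //
     // After this relaxation the TOC-based sequence becomes:
     //   addis r3, r2, x@got@tprel@ha
     //   ld    r3, x@got@tprel@l(r3)
     //   nop
     //   add   r3, r3, r13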
1515 void PPC64::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
1516                            uint64_t val) const {
1517   switch (rel.type) {
1518   case R_PPC64_GOT_TLSGD16_HA:
1519     // This is relaxed from addis rT, r2, sym@got@tlsgd@ha to
1520     //                      addis rT, r2, sym@got@tprel@ha.
1521     relocateNoSym(loc, R_PPC64_GOT_TPREL16_HA, val);
1522     return;
1523   case R_PPC64_GOT_TLSGD16:
1524   case R_PPC64_GOT_TLSGD16_LO: {
1525     // Relax from addi  r3, rA, sym@got@tlsgd@l to
1526     //            ld r3, sym@got@tprel@l(rA)
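         // 0xe8600000 is `ld r3, 0(0)` (primary opcode 58 with RT = 3); the
         // RA field extracted below is carried over from the original addi.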
1527     uint32_t ra = (readFromHalf16(ctx, loc) & (0x1f << 16));
1528     writeFromHalf16(ctx, loc, 0xe8600000 | ra);
1529     relocateNoSym(loc, R_PPC64_GOT_TPREL16_LO_DS, val);
1530     return;
1531   }
1532   case R_PPC64_GOT_TLSGD_PCREL34: {
1533     // Relax from paddi r3, 0, sym@got@tlsgd@pcrel, 1 to
1534     //            pld r3, sym@got@tprel@pcrel
1535     writePrefixedInst(ctx, loc, 0x04100000e4600000);
1536     relocateNoSym(loc, R_PPC64_GOT_TPREL_PCREL34, val);
1537     return;
1538   }
1539   case R_PPC64_TLSGD: {
1540     // PC Relative Relaxation:
1541     // Relax from bl __tls_get_addr@notoc(x@tlsgd) to
1542     //            nop
1543     // TOC Relaxation:
1544     // Relax from bl __tls_get_addr(x@tlsgd)
1545     //            nop
1546     // to
1547     //            nop
1548     //            add r3, r3, r13
1549     const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
1550     if (locAsInt % 4 == 0) {
1551       write32(ctx, loc, NOP);            // bl __tls_get_addr(sym@tlsgd) --> nop
1552       write32(ctx, loc + 4, 0x7c636a14); // nop --> add r3, r3, r13
1553     } else if (locAsInt % 4 == 1) {
1554       // bl __tls_get_addr(sym@tlsgd) --> add r3, r3, r13
1555       write32(ctx, loc - 1, 0x7c636a14);
1556     } else {
1557       Err(ctx) << "R_PPC64_TLSGD has unexpected byte alignment";
1558     }
1559     return;
1560   }
1561   default:
1562     llvm_unreachable("unsupported relocation for TLS GD to IE relaxation");
1563   }
1564 }
1565 
1566 void PPC64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
1567   uint64_t secAddr = sec.getOutputSection()->addr;
1568   if (auto *s = dyn_cast<InputSection>(&sec))
1569     secAddr += s->outSecOff;
1570   else if (auto *ehIn = dyn_cast<EhInputSection>(&sec))
1571     secAddr += ehIn->getParent()->outSecOff;
1572   uint64_t lastPPCRelaxedRelocOff = -1;
1573   for (const Relocation &rel : sec.relocs()) {
1574     uint8_t *loc = buf + rel.offset;
1575     const uint64_t val = sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset);
1576     switch (rel.expr) {
1577     case RE_PPC64_RELAX_GOT_PC: {
1578       // The R_PPC64_PCREL_OPT relocation must appear immediately after
1579       // R_PPC64_GOT_PCREL34 in the relocations table at the same offset.
1580       // We can only relax R_PPC64_PCREL_OPT if we have also relaxed
1581       // the associated R_PPC64_GOT_PCREL34 since only the latter has an
1582       // associated symbol. So save the offset when relaxing R_PPC64_GOT_PCREL34
1583       // and only relax the other if the saved offset matches.
1584       if (rel.type == R_PPC64_GOT_PCREL34)
1585         lastPPCRelaxedRelocOff = rel.offset;
1586       if (rel.type == R_PPC64_PCREL_OPT && rel.offset != lastPPCRelaxedRelocOff)
1587         break;
1588       relaxGot(loc, rel, val);
1589       break;
1590     }
1591     case RE_PPC64_RELAX_TOC:
1592       // rel.sym refers to the STT_SECTION symbol associated with the .toc input
1593       // section. If an R_PPC64_TOC16_LO (.toc + addend) references the TOC
1594       // entry, there may be an R_PPC64_TOC16_HA that is not paired with an
1595       // R_PPC64_TOC16_LO_DS. Don't relax. This loses some relaxation
1596       // opportunities but is safe.
1597       if (ctx.ppc64noTocRelax.count({rel.sym, rel.addend}) ||
1598           !tryRelaxPPC64TocIndirection(ctx, rel, loc))
1599         relocate(loc, rel, val);
1600       break;
1601     case RE_PPC64_CALL:
1602       // If this is a call to __tls_get_addr, it may be part of a TLS
1603       // sequence that has been relaxed and turned into a nop. In this
1604       // case, we don't want to handle it as a call.
1605       if (read32(ctx, loc) == 0x60000000) // nop
1606         break;
1607 
1608       // Patch a nop (0x60000000) to a ld.
1609       if (rel.sym->needsTocRestore()) {
1610         // gcc/gfortran 5.4, 6.3 and earlier versions do not add a nop for
1611         // recursive calls even if the function is preemptible. This is not
1612         // wrong in the common case where the function is not preempted at
1613         // runtime. Just ignore.
1614         if ((rel.offset + 8 > sec.content().size() ||
1615              read32(ctx, loc + 4) != 0x60000000) &&
1616             rel.sym->file != sec.file) {
1617           // Use substr(6) to remove the "__plt_" prefix.
1618           Err(ctx) << getErrorLoc(ctx, loc) << "call to "
1619                    << toStr(ctx, *rel.sym).substr(6)
1620                    << " lacks nop, can't restore toc";
1621           break;
1622         }
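             // The ELFv2 ABI reserves the doubleword at offset 24 from r1 as
             // the TOC save slot: the call stub stores r2 there before the
             // call, and the nop after the bl is rewritten to reload it.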
1623         write32(ctx, loc + 4, 0xe8410018); // ld %r2, 24(%r1)
1624       }
1625       relocate(loc, rel, val);
1626       break;
1627     case R_RELAX_TLS_GD_TO_IE:
1628     case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
1629       relaxTlsGdToIe(loc, rel, val);
1630       break;
1631     case R_RELAX_TLS_GD_TO_LE:
1632       relaxTlsGdToLe(loc, rel, val);
1633       break;
1634     case R_RELAX_TLS_LD_TO_LE_ABS:
1635       relaxTlsLdToLe(loc, rel, val);
1636       break;
1637     case R_RELAX_TLS_IE_TO_LE:
1638       relaxTlsIeToLe(loc, rel, val);
1639       break;
1640     default:
1641       relocate(loc, rel, val);
1642       break;
1643     }
1644   }
1645 }
1646 
1647 // The prologue for a split-stack function is expected to look roughly
1648 // like this:
1649 //    .Lglobal_entry_point:
1650 //      # TOC pointer initialization.
1651 //      ...
1652 //    .Llocal_entry_point:
1653 //      # load the __private_ss member of the thread's tcbhead.
1654 //      ld r0,-0x7000-64(r13)
1655 //      # subtract the function's stack-frame size from the stack pointer.
1656 //      addis r12, r1, ha(-stack-frame size)
1657 //      addi  r12, r12, l(-stack-frame size)
1658 //      # compare the needed stack address to the stack limit and branch to
1659 //      # allocate_more_stack if more space is needed, otherwise fall through to the 'normal' body.
1660 //      cmpld cr7,r12,r0
1661 //      blt- cr7, .Lallocate_more_stack
1662 //
1663 // -) The allocate_more_stack block might be placed after the split-stack
1664 //    prologue and the `blt-` replaced with a `bge+ .Lnormal_func_body`
1665 //    instead.
1666 // -) If either the addis or addi is not needed due to the stack size being
1667 //    smaller than 32K or a multiple of 64K, it will be replaced with a nop,
1668 //    but there will always be 2 instructions the linker can overwrite for the
1669 //    adjusted stack size.
1670 //
1671 // The linker's job here is to increase the stack size used in the addis/addi
1672 // pair by the --split-stack-adjust-size value.
1673 // addis r12, r1, ha(-stack-frame size - split-stack-adjust-size)
1674 // addi  r12, r12, l(-stack-frame size - split-stack-adjust-size)
1675 bool PPC64::adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
1676                                              uint8_t stOther) const {
1677   // If the caller has a global entry point, adjust the buffer past it. The start
1678   // of the split-stack prologue will be at the local entry point.
1679   loc += getPPC64GlobalEntryToLocalEntryOffset(ctx, stOther);
1680 
1681   // At the very least we expect to see a load of some split-stack data from the
1682   // tcb, and 2 instructions that calculate the ending stack address this
1683   // function will require. If there is not enough room for at least 3
1684   // instructions it can't be a split-stack prologue.
1685   if (loc + 12 >= end)
1686     return false;
1687 
1688   // First instruction must be `ld r0, -0x7000-64(r13)`
1689   if (read32(ctx, loc) != 0xe80d8fc0)
1690     return false;
1691 
1692   int16_t hiImm = 0;
1693   int16_t loImm = 0;
1694   // The first adjustment instruction can be either an addis if the frame size
1695   // is larger than 32K, or an addi if the size is less than 32K.
1696   int32_t firstInstr = read32(ctx, loc + 4);
1697   if (getPrimaryOpCode(firstInstr) == 15) {
1698     hiImm = firstInstr & 0xFFFF;
1699   } else if (getPrimaryOpCode(firstInstr) == 14) {
1700     loImm = firstInstr & 0xFFFF;
1701   } else {
1702     return false;
1703   }
1704 
1705   // The second instruction is either an addi or a nop. If the first instruction
1706   // was an addi, then loImm is already set and the second instruction must be a nop.
1707   uint32_t secondInstr = read32(ctx, loc + 8);
1708   if (!loImm && getPrimaryOpCode(secondInstr) == 14) {
1709     loImm = secondInstr & 0xFFFF;
1710   } else if (secondInstr != NOP) {
1711     return false;
1712   }
1713 
1714   // The register operands of the first instruction should be the stack-pointer
1715   // (r1) as the input (RA) and r12 as the output (RT). If the second
1716   // instruction is not a nop, then it should use r12 as both input and output.
1717   auto checkRegOperands = [](uint32_t instr, uint8_t expectedRT,
1718                              uint8_t expectedRA) {
1719     return ((instr & 0x3E00000) >> 21 == expectedRT) &&
1720            ((instr & 0x1F0000) >> 16 == expectedRA);
1721   };
1722   if (!checkRegOperands(firstInstr, 12, 1))
1723     return false;
1724   if (secondInstr != NOP && !checkRegOperands(secondInstr, 12, 12))
1725     return false;
1726 
1727   int32_t stackFrameSize = (hiImm * 65536) + loImm;
1728   // Check that the adjusted size doesn't overflow what we can represent with 2
1729   // instructions.
1730   if (stackFrameSize < ctx.arg.splitStackAdjustSize + INT32_MIN) {
1731     Err(ctx) << getErrorLoc(ctx, loc)
1732              << "split-stack prologue adjustment overflows";
1733     return false;
1734   }
1735 
1736   int32_t adjustedStackFrameSize =
1737       stackFrameSize - ctx.arg.splitStackAdjustSize;
1738 
1739   loImm = adjustedStackFrameSize & 0xFFFF;
1740   hiImm = (adjustedStackFrameSize + 0x8000) >> 16;
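       // This is the standard @ha/@l split: adding 0x8000 before the shift
       // compensates for the sign extension of loImm by the addi. For example,
       // an adjusted size of -74568 (0xfffedcb8) gives loImm = -9032 and
       // hiImm = -1, and hiImm * 0x10000 + loImm == -74568.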
1741   if (hiImm) {
1742     write32(ctx, loc + 4, 0x3d810000 | (uint16_t)hiImm);
1743     // If the low immediate is zero the second instruction will be a nop.
1744     secondInstr = loImm ? 0x398C0000 | (uint16_t)loImm : NOP;
1745     write32(ctx, loc + 8, secondInstr);
1746   } else {
1747     // addi r12, r1, imm
1748     write32(ctx, loc + 4, (0x39810000) | (uint16_t)loImm);
1749     write32(ctx, loc + 8, NOP);
1750   }
1751 
1752   return true;
1753 }
1754 
1755 void elf::setPPC64TargetInfo(Ctx &ctx) { ctx.target.reset(new PPC64(ctx)); }
1756