xref: /llvm-project/lld/ELF/Arch/AArch64.cpp (revision 9178708c3bf926fe0d7767e26344f3f98b1e92ec)
1 //===- AArch64.cpp --------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "InputFiles.h"
10 #include "OutputSections.h"
11 #include "Symbols.h"
12 #include "SyntheticSections.h"
13 #include "Target.h"
14 #include "lld/Common/ErrorHandler.h"
15 #include "llvm/BinaryFormat/ELF.h"
16 #include "llvm/Support/Endian.h"
17 
18 using namespace llvm;
19 using namespace llvm::support::endian;
20 using namespace llvm::ELF;
21 using namespace lld;
22 using namespace lld::elf;
23 
24 // Page(Expr) is the page address of the expression Expr, defined
25 // as (Expr & ~0xFFF). (This applies even if the machine page size
26 // supported by the platform has a different value.)
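// For example, getAArch64Page(0x11234ABC) == 0x11234000.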
27 uint64_t elf::getAArch64Page(uint64_t expr) {
28   return expr & ~static_cast<uint64_t>(0xFFF);
29 }
30 
31 // A BTI landing pad is a valid target for an indirect branch when Branch
32 // Target Identification is enabled. As linker-generated branches go via x16,
33 // the BTI landing pads are defined as: BTI C, BTI J, BTI JC, PACIASP,
34 // PACIBSP.
35 bool elf::isAArch64BTILandingPad(Ctx &ctx, Symbol &s, int64_t a) {
36   // PLT entries accessed indirectly have a BTI c.
37   if (s.isInPlt(ctx))
38     return true;
39   Defined *d = dyn_cast<Defined>(&s);
40   if (!isa_and_nonnull<InputSection>(d->section))
41     // All places that we cannot disassemble are responsible for making
42     // the target a BTI landing pad.
43     return true;
44   InputSection *isec = cast<InputSection>(d->section);
45   uint64_t off = d->value + a;
46   // Likely user error, but protect ourselves against out of bounds
47   // access.
48   if (off >= isec->getSize())
49     return true;
50   const uint8_t *buf = isec->content().begin();
51   const uint32_t instr = read32le(buf + off);
52   // All BTI instructions are HINT instructions, which all have the same
53   // encoding apart from bits [11:5].
54   if ((instr & 0xd503201f) == 0xd503201f &&
55       is_contained({/*PACIASP*/ 0xd503233f, /*PACIBSP*/ 0xd503237f,
56                     /*BTI C*/ 0xd503245f, /*BTI J*/ 0xd503249f,
57                     /*BTI JC*/ 0xd50324df},
58                    instr))
59     return true;
60   return false;
61 }
62 
63 namespace {
64 class AArch64 : public TargetInfo {
65 public:
66   AArch64(Ctx &);
67   RelExpr getRelExpr(RelType type, const Symbol &s,
68                      const uint8_t *loc) const override;
69   RelType getDynRel(RelType type) const override;
70   int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
71   void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
72   void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
73   void writePltHeader(uint8_t *buf) const override;
74   void writePlt(uint8_t *buf, const Symbol &sym,
75                 uint64_t pltEntryAddr) const override;
76   bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
77                   uint64_t branchAddr, const Symbol &s,
78                   int64_t a) const override;
79   uint32_t getThunkSectionSpacing() const override;
80   bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
81   bool usesOnlyLowPageBits(RelType type) const override;
82   void relocate(uint8_t *loc, const Relocation &rel,
83                 uint64_t val) const override;
84   RelExpr adjustTlsExpr(RelType type, RelExpr expr) const override;
85   void relocateAlloc(InputSectionBase &sec, uint8_t *buf) const override;
86 
87 private:
88   void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
89   void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
90   void relaxTlsIeToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
91 };
92 
93 struct AArch64Relaxer {
94   Ctx &ctx;
95   bool safeToRelaxAdrpLdr = false;
96 
97   AArch64Relaxer(Ctx &ctx, ArrayRef<Relocation> relocs);
98   bool tryRelaxAdrpAdd(const Relocation &adrpRel, const Relocation &addRel,
99                        uint64_t secAddr, uint8_t *buf) const;
100   bool tryRelaxAdrpLdr(const Relocation &adrpRel, const Relocation &ldrRel,
101                        uint64_t secAddr, uint8_t *buf) const;
102 };
103 } // namespace
104 
105 // Return bits [start, end] of val, shifted right by start bits.
106 // For instance, getBits(0xF0, 4, 8) returns 0xF.
107 static uint64_t getBits(uint64_t val, int start, int end) {
108   uint64_t mask = ((uint64_t)1 << (end + 1 - start)) - 1;
109   return (val >> start) & mask;
110 }
111 
112 AArch64::AArch64(Ctx &ctx) : TargetInfo(ctx) {
113   copyRel = R_AARCH64_COPY;
114   relativeRel = R_AARCH64_RELATIVE;
115   iRelativeRel = R_AARCH64_IRELATIVE;
116   gotRel = R_AARCH64_GLOB_DAT;
117   pltRel = R_AARCH64_JUMP_SLOT;
118   symbolicRel = R_AARCH64_ABS64;
119   tlsDescRel = R_AARCH64_TLSDESC;
120   tlsGotRel = R_AARCH64_TLS_TPREL64;
121   pltHeaderSize = 32;
122   pltEntrySize = 16;
123   ipltEntrySize = 16;
124   defaultMaxPageSize = 65536;
125 
126   // Align to the 2 MiB page size (known as a superpage or huge page).
127   // FreeBSD automatically promotes 2 MiB-aligned allocations.
128   defaultImageBase = 0x200000;
129 
130   needsThunks = true;
131 }
132 
133 RelExpr AArch64::getRelExpr(RelType type, const Symbol &s,
134                             const uint8_t *loc) const {
135   switch (type) {
136   case R_AARCH64_ABS16:
137   case R_AARCH64_ABS32:
138   case R_AARCH64_ABS64:
139   case R_AARCH64_ADD_ABS_LO12_NC:
140   case R_AARCH64_LDST128_ABS_LO12_NC:
141   case R_AARCH64_LDST16_ABS_LO12_NC:
142   case R_AARCH64_LDST32_ABS_LO12_NC:
143   case R_AARCH64_LDST64_ABS_LO12_NC:
144   case R_AARCH64_LDST8_ABS_LO12_NC:
145   case R_AARCH64_MOVW_SABS_G0:
146   case R_AARCH64_MOVW_SABS_G1:
147   case R_AARCH64_MOVW_SABS_G2:
148   case R_AARCH64_MOVW_UABS_G0:
149   case R_AARCH64_MOVW_UABS_G0_NC:
150   case R_AARCH64_MOVW_UABS_G1:
151   case R_AARCH64_MOVW_UABS_G1_NC:
152   case R_AARCH64_MOVW_UABS_G2:
153   case R_AARCH64_MOVW_UABS_G2_NC:
154   case R_AARCH64_MOVW_UABS_G3:
155     return R_ABS;
156   case R_AARCH64_AUTH_ABS64:
157     return RE_AARCH64_AUTH;
158   case R_AARCH64_TLSDESC_ADR_PAGE21:
159     return RE_AARCH64_TLSDESC_PAGE;
160   case R_AARCH64_AUTH_TLSDESC_ADR_PAGE21:
161     return RE_AARCH64_AUTH_TLSDESC_PAGE;
162   case R_AARCH64_TLSDESC_LD64_LO12:
163   case R_AARCH64_TLSDESC_ADD_LO12:
164     return R_TLSDESC;
165   case R_AARCH64_AUTH_TLSDESC_LD64_LO12:
166   case R_AARCH64_AUTH_TLSDESC_ADD_LO12:
167     return RE_AARCH64_AUTH_TLSDESC;
168   case R_AARCH64_TLSDESC_CALL:
169     return R_TLSDESC_CALL;
170   case R_AARCH64_TLSLE_ADD_TPREL_HI12:
171   case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
172   case R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
173   case R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
174   case R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
175   case R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
176   case R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC:
177   case R_AARCH64_TLSLE_MOVW_TPREL_G0:
178   case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
179   case R_AARCH64_TLSLE_MOVW_TPREL_G1:
180   case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
181   case R_AARCH64_TLSLE_MOVW_TPREL_G2:
182     return R_TPREL;
183   case R_AARCH64_CALL26:
184   case R_AARCH64_CONDBR19:
185   case R_AARCH64_JUMP26:
186   case R_AARCH64_TSTBR14:
187     return R_PLT_PC;
188   case R_AARCH64_PLT32:
189     const_cast<Symbol &>(s).thunkAccessed = true;
190     return R_PLT_PC;
191   case R_AARCH64_PREL16:
192   case R_AARCH64_PREL32:
193   case R_AARCH64_PREL64:
194   case R_AARCH64_ADR_PREL_LO21:
195   case R_AARCH64_LD_PREL_LO19:
196   case R_AARCH64_MOVW_PREL_G0:
197   case R_AARCH64_MOVW_PREL_G0_NC:
198   case R_AARCH64_MOVW_PREL_G1:
199   case R_AARCH64_MOVW_PREL_G1_NC:
200   case R_AARCH64_MOVW_PREL_G2:
201   case R_AARCH64_MOVW_PREL_G2_NC:
202   case R_AARCH64_MOVW_PREL_G3:
203     return R_PC;
204   case R_AARCH64_ADR_PREL_PG_HI21:
205   case R_AARCH64_ADR_PREL_PG_HI21_NC:
206     return RE_AARCH64_PAGE_PC;
207   case R_AARCH64_LD64_GOT_LO12_NC:
208   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
209     return R_GOT;
210   case R_AARCH64_AUTH_LD64_GOT_LO12_NC:
211   case R_AARCH64_AUTH_GOT_ADD_LO12_NC:
212     return RE_AARCH64_AUTH_GOT;
213   case R_AARCH64_AUTH_GOT_LD_PREL19:
214   case R_AARCH64_AUTH_GOT_ADR_PREL_LO21:
215     return RE_AARCH64_AUTH_GOT_PC;
216   case R_AARCH64_LD64_GOTPAGE_LO15:
217     return RE_AARCH64_GOT_PAGE;
218   case R_AARCH64_ADR_GOT_PAGE:
219   case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
220     return RE_AARCH64_GOT_PAGE_PC;
221   case R_AARCH64_AUTH_ADR_GOT_PAGE:
222     return RE_AARCH64_AUTH_GOT_PAGE_PC;
223   case R_AARCH64_GOTPCREL32:
224   case R_AARCH64_GOT_LD_PREL19:
225     return R_GOT_PC;
226   case R_AARCH64_NONE:
227     return R_NONE;
228   default:
229     Err(ctx) << getErrorLoc(ctx, loc) << "unknown relocation (" << type.v
230              << ") against symbol " << &s;
231     return R_NONE;
232   }
233 }
234 
235 RelExpr AArch64::adjustTlsExpr(RelType type, RelExpr expr) const {
236   if (expr == R_RELAX_TLS_GD_TO_IE) {
237     if (type == R_AARCH64_TLSDESC_ADR_PAGE21)
238       return RE_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC;
239     return R_RELAX_TLS_GD_TO_IE_ABS;
240   }
241   return expr;
242 }
243 
244 bool AArch64::usesOnlyLowPageBits(RelType type) const {
245   switch (type) {
246   default:
247     return false;
248   case R_AARCH64_ADD_ABS_LO12_NC:
249   case R_AARCH64_LD64_GOT_LO12_NC:
250   case R_AARCH64_LDST128_ABS_LO12_NC:
251   case R_AARCH64_LDST16_ABS_LO12_NC:
252   case R_AARCH64_LDST32_ABS_LO12_NC:
253   case R_AARCH64_LDST64_ABS_LO12_NC:
254   case R_AARCH64_LDST8_ABS_LO12_NC:
255   case R_AARCH64_TLSDESC_ADD_LO12:
256   case R_AARCH64_TLSDESC_LD64_LO12:
257   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
258     return true;
259   }
260 }
261 
262 RelType AArch64::getDynRel(RelType type) const {
263   if (type == R_AARCH64_ABS64 || type == R_AARCH64_AUTH_ABS64)
264     return type;
265   return R_AARCH64_NONE;
266 }
267 
268 int64_t AArch64::getImplicitAddend(const uint8_t *buf, RelType type) const {
269   switch (type) {
270   case R_AARCH64_TLSDESC:
271     return read64(ctx, buf + 8);
272   case R_AARCH64_NONE:
273   case R_AARCH64_GLOB_DAT:
274   case R_AARCH64_AUTH_GLOB_DAT:
275   case R_AARCH64_JUMP_SLOT:
276     return 0;
277   case R_AARCH64_ABS16:
278   case R_AARCH64_PREL16:
279     return SignExtend64<16>(read16(ctx, buf));
280   case R_AARCH64_ABS32:
281   case R_AARCH64_PREL32:
282     return SignExtend64<32>(read32(ctx, buf));
283   case R_AARCH64_ABS64:
284   case R_AARCH64_PREL64:
285   case R_AARCH64_RELATIVE:
286   case R_AARCH64_IRELATIVE:
287   case R_AARCH64_TLS_TPREL64:
288     return read64(ctx, buf);
289 
290     // The following relocation types all point at instructions, and
291     // relocate an immediate field in the instruction.
292     //
293     // The general rule, from AAELF64 §5.7.2 "Addends and PC-bias",
294     // says: "If the relocation relocates an instruction the immediate
295     // field of the instruction is extracted, scaled as required by
296     // the instruction field encoding, and sign-extended to 64 bits".
297 
298     // The R_AARCH64_MOVW family operates on wide MOV/MOVK/MOVZ
299     // instructions, which have a 16-bit immediate field with its low
300     // bit in bit 5 of the instruction encoding. When the immediate
301     // field is used as an implicit addend for REL-type relocations,
302     // it is treated as added to the low bits of the output value, not
303     // shifted depending on the relocation type.
304     //
305     // This allows REL relocations to express the requirement 'please
306     // add 12345 to this symbol value and give me the four 16-bit
307     // chunks of the result', by putting the same addend 12345 in all
308     // four instructions. Carries between the 16-bit chunks are
309     // handled correctly, because the whole 64-bit addition is done
310     // once per relocation.
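    // For example, with symbol value 0xFFFF and an addend of 1 encoded in
    // every instruction, the G0 relocation writes 0x0000 and the G1 relocation
    // writes 0x0001: each relocation performs the full 64-bit addition, so the
    // carry out of the low 16 bits is not lost.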
311   case R_AARCH64_MOVW_UABS_G0:
312   case R_AARCH64_MOVW_UABS_G0_NC:
313   case R_AARCH64_MOVW_UABS_G1:
314   case R_AARCH64_MOVW_UABS_G1_NC:
315   case R_AARCH64_MOVW_UABS_G2:
316   case R_AARCH64_MOVW_UABS_G2_NC:
317   case R_AARCH64_MOVW_UABS_G3:
318     return SignExtend64<16>(getBits(read32le(buf), 5, 20));
319 
320     // R_AARCH64_TSTBR14 points at a TBZ or TBNZ instruction, which
321     // has a 14-bit offset measured in instructions, i.e. shifted left
322     // by 2.
323   case R_AARCH64_TSTBR14:
324     return SignExtend64<16>(getBits(read32le(buf), 5, 18) << 2);
325 
326     // R_AARCH64_CONDBR19 operates on the ordinary B.cond instruction,
327     // which has a 19-bit offset measured in instructions.
328     //
329     // R_AARCH64_LD_PREL_LO19 operates on the LDR (literal)
330     // instruction, which also has a 19-bit offset, measured in 4-byte
331     // chunks. So the calculation is the same as for
332     // R_AARCH64_CONDBR19.
333   case R_AARCH64_CONDBR19:
334   case R_AARCH64_LD_PREL_LO19:
335     return SignExtend64<21>(getBits(read32le(buf), 5, 23) << 2);
336 
337     // R_AARCH64_ADD_ABS_LO12_NC operates on ADD (immediate). The
338     // immediate can optionally be shifted left by 12 bits, but this
339     // relocation is intended for the case where it is not.
340   case R_AARCH64_ADD_ABS_LO12_NC:
341     return SignExtend64<12>(getBits(read32le(buf), 10, 21));
342 
343     // R_AARCH64_ADR_PREL_LO21 operates on an ADR instruction, whose
344     // 21-bit immediate is split between two bits high up in the word
345     // (in fact the two _lowest_ order bits of the value) and 19 bits
346     // lower down.
347     //
348     // R_AARCH64_ADR_PREL_PG_HI21[_NC] operate on an ADRP instruction,
349     // which encodes the immediate in the same way, but will shift it
350     // left by 12 bits when the instruction executes. For the same
351     // reason as the MOVW family, we don't apply that left shift here.
352   case R_AARCH64_ADR_PREL_LO21:
353   case R_AARCH64_ADR_PREL_PG_HI21:
354   case R_AARCH64_ADR_PREL_PG_HI21_NC:
355     return SignExtend64<21>((getBits(read32le(buf), 5, 23) << 2) |
356                             getBits(read32le(buf), 29, 30));
357 
358     // R_AARCH64_{JUMP,CALL}26 operate on B and BL, which have a
359     // 26-bit offset measured in instructions.
360   case R_AARCH64_JUMP26:
361   case R_AARCH64_CALL26:
362     return SignExtend64<28>(getBits(read32le(buf), 0, 25) << 2);
363 
364   default:
365     InternalErr(ctx, buf) << "cannot read addend for relocation " << type;
366     return 0;
367   }
368 }
369 
370 void AArch64::writeGotPlt(uint8_t *buf, const Symbol &) const {
371   write64(ctx, buf, ctx.in.plt->getVA());
372 }
373 
374 void AArch64::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
375   if (ctx.arg.writeAddends)
376     write64(ctx, buf, s.getVA(ctx));
377 }
378 
379 void AArch64::writePltHeader(uint8_t *buf) const {
380   const uint8_t pltData[] = {
381       0xf0, 0x7b, 0xbf, 0xa9, // stp    x16, x30, [sp,#-16]!
382       0x10, 0x00, 0x00, 0x90, // adrp   x16, Page(&(.got.plt[2]))
383       0x11, 0x02, 0x40, 0xf9, // ldr    x17, [x16, Offset(&(.got.plt[2]))]
384       0x10, 0x02, 0x00, 0x91, // add    x16, x16, Offset(&(.got.plt[2]))
385       0x20, 0x02, 0x1f, 0xd6, // br     x17
386       0x1f, 0x20, 0x03, 0xd5, // nop
387       0x1f, 0x20, 0x03, 0xd5, // nop
388       0x1f, 0x20, 0x03, 0xd5  // nop
389   };
390   memcpy(buf, pltData, sizeof(pltData));
391 
392   uint64_t got = ctx.in.gotPlt->getVA();
393   uint64_t plt = ctx.in.plt->getVA();
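  // got + 16 is the address of .got.plt[2]: entries are 8 bytes each and the
  // first three are reserved, with the dynamic loader storing the lazy
  // resolver's address in .got.plt[2].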
394   relocateNoSym(buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
395                 getAArch64Page(got + 16) - getAArch64Page(plt + 4));
396   relocateNoSym(buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, got + 16);
397   relocateNoSym(buf + 12, R_AARCH64_ADD_ABS_LO12_NC, got + 16);
398 }
399 
400 void AArch64::writePlt(uint8_t *buf, const Symbol &sym,
401                        uint64_t pltEntryAddr) const {
402   const uint8_t inst[] = {
403       0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.got.plt[n]))
404       0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.got.plt[n]))]
405       0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.got.plt[n]))
406       0x20, 0x02, 0x1f, 0xd6  // br   x17
407   };
408   memcpy(buf, inst, sizeof(inst));
409 
410   uint64_t gotPltEntryAddr = sym.getGotPltVA(ctx);
411   relocateNoSym(buf, R_AARCH64_ADR_PREL_PG_HI21,
412                 getAArch64Page(gotPltEntryAddr) - getAArch64Page(pltEntryAddr));
413   relocateNoSym(buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, gotPltEntryAddr);
414   relocateNoSym(buf + 8, R_AARCH64_ADD_ABS_LO12_NC, gotPltEntryAddr);
415 }
416 
417 bool AArch64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
418                          uint64_t branchAddr, const Symbol &s,
419                          int64_t a) const {
420   // If s is an undefined weak symbol and does not have a PLT entry then it will
421   // be resolved as a branch to the next instruction. If it is hidden, its
422   // binding has been converted to local, so we just check isUndefined() here. An
423   // undefined non-weak symbol will already have been diagnosed as an error.
424   if (s.isUndefined() && !s.isInPlt(ctx))
425     return false;
426   // 'ELF for the Arm 64-bit Architecture', section 'Call and Jump relocations',
427   // only permits range-extension thunks for the R_AARCH64_CALL26 and
428   // R_AARCH64_JUMP26 relocation types.
429   if (type != R_AARCH64_CALL26 && type != R_AARCH64_JUMP26 &&
430       type != R_AARCH64_PLT32)
431     return false;
432   uint64_t dst = expr == R_PLT_PC ? s.getPltVA(ctx) : s.getVA(ctx, a);
433   return !inBranchRange(type, branchAddr, dst);
434 }
435 
436 uint32_t AArch64::getThunkSectionSpacing() const {
437   // See the comment in Arch/ARM.cpp for a more detailed explanation of
438   // getThunkSectionSpacing(). For AArch64, the only branches we are permitted
439   // to thunk have a range of +/- 128 MiB.
440   return (128 * 1024 * 1024) - 0x30000;
441 }
442 
443 bool AArch64::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
444   if (type != R_AARCH64_CALL26 && type != R_AARCH64_JUMP26 &&
445       type != R_AARCH64_PLT32)
446     return true;
447   // The AArch64 call and unconditional branch instructions have a range of
448   // +/- 128 MiB. The PLT32 relocation supports a range up to +/- 2 GiB.
449   uint64_t range =
450       type == R_AARCH64_PLT32 ? (UINT64_C(1) << 31) : (128 * 1024 * 1024);
451   if (dst > src) {
452     // The branch immediate is signed, so the forward range is 4 bytes shorter.
453     range -= 4;
454     return dst - src <= range;
455   }
456   return src - dst <= range;
457 }
458 
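// Write the 21-bit immediate of an ADR or ADRP instruction: bits [1:0] of the
// value go into the immlo field (instruction bits 30:29) and bits [20:2] go
// into the immhi field (instruction bits 23:5).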
459 static void write32AArch64Addr(uint8_t *l, uint64_t imm) {
460   uint32_t immLo = (imm & 0x3) << 29;
461   uint32_t immHi = (imm & 0x1FFFFC) << 3;
462   uint64_t mask = (0x3 << 29) | (0x1FFFFC << 3);
463   write32le(l, (read32le(l) & ~mask) | immLo | immHi);
464 }
465 
466 static void writeMaskedBits32le(uint8_t *p, int32_t v, uint32_t mask) {
467   write32le(p, (read32le(p) & ~mask) | v);
468 }
469 
470 // Update the immediate field in an AArch64 ldr, str, or add instruction.
471 static void write32Imm12(uint8_t *l, uint64_t imm) {
472   writeMaskedBits32le(l, (imm & 0xFFF) << 10, 0xFFF << 10);
473 }
474 
475 // Update the immediate field in an AArch64 movk, movn or movz instruction
476 // for a signed relocation, and update the opcode of a movn or movz instruction
477 // to match the sign of the operand.
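// For example, relocating R_AARCH64_MOVW_SABS_G0 with a value of -3: bit 16 of
// the value is set, so a MOVZ/MOVN is rewritten to MOVN with operand 2, and
// MOVN Xd, #2 materializes ~2 == -3.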
478 static void writeSMovWImm(uint8_t *loc, uint32_t imm) {
479   uint32_t inst = read32le(loc);
480   // Opcode field is bits 30, 29, with 10 = movz, 00 = movn and 11 = movk.
481   if (!(inst & (1 << 29))) {
482     // movn or movz.
483     if (imm & 0x10000) {
484       // Change opcode to movn, which takes an inverted operand.
485       imm ^= 0xFFFF;
486       inst &= ~(1 << 30);
487     } else {
488       // Change opcode to movz.
489       inst |= 1 << 30;
490     }
491   }
492   write32le(loc, inst | ((imm & 0xFFFF) << 5));
493 }
494 
495 void AArch64::relocate(uint8_t *loc, const Relocation &rel,
496                        uint64_t val) const {
497   switch (rel.type) {
498   case R_AARCH64_ABS16:
499   case R_AARCH64_PREL16:
500     checkIntUInt(ctx, loc, val, 16, rel);
501     write16(ctx, loc, val);
502     break;
503   case R_AARCH64_ABS32:
504   case R_AARCH64_PREL32:
505     checkIntUInt(ctx, loc, val, 32, rel);
506     write32(ctx, loc, val);
507     break;
508   case R_AARCH64_PLT32:
509   case R_AARCH64_GOTPCREL32:
510     checkInt(ctx, loc, val, 32, rel);
511     write32(ctx, loc, val);
512     break;
513   case R_AARCH64_ABS64:
514     // AArch64 relocations to tagged symbols have extended semantics, as
515     // described here:
516     // https://github.com/ARM-software/abi-aa/blob/main/memtagabielf64/memtagabielf64.rst#841extended-semantics-of-r_aarch64_relative.
517     // tl;dr: encode the symbol's special addend in the place, which is an
518     // offset to the point where the logical tag is derived from. Quick hack, if
519     // the addend is within the symbol's bounds, no need to encode the tag
520     // derivation offset.
521     if (rel.sym && rel.sym->isTagged() &&
522         (rel.addend < 0 ||
523          rel.addend >= static_cast<int64_t>(rel.sym->getSize())))
524       write64(ctx, loc, -rel.addend);
525     else
526       write64(ctx, loc, val);
527     break;
528   case R_AARCH64_PREL64:
529     write64(ctx, loc, val);
530     break;
531   case R_AARCH64_AUTH_ABS64:
532     // If val is wider than 32 bits, the relocation must have been moved from
533     // .relr.auth.dyn to .rela.dyn, and the addend write is not needed.
534     //
535     // If val fits in 32 bits, we have two potential scenarios:
536     // * True RELR: Write the 32-bit `val`.
537     // * RELA: Even if the value now fits in 32 bits, it might have been
538     //   converted from RELR during an iteration in
539     //   finalizeAddressDependentContent(). Writing the value is harmless
540     //   because dynamic linking ignores it.
541     if (isInt<32>(val))
542       write32(ctx, loc, val);
543     break;
544   case R_AARCH64_ADD_ABS_LO12_NC:
545   case R_AARCH64_AUTH_GOT_ADD_LO12_NC:
546     write32Imm12(loc, val);
547     break;
548   case R_AARCH64_ADR_GOT_PAGE:
549   case R_AARCH64_AUTH_ADR_GOT_PAGE:
550   case R_AARCH64_ADR_PREL_PG_HI21:
551   case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
552   case R_AARCH64_TLSDESC_ADR_PAGE21:
553   case R_AARCH64_AUTH_TLSDESC_ADR_PAGE21:
554     checkInt(ctx, loc, val, 33, rel);
555     [[fallthrough]];
556   case R_AARCH64_ADR_PREL_PG_HI21_NC:
557     write32AArch64Addr(loc, val >> 12);
558     break;
559   case R_AARCH64_ADR_PREL_LO21:
560   case R_AARCH64_AUTH_GOT_ADR_PREL_LO21:
561     checkInt(ctx, loc, val, 21, rel);
562     write32AArch64Addr(loc, val);
563     break;
564   case R_AARCH64_JUMP26:
565     // Normally we would just write the bits of the immediate field, however
566     // when patching instructions for the CPU errata fix --fix-cortex-a53-843419
567     // we want to replace a non-branch instruction with a branch immediate
568     // instruction. By writing all the bits of the instruction including the
569     // opcode and the immediate (0b000101 imm26) we can do this
570     // transformation by placing a R_AARCH64_JUMP26 relocation at the offset of
571     // the instruction we want to patch.
572     write32le(loc, 0x14000000);
573     [[fallthrough]];
574   case R_AARCH64_CALL26:
575     checkInt(ctx, loc, val, 28, rel);
576     writeMaskedBits32le(loc, (val & 0x0FFFFFFC) >> 2, 0x0FFFFFFC >> 2);
577     break;
578   case R_AARCH64_CONDBR19:
579   case R_AARCH64_LD_PREL_LO19:
580   case R_AARCH64_GOT_LD_PREL19:
581   case R_AARCH64_AUTH_GOT_LD_PREL19:
582     checkAlignment(ctx, loc, val, 4, rel);
583     checkInt(ctx, loc, val, 21, rel);
584     writeMaskedBits32le(loc, (val & 0x1FFFFC) << 3, 0x1FFFFC << 3);
585     break;
586   case R_AARCH64_LDST8_ABS_LO12_NC:
587   case R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
588     write32Imm12(loc, getBits(val, 0, 11));
589     break;
590   case R_AARCH64_LDST16_ABS_LO12_NC:
591   case R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
592     checkAlignment(ctx, loc, val, 2, rel);
593     write32Imm12(loc, getBits(val, 1, 11));
594     break;
595   case R_AARCH64_LDST32_ABS_LO12_NC:
596   case R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
597     checkAlignment(ctx, loc, val, 4, rel);
598     write32Imm12(loc, getBits(val, 2, 11));
599     break;
600   case R_AARCH64_LDST64_ABS_LO12_NC:
601   case R_AARCH64_LD64_GOT_LO12_NC:
602   case R_AARCH64_AUTH_LD64_GOT_LO12_NC:
603   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
604   case R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
605   case R_AARCH64_TLSDESC_LD64_LO12:
606   case R_AARCH64_AUTH_TLSDESC_LD64_LO12:
607     checkAlignment(ctx, loc, val, 8, rel);
608     write32Imm12(loc, getBits(val, 3, 11));
609     break;
610   case R_AARCH64_LDST128_ABS_LO12_NC:
611   case R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC:
612     checkAlignment(ctx, loc, val, 16, rel);
613     write32Imm12(loc, getBits(val, 4, 11));
614     break;
615   case R_AARCH64_LD64_GOTPAGE_LO15:
616     checkAlignment(ctx, loc, val, 8, rel);
617     write32Imm12(loc, getBits(val, 3, 14));
618     break;
619   case R_AARCH64_MOVW_UABS_G0:
620     checkUInt(ctx, loc, val, 16, rel);
621     [[fallthrough]];
622   case R_AARCH64_MOVW_UABS_G0_NC:
623     writeMaskedBits32le(loc, (val & 0xFFFF) << 5, 0xFFFF << 5);
624     break;
625   case R_AARCH64_MOVW_UABS_G1:
626     checkUInt(ctx, loc, val, 32, rel);
627     [[fallthrough]];
628   case R_AARCH64_MOVW_UABS_G1_NC:
629     writeMaskedBits32le(loc, (val & 0xFFFF0000) >> 11, 0xFFFF0000 >> 11);
630     break;
631   case R_AARCH64_MOVW_UABS_G2:
632     checkUInt(ctx, loc, val, 48, rel);
633     [[fallthrough]];
634   case R_AARCH64_MOVW_UABS_G2_NC:
635     writeMaskedBits32le(loc, (val & 0xFFFF00000000) >> 27,
636                         0xFFFF00000000 >> 27);
637     break;
638   case R_AARCH64_MOVW_UABS_G3:
639     writeMaskedBits32le(loc, (val & 0xFFFF000000000000) >> 43,
640                         0xFFFF000000000000 >> 43);
641     break;
642   case R_AARCH64_MOVW_PREL_G0:
643   case R_AARCH64_MOVW_SABS_G0:
644   case R_AARCH64_TLSLE_MOVW_TPREL_G0:
645     checkInt(ctx, loc, val, 17, rel);
646     [[fallthrough]];
647   case R_AARCH64_MOVW_PREL_G0_NC:
648   case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
649     writeSMovWImm(loc, val);
650     break;
651   case R_AARCH64_MOVW_PREL_G1:
652   case R_AARCH64_MOVW_SABS_G1:
653   case R_AARCH64_TLSLE_MOVW_TPREL_G1:
654     checkInt(ctx, loc, val, 33, rel);
655     [[fallthrough]];
656   case R_AARCH64_MOVW_PREL_G1_NC:
657   case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
658     writeSMovWImm(loc, val >> 16);
659     break;
660   case R_AARCH64_MOVW_PREL_G2:
661   case R_AARCH64_MOVW_SABS_G2:
662   case R_AARCH64_TLSLE_MOVW_TPREL_G2:
663     checkInt(ctx, loc, val, 49, rel);
664     [[fallthrough]];
665   case R_AARCH64_MOVW_PREL_G2_NC:
666     writeSMovWImm(loc, val >> 32);
667     break;
668   case R_AARCH64_MOVW_PREL_G3:
669     writeSMovWImm(loc, val >> 48);
670     break;
671   case R_AARCH64_TSTBR14:
672     checkInt(ctx, loc, val, 16, rel);
673     writeMaskedBits32le(loc, (val & 0xFFFC) << 3, 0xFFFC << 3);
674     break;
675   case R_AARCH64_TLSLE_ADD_TPREL_HI12:
676     checkUInt(ctx, loc, val, 24, rel);
677     write32Imm12(loc, val >> 12);
678     break;
679   case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
680   case R_AARCH64_TLSDESC_ADD_LO12:
681   case R_AARCH64_AUTH_TLSDESC_ADD_LO12:
682     write32Imm12(loc, val);
683     break;
684   case R_AARCH64_TLSDESC:
685     // For R_AARCH64_TLSDESC the addend is stored in the second 64-bit word.
686     write64(ctx, loc + 8, val);
687     break;
688   default:
689     llvm_unreachable("unknown relocation");
690   }
691 }
692 
693 void AArch64::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
694                              uint64_t val) const {
695   // TLSDESC Global-Dynamic relocations are of the form:
696   //   adrp    x0, :tlsdesc:v             [R_AARCH64_TLSDESC_ADR_PAGE21]
697   //   ldr     x1, [x0, #:tlsdesc_lo12:v] [R_AARCH64_TLSDESC_LD64_LO12]
698   //   add     x0, x0, :tlsdesc_lo12:v    [R_AARCH64_TLSDESC_ADD_LO12]
699   //   .tlsdesccall                       [R_AARCH64_TLSDESC_CALL]
700   //   blr     x1
701   // And they can be optimized to:
702   //   movz    x0, #0x0, lsl #16
703   //   movk    x0, #0x10
704   //   nop
705   //   nop
706   checkUInt(ctx, loc, val, 32, rel);
707 
708   switch (rel.type) {
709   case R_AARCH64_TLSDESC_ADD_LO12:
710   case R_AARCH64_TLSDESC_CALL:
711     write32le(loc, 0xd503201f); // nop
712     return;
713   case R_AARCH64_TLSDESC_ADR_PAGE21:
714     write32le(loc, 0xd2a00000 | (((val >> 16) & 0xffff) << 5)); // movz
715     return;
716   case R_AARCH64_TLSDESC_LD64_LO12:
717     write32le(loc, 0xf2800000 | ((val & 0xffff) << 5)); // movk
718     return;
719   default:
720     llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
721   }
722 }
723 
724 void AArch64::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
725                              uint64_t val) const {
726   // TLSDESC Global-Dynamic relocations are of the form:
727   //   adrp    x0, :tlsdesc:v             [R_AARCH64_TLSDESC_ADR_PAGE21]
728   //   ldr     x1, [x0, #:tlsdesc_lo12:v] [R_AARCH64_TLSDESC_LD64_LO12]
729   //   add     x0, x0, :tlsdesc_lo12:v    [R_AARCH64_TLSDESC_ADD_LO12]
730   //   .tlsdesccall                       [R_AARCH64_TLSDESC_CALL]
731   //   blr     x1
732   // And they can be optimized to:
733   //   adrp    x0, :gottprel:v
734   //   ldr     x0, [x0, :gottprel_lo12:v]
735   //   nop
736   //   nop
737 
738   switch (rel.type) {
739   case R_AARCH64_TLSDESC_ADD_LO12:
740   case R_AARCH64_TLSDESC_CALL:
741     write32le(loc, 0xd503201f); // nop
742     break;
743   case R_AARCH64_TLSDESC_ADR_PAGE21:
744     write32le(loc, 0x90000000); // adrp
745     relocateNoSym(loc, R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, val);
746     break;
747   case R_AARCH64_TLSDESC_LD64_LO12:
748     write32le(loc, 0xf9400000); // ldr
749     relocateNoSym(loc, R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, val);
750     break;
751   default:
752     llvm_unreachable("unsupported relocation for TLS GD to IE relaxation");
753   }
754 }
755 
756 void AArch64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
757                              uint64_t val) const {
758   checkUInt(ctx, loc, val, 32, rel);
759 
760   if (rel.type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
761     // Generate MOVZ.
762     uint32_t regNo = read32le(loc) & 0x1f;
763     write32le(loc, (0xd2a00000 | regNo) | (((val >> 16) & 0xffff) << 5));
764     return;
765   }
766   if (rel.type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) {
767     // Generate MOVK.
768     uint32_t regNo = read32le(loc) & 0x1f;
769     write32le(loc, (0xf2800000 | regNo) | ((val & 0xffff) << 5));
770     return;
771   }
772   llvm_unreachable("invalid relocation for TLS IE to LE relaxation");
773 }
774 
775 AArch64Relaxer::AArch64Relaxer(Ctx &ctx, ArrayRef<Relocation> relocs)
776     : ctx(ctx) {
777   if (!ctx.arg.relax)
778     return;
779   // Check if R_AARCH64_ADR_GOT_PAGE and R_AARCH64_LD64_GOT_LO12_NC
780   // always appear in pairs.
781   size_t i = 0;
782   const size_t size = relocs.size();
783   for (; i != size; ++i) {
784     if (relocs[i].type == R_AARCH64_ADR_GOT_PAGE) {
785       if (i + 1 < size && relocs[i + 1].type == R_AARCH64_LD64_GOT_LO12_NC) {
786         ++i;
787         continue;
788       }
789       break;
790     } else if (relocs[i].type == R_AARCH64_LD64_GOT_LO12_NC) {
791       break;
792     }
793   }
794   safeToRelaxAdrpLdr = i == size;
795 }
796 
797 bool AArch64Relaxer::tryRelaxAdrpAdd(const Relocation &adrpRel,
798                                      const Relocation &addRel, uint64_t secAddr,
799                                      uint8_t *buf) const {
800   // When the address of sym is within the range of ADR then
801   // we may relax
802   // ADRP xn, sym
803   // ADD  xn, xn, :lo12: sym
804   // to
805   // NOP
806   // ADR xn, sym
807   if (!ctx.arg.relax || adrpRel.type != R_AARCH64_ADR_PREL_PG_HI21 ||
808       addRel.type != R_AARCH64_ADD_ABS_LO12_NC)
809     return false;
810   // Check if the relocations apply to consecutive instructions.
811   if (adrpRel.offset + 4 != addRel.offset)
812     return false;
813   if (adrpRel.sym != addRel.sym)
814     return false;
815   if (adrpRel.addend != 0 || addRel.addend != 0)
816     return false;
817 
818   uint32_t adrpInstr = read32le(buf + adrpRel.offset);
819   uint32_t addInstr = read32le(buf + addRel.offset);
820   // Check if the first instruction is ADRP and the second instruction is ADD.
821   if ((adrpInstr & 0x9f000000) != 0x90000000 ||
822       (addInstr & 0xffc00000) != 0x91000000)
823     return false;
824   uint32_t adrpDestReg = adrpInstr & 0x1f;
825   uint32_t addDestReg = addInstr & 0x1f;
826   uint32_t addSrcReg = (addInstr >> 5) & 0x1f;
827   if (adrpDestReg != addDestReg || adrpDestReg != addSrcReg)
828     return false;
829 
830   Symbol &sym = *adrpRel.sym;
831   // Check if the address difference is within the +/- 1 MiB ADR range.
832   int64_t val = sym.getVA(ctx) - (secAddr + addRel.offset);
833   if (val < -1024 * 1024 || val >= 1024 * 1024)
834     return false;
835 
836   Relocation adrRel = {R_ABS, R_AARCH64_ADR_PREL_LO21, addRel.offset,
837                        /*addend=*/0, &sym};
838   // nop
839   write32le(buf + adrpRel.offset, 0xd503201f);
840   // adr x_<dest_reg>
841   write32le(buf + adrRel.offset, 0x10000000 | adrpDestReg);
842   ctx.target->relocate(buf + adrRel.offset, adrRel, val);
843   return true;
844 }
845 
846 bool AArch64Relaxer::tryRelaxAdrpLdr(const Relocation &adrpRel,
847                                      const Relocation &ldrRel, uint64_t secAddr,
848                                      uint8_t *buf) const {
849   if (!safeToRelaxAdrpLdr)
850     return false;
851 
852   // When the definition of sym is not preemptible then we may
853   // be able to relax
854   // ADRP xn, :got: sym
855   // LDR  xn, [xn, :got_lo12: sym]
856   // to
857   // ADRP xn, sym
858   // ADD  xn, xn, :lo12: sym
859 
860   if (adrpRel.type != R_AARCH64_ADR_GOT_PAGE ||
861       ldrRel.type != R_AARCH64_LD64_GOT_LO12_NC)
862     return false;
863   // Check if the relocations apply to consecutive instructions.
864   if (adrpRel.offset + 4 != ldrRel.offset)
865     return false;
866   // Check if the relocations reference the same symbol and
867   // skip undefined, preemptible and STT_GNU_IFUNC symbols.
868   if (!adrpRel.sym || adrpRel.sym != ldrRel.sym || !adrpRel.sym->isDefined() ||
869       adrpRel.sym->isPreemptible || adrpRel.sym->isGnuIFunc())
870     return false;
871   // Check if the addends of both relocations are zero.
872   if (adrpRel.addend != 0 || ldrRel.addend != 0)
873     return false;
874   uint32_t adrpInstr = read32le(buf + adrpRel.offset);
875   uint32_t ldrInstr = read32le(buf + ldrRel.offset);
876   // Check if the first instruction is ADRP and the second instruction is LDR.
877   if ((adrpInstr & 0x9f000000) != 0x90000000 ||
878       (ldrInstr & 0x3b000000) != 0x39000000)
879     return false;
880   // Check the value of the sf bit.
881   if (!(ldrInstr >> 31))
882     return false;
883   uint32_t adrpDestReg = adrpInstr & 0x1f;
884   uint32_t ldrDestReg = ldrInstr & 0x1f;
885   uint32_t ldrSrcReg = (ldrInstr >> 5) & 0x1f;
886   // Check if ADRP and LDR use the same register.
887   if (adrpDestReg != ldrDestReg || adrpDestReg != ldrSrcReg)
888     return false;
889 
890   Symbol &sym = *adrpRel.sym;
891   // GOT references to absolute symbols can't be relaxed to use ADRP/ADD in
892   // position-independent code because these instructions produce a relative
893   // address.
894   if (ctx.arg.isPic && !cast<Defined>(sym).section)
895     return false;
896   // Check if the address difference is within the +/- 4 GiB ADRP range.
897   int64_t val =
898       getAArch64Page(sym.getVA(ctx)) - getAArch64Page(secAddr + adrpRel.offset);
899   if (val != llvm::SignExtend64(val, 33))
900     return false;
901 
902   Relocation adrpSymRel = {RE_AARCH64_PAGE_PC, R_AARCH64_ADR_PREL_PG_HI21,
903                            adrpRel.offset, /*addend=*/0, &sym};
904   Relocation addRel = {R_ABS, R_AARCH64_ADD_ABS_LO12_NC, ldrRel.offset,
905                        /*addend=*/0, &sym};
906 
907   // adrp x_<dest_reg>
908   write32le(buf + adrpSymRel.offset, 0x90000000 | adrpDestReg);
909   // add x_<dest reg>, x_<dest reg>
910   write32le(buf + addRel.offset, 0x91000000 | adrpDestReg | (adrpDestReg << 5));
911 
912   ctx.target->relocate(
913       buf + adrpSymRel.offset, adrpSymRel,
914       SignExtend64(getAArch64Page(sym.getVA(ctx)) -
915                        getAArch64Page(secAddr + adrpSymRel.offset),
916                    64));
917   ctx.target->relocate(buf + addRel.offset, addRel,
918                        SignExtend64(sym.getVA(ctx), 64));
919   tryRelaxAdrpAdd(adrpSymRel, addRel, secAddr, buf);
920   return true;
921 }
922 
923 // Tagged symbols have upper address bits that are added by the dynamic loader,
924 // and thus need the full 64-bit GOT entry. Do not relax such symbols.
925 static bool needsGotForMemtag(const Relocation &rel) {
926   return rel.sym->isTagged() && needsGot(rel.expr);
927 }
928 
929 void AArch64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
930   uint64_t secAddr = sec.getOutputSection()->addr;
931   if (auto *s = dyn_cast<InputSection>(&sec))
932     secAddr += s->outSecOff;
933   else if (auto *ehIn = dyn_cast<EhInputSection>(&sec))
934     secAddr += ehIn->getParent()->outSecOff;
935   AArch64Relaxer relaxer(ctx, sec.relocs());
936   for (size_t i = 0, size = sec.relocs().size(); i != size; ++i) {
937     const Relocation &rel = sec.relocs()[i];
938     uint8_t *loc = buf + rel.offset;
939     const uint64_t val = sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset);
940 
941     if (needsGotForMemtag(rel)) {
942       relocate(loc, rel, val);
943       continue;
944     }
945 
946     switch (rel.expr) {
947     case RE_AARCH64_GOT_PAGE_PC:
948       if (i + 1 < size &&
949           relaxer.tryRelaxAdrpLdr(rel, sec.relocs()[i + 1], secAddr, buf)) {
950         ++i;
951         continue;
952       }
953       break;
954     case RE_AARCH64_PAGE_PC:
955       if (i + 1 < size &&
956           relaxer.tryRelaxAdrpAdd(rel, sec.relocs()[i + 1], secAddr, buf)) {
957         ++i;
958         continue;
959       }
960       break;
961     case RE_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC:
962     case R_RELAX_TLS_GD_TO_IE_ABS:
963       relaxTlsGdToIe(loc, rel, val);
964       continue;
965     case R_RELAX_TLS_GD_TO_LE:
966       relaxTlsGdToLe(loc, rel, val);
967       continue;
968     case R_RELAX_TLS_IE_TO_LE:
969       relaxTlsIeToLe(loc, rel, val);
970       continue;
971     default:
972       break;
973     }
974     relocate(loc, rel, val);
975   }
976 }
977 
978 // AArch64 may use security features in variant PLT sequences. These are:
979 // Pointer Authentication (PAC), introduced in armv8.3-a, and Branch Target
980 // Identification (BTI), introduced in armv8.5-a. The additional instructions
981 // used in the variant PLT sequences are encoded in the HINT space so they can
982 // be deployed on older architectures, which treat the instructions as nops.
983 // PAC and BTI can be combined leading to the following combinations:
984 // writePltHeader
985 // writePltHeaderBti (no PAC Header needed)
986 // writePlt
987 // writePltBti (BTI only)
988 // writePltPac (PAC only)
989 // writePltBtiPac (BTI and PAC)
990 //
991 // When PAC is enabled, the dynamic loader signs the address that it places
992 // in the .got.plt using the pacia1716 instruction, which signs the value in
993 // x17 using the modifier in x16. The static linker places autia1716 before the
994 // indirect branch to x17 to authenticate the address in x17 with the modifier
995 // in x16. This makes it more difficult for an attacker to modify the value in
996 // the .got.plt.
997 //
998 // When BTI is enabled, all indirect branches must land on a bti instruction.
999 // The static linker must place a bti instruction at the start of any PLT entry
1000 // that may be the target of an indirect branch. As the PLT entries reach the
1001 // lazy resolver via an indirect branch to the PLT header, the header must start
1002 // with a bti instruction. In general a bti instruction is not needed for a PLT
1003 // entry, as indirect calls are resolved to the function address and not to the
1004 // PLT entry for the function. There are a small number of cases where the PLT
1005 // address can escape, such as when the address of a function or ifunc is taken
1006 // via a non-GOT-generating relocation and a shared library refers to that symbol.
1007 //
1008 // We use the bti c variant of the instruction which permits indirect branches
1009 // (br) via x16/x17 and indirect function calls (blr) via any register. The ABI
1010 // guarantees that all indirect branches from code requiring BTI protection
1011 // will go via x16/x17.
1012 
1013 namespace {
1014 class AArch64BtiPac final : public AArch64 {
1015 public:
1016   AArch64BtiPac(Ctx &);
1017   void writePltHeader(uint8_t *buf) const override;
1018   void writePlt(uint8_t *buf, const Symbol &sym,
1019                 uint64_t pltEntryAddr) const override;
1020 
1021 private:
1022   bool btiHeader; // bti instruction needed in PLT Header and Entry
1023   enum {
1024     PEK_NoAuth,
1025     PEK_AuthHint, // use autia1716 instr for authenticated branch in PLT entry
1026     PEK_Auth,     // use braa instr for authenticated branch in PLT entry
1027   } pacEntryKind;
1028 };
1029 } // namespace
1030 
1031 AArch64BtiPac::AArch64BtiPac(Ctx &ctx) : AArch64(ctx) {
1032   btiHeader = (ctx.arg.andFeatures & GNU_PROPERTY_AARCH64_FEATURE_1_BTI);
1033   // A BTI (Branch Target Identification) PLT entry is only required if the
1034   // address of the PLT entry can be taken by the program, which permits an
1035   // indirect jump to the PLT entry. This can happen when the address
1036   // of the PLT entry for a function is canonicalised due to the address of
1037   // the function in an executable being taken by a shared library, or
1038   // non-preemptible ifunc referenced by non-GOT-generating, non-PLT-generating
1039   // relocations.
1040   // The PAC PLT entries require dynamic loader support and this isn't known
1041   // from properties in the objects, so we use the command line flag.
1042   // By default we only use hint-space instructions, but if we detect the
1043   // PAuthABI, which requires v8.3-A, we can use the non-hint space
1044   // instructions.
1045 
1046   if (ctx.arg.zPacPlt) {
1047     if (llvm::any_of(ctx.aarch64PauthAbiCoreInfo,
1048                      [](uint8_t c) { return c != 0; }))
1049       pacEntryKind = PEK_Auth;
1050     else
1051       pacEntryKind = PEK_AuthHint;
1052   } else {
1053     pacEntryKind = PEK_NoAuth;
1054   }
1055 
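  // The BTI landing pad or the authenticated branch adds one instruction to
  // each PLT entry, growing it from 16 to 24 bytes (the rest is nop padding).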
1056   if (btiHeader || (pacEntryKind != PEK_NoAuth)) {
1057     pltEntrySize = 24;
1058     ipltEntrySize = 24;
1059   }
1060 }
1061 
1062 void AArch64BtiPac::writePltHeader(uint8_t *buf) const {
1063   const uint8_t btiData[] = { 0x5f, 0x24, 0x03, 0xd5 }; // bti c
1064   const uint8_t pltData[] = {
1065       0xf0, 0x7b, 0xbf, 0xa9, // stp    x16, x30, [sp,#-16]!
1066       0x10, 0x00, 0x00, 0x90, // adrp   x16, Page(&(.got.plt[2]))
1067       0x11, 0x02, 0x40, 0xf9, // ldr    x17, [x16, Offset(&(.got.plt[2]))]
1068       0x10, 0x02, 0x00, 0x91, // add    x16, x16, Offset(&(.got.plt[2]))
1069       0x20, 0x02, 0x1f, 0xd6, // br     x17
1070       0x1f, 0x20, 0x03, 0xd5, // nop
1071       0x1f, 0x20, 0x03, 0xd5  // nop
1072   };
1073   const uint8_t nopData[] = { 0x1f, 0x20, 0x03, 0xd5 }; // nop
1074 
1075   uint64_t got = ctx.in.gotPlt->getVA();
1076   uint64_t plt = ctx.in.plt->getVA();
1077 
1078   if (btiHeader) {
1079     // PltHeader is called indirectly by plt[N]. Prefix pltData with a BTI C
1080     // instruction.
1081     memcpy(buf, btiData, sizeof(btiData));
1082     buf += sizeof(btiData);
1083     plt += sizeof(btiData);
1084   }
1085   memcpy(buf, pltData, sizeof(pltData));
1086 
1087   relocateNoSym(buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
1088                 getAArch64Page(got + 16) - getAArch64Page(plt + 4));
1089   relocateNoSym(buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, got + 16);
1090   relocateNoSym(buf + 12, R_AARCH64_ADD_ABS_LO12_NC, got + 16);
1091   if (!btiHeader)
1092     // We didn't add the BTI c instruction so round out size with NOP.
1093     memcpy(buf + sizeof(pltData), nopData, sizeof(nopData));
1094 }
1095 
1096 void AArch64BtiPac::writePlt(uint8_t *buf, const Symbol &sym,
1097                              uint64_t pltEntryAddr) const {
1098   // The PLT entry is of the form:
1099   // [btiData] addrInst (pacBr | stdBr) [nopData]
1100   const uint8_t btiData[] = { 0x5f, 0x24, 0x03, 0xd5 }; // bti c
1101   const uint8_t addrInst[] = {
1102       0x10, 0x00, 0x00, 0x90,  // adrp x16, Page(&(.got.plt[n]))
1103       0x11, 0x02, 0x40, 0xf9,  // ldr  x17, [x16, Offset(&(.got.plt[n]))]
1104       0x10, 0x02, 0x00, 0x91   // add  x16, x16, Offset(&(.got.plt[n]))
1105   };
1106   const uint8_t pacHintBr[] = {
1107       0x9f, 0x21, 0x03, 0xd5, // autia1716
1108       0x20, 0x02, 0x1f, 0xd6  // br   x17
1109   };
1110   const uint8_t pacBr[] = {
1111       0x30, 0x0a, 0x1f, 0xd7, // braa x17, x16
1112       0x1f, 0x20, 0x03, 0xd5  // nop
1113   };
1114   const uint8_t stdBr[] = {
1115       0x20, 0x02, 0x1f, 0xd6,  // br   x17
1116       0x1f, 0x20, 0x03, 0xd5   // nop
1117   };
1118   const uint8_t nopData[] = { 0x1f, 0x20, 0x03, 0xd5 }; // nop
1119 
1120   // NEEDS_COPY indicates a non-ifunc canonical PLT entry whose address may
1121   // escape to shared objects. isInIplt indicates a non-preemptible ifunc. Its
1122   // address may escape if referenced by a direct relocation. With relative
1123   // vtables, if the vtable is in a shared object the offsets will point to the
1124   // PLT entry. The condition is conservative.
1125   bool hasBti = btiHeader &&
1126                 (sym.hasFlag(NEEDS_COPY) || sym.isInIplt || sym.thunkAccessed);
1127   if (hasBti) {
1128     memcpy(buf, btiData, sizeof(btiData));
1129     buf += sizeof(btiData);
1130     pltEntryAddr += sizeof(btiData);
1131   }
1132 
1133   uint64_t gotPltEntryAddr = sym.getGotPltVA(ctx);
1134   memcpy(buf, addrInst, sizeof(addrInst));
1135   relocateNoSym(buf, R_AARCH64_ADR_PREL_PG_HI21,
1136                 getAArch64Page(gotPltEntryAddr) - getAArch64Page(pltEntryAddr));
1137   relocateNoSym(buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, gotPltEntryAddr);
1138   relocateNoSym(buf + 8, R_AARCH64_ADD_ABS_LO12_NC, gotPltEntryAddr);
1139 
1140   if (pacEntryKind != PEK_NoAuth)
1141     memcpy(buf + sizeof(addrInst),
1142            pacEntryKind == PEK_AuthHint ? pacHintBr : pacBr,
1143            sizeof(pacEntryKind == PEK_AuthHint ? pacHintBr : pacBr));
1144   else
1145     memcpy(buf + sizeof(addrInst), stdBr, sizeof(stdBr));
1146   if (!hasBti)
1147     // We didn't add the BTI c instruction so round out size with NOP.
1148     memcpy(buf + sizeof(addrInst) + sizeof(stdBr), nopData, sizeof(nopData));
1149 }
1150 
1151 template <class ELFT>
1152 static void
1153 addTaggedSymbolReferences(Ctx &ctx, InputSectionBase &sec,
1154                           DenseMap<Symbol *, unsigned> &referenceCount) {
1155   assert(sec.type == SHT_AARCH64_MEMTAG_GLOBALS_STATIC);
1156 
1157   const RelsOrRelas<ELFT> rels = sec.relsOrRelas<ELFT>();
1158   if (rels.areRelocsRel())
1159     ErrAlways(ctx)
1160         << "non-RELA relocations are not allowed with memtag globals";
1161 
1162   for (const typename ELFT::Rela &rel : rels.relas) {
1163     Symbol &sym = sec.file->getRelocTargetSym(rel);
1164     // Linker-synthesized symbols such as __executable_start may be referenced
1165     // as tagged in input objfiles, and we don't want them to be tagged. A
1166     // cheap way to exclude them is the type check, as their type is
1167     // STT_NOTYPE. In addition, this saves us from checking untaggable symbols,
1168     // like functions or TLS symbols.
1169     if (sym.type != STT_OBJECT)
1170       continue;
1171     // STB_LOCAL symbols can't be referenced from outside the object file, and
1172     // thus don't need to be checked for references from other object files.
1173     if (sym.binding == STB_LOCAL) {
1174       sym.setIsTagged(true);
1175       continue;
1176     }
1177     ++referenceCount[&sym];
1178   }
1179   sec.markDead();
1180 }
1181 
1182 // A tagged symbol must be denoted as being tagged by all references and the
1183 // chosen definition. For simplicity, here, it must also be denoted as tagged
1184 // for all definitions. Otherwise:
1185 //
1186 //  1. A tagged definition can be used by an untagged declaration, in which case
1187 //     the untagged access may be PC-relative, causing a tag mismatch at
1188 //     runtime.
1189 //  2. An untagged definition can be used by a tagged declaration, where the
1190 //     compiler has taken advantage of the increased alignment of the tagged
1191 //     declaration, but the alignment at runtime is wrong, causing a fault.
1192 //
1193 // Ideally, this isn't a problem, as any TU that imports or exports tagged
1194 // symbols should also be built with tagging. But, to handle these cases, we
1195 // demote the symbol to be untagged.
1196 void elf::createTaggedSymbols(Ctx &ctx) {
1197   assert(hasMemtag(ctx));
1198 
1199   // First, collect all symbols that are marked as tagged, and count how many
1200   // times they're marked as tagged.
1201   DenseMap<Symbol *, unsigned> taggedSymbolReferenceCount;
1202   for (InputFile *file : ctx.objectFiles) {
1203     if (file->kind() != InputFile::ObjKind)
1204       continue;
1205     for (InputSectionBase *section : file->getSections()) {
1206       if (!section || section->type != SHT_AARCH64_MEMTAG_GLOBALS_STATIC ||
1207           section == &InputSection::discarded)
1208         continue;
1209       invokeELFT(addTaggedSymbolReferences, ctx, *section,
1210                  taggedSymbolReferenceCount);
1211     }
1212   }
1213 
1214   // Now, go through all the symbols. If the number of declarations and
1215   // definitions of a symbol exceeds the number of times it's marked as
1216   // tagged, it means we have an objfile that uses the untagged variant of the
1217   // symbol.
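  // For example, a symbol that appears in three objfiles' symbol tables but
  // was only counted as tagged twice has an untagged use: its count hits zero
  // early, the map entry is erased below, and the symbol stays untagged.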
1218   for (InputFile *file : ctx.objectFiles) {
1219     if (file->kind() != InputFile::BinaryKind &&
1220         file->kind() != InputFile::ObjKind)
1221       continue;
1222 
1223     for (Symbol *symbol : file->getSymbols()) {
1224       // See `addTaggedSymbolReferences` for more details.
1225       if (symbol->type != STT_OBJECT ||
1226           symbol->binding == STB_LOCAL)
1227         continue;
1228       auto it = taggedSymbolReferenceCount.find(symbol);
1229       if (it == taggedSymbolReferenceCount.end()) continue;
1230       unsigned &remainingAllowedTaggedRefs = it->second;
1231       if (remainingAllowedTaggedRefs == 0) {
1232         taggedSymbolReferenceCount.erase(it);
1233         continue;
1234       }
1235       --remainingAllowedTaggedRefs;
1236     }
1237   }
1238 
1239   // `addTaggedSymbolReferences` has already checked that we have RELA
1240   // relocations; the only other way to get written addends is with
1241   // --apply-dynamic-relocs.
1242   if (!taggedSymbolReferenceCount.empty() && ctx.arg.writeAddends)
1243     ErrAlways(ctx) << "--apply-dynamic-relocs cannot be used with MTE globals";
1244 
1245   // Now, `taggedSymbolReferenceCount` should only contain symbols that are
1246   // marked as tagged exactly as many times as they are referenced, meaning all
1247   // uses are tagged.
1248   for (auto &[symbol, remainingTaggedRefs] : taggedSymbolReferenceCount) {
1249     assert(remainingTaggedRefs == 0 &&
1250             "Symbol is defined as tagged more times than it's used");
1251     symbol->setIsTagged(true);
1252   }
1253 }
1254 
1255 void elf::setAArch64TargetInfo(Ctx &ctx) {
1256   if ((ctx.arg.andFeatures & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) ||
1257       ctx.arg.zPacPlt)
1258     ctx.target.reset(new AArch64BtiPac(ctx));
1259   else
1260     ctx.target.reset(new AArch64(ctx));
1261 }
1262