//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/TargetParser/Triple.h"
using namespace llvm;

namespace {

class AArch64AsmBackend : public MCAsmBackend {
  static const unsigned PCRelFlagVal =
      MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;
protected:
  Triple TheTriple;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(IsLittleEndian ? llvm::endianness::little
                                    : llvm::endianness::big),
        TheTriple(TT) {}

  unsigned getNumFixupKinds() const override {
    return AArch64::NumTargetFixupKinds;
  }

  std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                           Offset (bits) Size (bits)     Flags
        {"fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_add_imm12", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
        {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_movw", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch9", 5, 9, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch16", 5, 16, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal}};

    // Fixup kinds from .reloc directive are like R_AARCH64_NONE. They do not
    // require any extra processing.
    if (Kind >= FirstLiteralRelocationKind)
      return MCAsmBackend::getFixupKindInfo(FK_NONE);

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup,
                            uint64_t Value) const override;
  void relaxInstruction(MCInst &Inst,
                        const MCSubtargetInfo &STI) const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count,
                    const MCSubtargetInfo *STI) const override;

  unsigned getFixupKindContainerSizeInBytes(unsigned Kind) const;

  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target, const uint64_t Value,
                             const MCSubtargetInfo *STI) override;
};

} // end anonymous namespace

/// The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;

  case FK_Data_2:
  case FK_SecRel_2:
    return 2;

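  // These fixup values all fall within the low three bytes of the 32-bit
  // instruction word.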
  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch9:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_pcrel_branch16:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
  case FK_SecRel_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}

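/// Encode a 21-bit ADR/ADRP immediate into its two instruction fields: the
/// low two bits go to immlo (bits 30:29) and the high 19 bits to immhi
/// (bits 23:5).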
static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}

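/// Range-check a fixup value for its kind and convert it to the raw bits the
/// instruction's immediate field expects (dropping alignment bits and scaling
/// as needed).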
static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
                                 uint64_t Value, MCContext &Ctx,
                                 const Triple &TheTriple, bool IsResolved) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF()) {
      if (!isInt<21>(SignedValue))
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
      return AdrImmBits(Value & 0x1fffffULL);
    }
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 21-bit byte offset encoded as a 19-bit immediate, scaled by 4.
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (!isUInt<12>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (!isUInt<13>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (!isUInt<14>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (!isUInt<15>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (!isUInt<16>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw: {
    AArch64MCExpr::VariantKind RefKind =
        static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
    if (AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_ABS &&
        AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_SABS) {
      if (!RefKind) {
        // The fixup is an expression
        if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
          Ctx.reportError(Fixup.getLoc(),
                          "fixup value out of range [-0xFFFF, 0xFFFF]");

        // Invert the negative immediate because it will feed into a MOVN.
        if (SignedValue < 0)
          SignedValue = ~SignedValue;
        Value = static_cast<uint64_t>(SignedValue);
      } else
        // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
        // ever be resolved in the assembler.
        Ctx.reportError(Fixup.getLoc(),
                        "relocation for a thread-local variable points to an "
                        "absolute symbol");
      return Value;
    }

    if (!IsResolved) {
      // FIXME: Figure out when this can actually happen, and verify our
      // behavior.
      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
                                      "implemented");
      return Value;
    }

    if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        SignedValue = SignedValue >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        SignedValue = SignedValue >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        SignedValue = SignedValue >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }

    } else {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        Value = Value >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        Value = Value >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        Value = Value >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }
    }

    if (RefKind & AArch64MCExpr::VK_NC) {
      Value &= 0xFFFF;
    } else if (AArch64MCExpr::getSymbolLoc(RefKind) ==
               AArch64MCExpr::VK_SABS) {
      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");

      // Invert the negative immediate because it will feed into a MOVN.
      if (SignedValue < 0)
        SignedValue = ~SignedValue;
      Value = static_cast<uint64_t>(SignedValue);
    } else if (Value > 0xFFFF) {
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    }
    return Value;
  }
  case AArch64::fixup_aarch64_pcrel_branch9:
    // Signed 11-bit byte offset encoded as a 9-bit immediate, scaled by 4.
    if (!isInt<11>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0b11)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x1ff;
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit byte offset encoded as a 14-bit immediate, scaled by 4.
    if (!isInt<16>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch16:
    // Unsigned PC-relative offset, so invert the negative immediate.
    SignedValue = -SignedValue;
    Value = static_cast<uint64_t>(SignedValue);
    // Check valid 18-bit unsigned range.
    if (SignedValue < 0 || SignedValue > ((1 << 18) - 1))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0b11)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0xffff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved && SignedValue != 0) {
      // MSVC link.exe and lld do not support this relocation type
      // with a non-zero offset
      Ctx.reportError(Fixup.getLoc(),
                      "cannot perform a PC-relative fixup with a non-zero "
                      "symbol offset");
    }
    // Signed 28-bit byte offset encoded as a 26-bit immediate, scaled by 4.
    if (!isInt<28>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_SecRel_2:
  case FK_SecRel_4:
    return Value;
  }
}

std::optional<MCFixupKind>
AArch64AsmBackend::getFixupKind(StringRef Name) const {
  if (!TheTriple.isOSBinFormatELF())
    return std::nullopt;

  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/AArch64.def"
#undef ELF_RELOC
                      .Case("BFD_RELOC_NONE", ELF::R_AARCH64_NONE)
                      .Case("BFD_RELOC_16", ELF::R_AARCH64_ABS16)
                      .Case("BFD_RELOC_32", ELF::R_AARCH64_ABS32)
                      .Case("BFD_RELOC_64", ELF::R_AARCH64_ABS64)
                      .Default(-1u);
  if (Type == -1u)
    return std::nullopt;
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}

/// getFixupKindContainerSizeInBytes - The number of bytes of the container
/// involved on a big-endian target, or 0 if the item is little-endian.
unsigned
AArch64AsmBackend::getFixupKindContainerSizeInBytes(unsigned Kind) const {
  if (Endian == llvm::endianness::little)
    return 0;

  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;
  case FK_Data_8:
    return 8;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch9:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_pcrel_branch16:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Instructions are always little endian
    return 0;
  }
}

void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                   const MCValue &Target,
                                   MutableArrayRef<char> Data, uint64_t Value,
                                   bool IsResolved,
                                   const MCSubtargetInfo *STI) const {
  if (Fixup.getTargetKind() == FK_Data_8 && TheTriple.isOSBinFormatELF()) {
    auto RefKind = static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
    AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
    if (SymLoc == AArch64AuthMCExpr::VK_AUTH ||
        SymLoc == AArch64AuthMCExpr::VK_AUTHADDR) {
      assert(Value == 0);
      const auto *Expr = cast<AArch64AuthMCExpr>(Fixup.getValue());
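      // Pack the ptrauth schema into the fixup value: the discriminator in
      // bits [47:32], the key in bits [61:60], and the address-diversity
      // flag in bit 63.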
      Value = (uint64_t(Expr->getDiscriminator()) << 32) |
              (uint64_t(Expr->getKey()) << 60) |
              (uint64_t(Expr->hasAddressDiversity()) << 63);
    }
  }

  if (!Value)
    return; // Doesn't change encoding.
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  unsigned NumBytes = getFixupKindNumBytes(Kind);
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  int64_t SignedValue = static_cast<int64_t>(Value);
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FullSizeInBytes = getFixupKindContainerSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FullSizeInBytes == 0) {
    // Handle as little-endian
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian
    assert((Offset + FullSizeInBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FullSizeInBytes - 1 - i;
      Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }

  // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
  // handle this more cleanly. This may affect the output of -show-mc-encoding.
  AArch64MCExpr::VariantKind RefKind =
      static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
  if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS ||
      (!RefKind && Fixup.getTargetKind() == AArch64::fixup_aarch64_movw)) {
    // If the immediate is negative, generate MOVN else MOVZ.
    // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
    if (SignedValue < 0)
      Data[Offset + 3] &= ~(1 << 6);
    else
      Data[Offset + 3] |= (1 << 6);
  }
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value) const {
  // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
  // into the targets for now.
  //
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

void AArch64AsmBackend::relaxInstruction(MCInst &Inst,
                                         const MCSubtargetInfo &STI) const {
  llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented");
}

bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                     const MCSubtargetInfo *STI) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
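  // 0xd503201f is the AArch64 NOP encoding; write it in little-endian byte
  // order.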
  for (uint64_t i = 0; i != Count; ++i)
    OS.write("\x1f\x20\x03\xd5", 4);
  return true;
}

bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                              const MCFixup &Fixup,
                                              const MCValue &Target,
                                              const uint64_t,
                                              const MCSubtargetInfo *STI) {
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return true;

  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21)
    return true;

  return false;
}

namespace {

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// No compact unwind encoding available. Instead the low 23 bits of
  /// the compact unwind encoding are the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF
  /// info for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16-byte aligned.
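  /// For example, a 64-byte frame encodes as (64 / 16) << 12 == 0x4000.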
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    uint32_t CPUType = cantFail(MachO::getCPUType(TheTriple));
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TheTriple));
    return createAArch64MachObjectWriter(CPUType, CPUSubType,
                                         TheTriple.isArch32Bit());
  }

  /// Generate the compact unwind encoding from the CFI directives.
  uint64_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
                                         const MCContext *Ctxt) const override {
    ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;
    if (!isDarwinCanonicalPersonality(FI->Personality) &&
        !Ctxt->emitCompactUnwindNonCanonical())
      return CU::UNWIND_ARM64_MODE_DWARF;

    bool HasFP = false;
    uint64_t StackSize = 0;

    uint64_t CompactUnwindEncoding = 0;
    int64_t CurOffset = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        MCRegister XReg =
            getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));

        // CFA registers other than FP are not supported by compact unwind.
        // Fall back on DWARF.
        // FIXME: When opt-remarks are supported in MC, add a remark to notify
        // the user.
        if (XReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (i + 2 >= e)
          return CU::UNWIND_ARM64_MODE_DWARF;

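        // A frame record is described by ".cfi_def_cfa" followed by two
        // ".cfi_offset" records, for LR and then FP, with FP saved 8 bytes
        // below LR.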
        const MCCFIInstruction &LRPush = Instrs[++i];
        if (LRPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        const MCCFIInstruction &FPPush = Instrs[++i];
        if (FPPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (FPPush.getOffset() + 8 != LRPush.getOffset())
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = FPPush.getOffset();

        MCRegister LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
        MCRegister FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        if (LRReg != AArch64::LR || FPReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        if (StackSize != 0)
          return CU::UNWIND_ARM64_MODE_DWARF;
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        MCRegister Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (CurOffset != 0 && Inst.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst.getOffset();

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        MCRegister Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true);

        if (Inst2.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst2.getOffset();

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

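        // Each mask below checks that no higher-numbered pair bit has been
        // set yet, enforcing the register-number-order requirement.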
        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair   = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
};

} // end anonymous namespace

namespace {

class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64ELFObjectWriter(OSABI, IsILP32);
  }
};

} // end anonymous namespace

namespace {
class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64WinCOFFObjectWriter(TheTriple);
  }
};
} // end anonymous namespace

MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO()) {
    return new DarwinAArch64AsmBackend(T, TheTriple, MRI);
  }

  if (TheTriple.isOSBinFormatCOFF())
    return new COFFAArch64AsmBackend(T, TheTriple);

  assert(TheTriple.isOSBinFormatELF() && "Invalid target");

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
                                  IsILP32);
}

MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false,
                                  IsILP32);
}