//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;

namespace {

class AArch64AsmBackend : public MCAsmBackend {
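  // All PC-relative fixups here are applied against a PC value aligned down
  // to a 32-bit boundary (FKF_IsAlignedDownTo32Bits).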
  static const unsigned PCRelFlagVal =
      MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;
  Triple TheTriple;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(IsLittleEndian ? support::little : support::big),
        TheTriple(TT) {}

  unsigned getNumFixupKinds() const override {
    return AArch64::NumTargetFixupKinds;
  }

  Optional<MCFixupKind> getFixupKind(StringRef Name) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                           Offset (bits) Size (bits)     Flags
        {"fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_add_imm12", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
        {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_movw", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal},
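        // tlsdesc_call patches no bits; it exists only so that a relocation
        // can be emitted for the BLR in a TLS descriptor call sequence.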
        {"fixup_aarch64_tlsdesc_call", 0, 0, 0}};

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;
  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;

  void HandleAssemblerFlag(MCAssemblerFlag Flag) {}

  unsigned getPointerSize() const { return 8; }

  unsigned getFixupKindContainerSizeInBytes(unsigned Kind) const;

  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target) override;
};

} // end anonymous namespace

/// The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_NONE:
  case AArch64::fixup_aarch64_tlsdesc_call:
    return 0;

  case FK_Data_1:
    return 1;

  case FK_Data_2:
  case FK_SecRel_2:
    return 2;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
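    // All of these fixups patch bits [23:5] at most, so only the low three
    // bytes of the instruction word can change.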
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
  case FK_SecRel_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}

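// ADR and ADRP scatter their 21-bit immediate across the instruction word:
// the low two bits (immlo) go in bits 30:29 and the high 19 bits (immhi) in
// bits 23:5. For example, Value = 0x1005 gives lo2 = 1 and hi19 = 0x401, so
// the returned pattern is (0x401 << 5) | (1 << 29).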
static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
                                 uint64_t Value, MCContext &Ctx,
                                 const Triple &TheTriple, bool IsResolved) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (SignedValue > 2097151 || SignedValue < -2097152)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF())
      return AdrImmBits(Value & 0x1fffffULL);
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 21-bit immediate
    if (SignedValue > 2097151 || SignedValue < -2097152)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (Value >= 0x1000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (Value >= 0x2000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (Value >= 0x4000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (Value >= 0x8000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (Value >= 0x10000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
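  // For example, `movz x0, #:abs_g1:sym` reaches the case below with VK_G1:
  // the symbol's value is shifted right by 16 so that its bits [31:16] land
  // in the 16-bit immediate field of the MOVZ.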
  case AArch64::fixup_aarch64_movw: {
    AArch64MCExpr::VariantKind RefKind =
        static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
    if (AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_ABS &&
        AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_SABS) {
      // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
      // ever be resolved in the assembler.
      Ctx.reportError(Fixup.getLoc(),
                      "relocation for a thread-local variable points to an "
                      "absolute symbol");
      return Value;
    }

    if (!IsResolved) {
      // FIXME: Figure out when this can actually happen, and verify our
      // behavior.
      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
                                      "implemented");
      return Value;
    }

    if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        SignedValue = SignedValue >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        SignedValue = SignedValue >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        SignedValue = SignedValue >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }
    } else {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        Value = Value >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        Value = Value >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        Value = Value >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }
    }

    if (RefKind & AArch64MCExpr::VK_NC) {
      Value &= 0xFFFF;
    } else if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");

      // Invert the negative immediate because it will feed into a MOVN.
      if (SignedValue < 0)
        SignedValue = ~SignedValue;
      Value = static_cast<uint64_t>(SignedValue);
    } else if (Value > 0xFFFF) {
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    }
    return Value;
  }
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (SignedValue > 32767 || SignedValue < -32768)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Signed 28-bit immediate
    if (SignedValue > 134217727 || SignedValue < -134217728)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_NONE:
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_SecRel_2:
  case FK_SecRel_4:
    return Value;
  }
}

Optional<MCFixupKind> AArch64AsmBackend::getFixupKind(StringRef Name) const {
  if (TheTriple.isOSBinFormatELF() && Name == "R_AARCH64_NONE")
    return FK_NONE;
  return MCAsmBackend::getFixupKind(Name);
}

/// getFixupKindContainerSizeInBytes - The number of bytes of the container
/// involved in big-endian, or 0 if the item is little-endian.
unsigned
AArch64AsmBackend::getFixupKindContainerSizeInBytes(unsigned Kind) const {
  if (Endian == support::little)
    return 0;

  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;
  case FK_Data_8:
    return 8;

  case AArch64::fixup_aarch64_tlsdesc_call:
  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Instructions are always little endian
    return 0;
  }
}

void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                   const MCValue &Target,
                                   MutableArrayRef<char> Data, uint64_t Value,
                                   bool IsResolved,
                                   const MCSubtargetInfo *STI) const {
  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  if (!Value)
    return; // Doesn't change encoding.
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  int64_t SignedValue = static_cast<int64_t>(Value);
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FullSizeInBytes = getFixupKindContainerSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FullSizeInBytes == 0) {
    // Handle as little-endian
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian
    assert(Offset + FullSizeInBytes <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FullSizeInBytes - 1 - i;
      Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }

  // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
  // handle this more cleanly. This may affect the output of -show-mc-encoding.
  AArch64MCExpr::VariantKind RefKind =
      static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
  if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
    // If the immediate is negative, generate MOVN; otherwise generate MOVZ.
    // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
    if (SignedValue < 0)
      Data[Offset + 3] &= ~(1 << 6);
    else
      Data[Offset + 3] |= (1 << 6);
  }
}

bool AArch64AsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                          const MCSubtargetInfo &STI) const {
  return false;
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value,
                                             const MCRelaxableFragment *DF,
                                             const MCAsmLayout &Layout) const {
  // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
  // into the targets for now.
  //
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

void AArch64AsmBackend::relaxInstruction(const MCInst &Inst,
                                         const MCSubtargetInfo &STI,
                                         MCInst &Res) const {
  llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented");
}

bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
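  // 0xd503201f is the canonical A64 NOP (HINT #0).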
  for (uint64_t i = 0; i != Count; ++i)
    support::endian::write<uint32_t>(OS, 0xd503201f, Endian);
  return true;
}

bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                              const MCFixup &Fixup,
                                              const MCValue &Target) {
  unsigned Kind = Fixup.getKind();
  if (Kind == FK_NONE)
    return true;

  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21)
    return true;

  AArch64MCExpr::VariantKind RefKind =
      static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
  AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
  // An LDR that loads through the GOT always needs a relocation.
  if (Kind == AArch64::fixup_aarch64_ldr_pcrel_imm19 &&
      SymLoc == AArch64MCExpr::VK_GOT)
    return true;
  return false;
}

namespace {

namespace CU {

/// Compact unwind encoding values.
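/// These values mirror the UNWIND_ARM64_* constants defined in Darwin's
/// <mach-o/compact_unwind_encoding.h>.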
enum CompactUnwindEncodings {
  /// A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// No compact unwind encoding available. Instead the low 23 bits of
  /// the compact unwind encoding hold the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF info
  /// for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;
  bool IsILP32;

  /// Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
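  /// For example, a 64-byte frameless frame is encoded as (64 / 16) << 12.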
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI, bool IsILP32)
      : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    if (IsILP32)
      return createAArch64MachObjectWriter(
          MachO::CPU_TYPE_ARM64_32, MachO::CPU_SUBTYPE_ARM64_32_V8, true);
    else
      return createAArch64MachObjectWriter(MachO::CPU_TYPE_ARM64,
                                           MachO::CPU_SUBTYPE_ARM64_ALL, false);
  }

  /// Generate the compact unwind encoding from the CFI directives.
  uint32_t generateCompactUnwindEncoding(
                             ArrayRef<MCCFIInstruction> Instrs) const override {
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;

    bool HasFP = false;
    unsigned StackSize = 0;

    uint32_t CompactUnwindEncoding = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        unsigned XReg =
            getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));

        // CFA registers other than FP are not supported by compact unwind.
        // Fall back on DWARF.
        // FIXME: When opt-remarks are supported in MC, add a remark to notify
        // the user.
        if (XReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        assert(XReg == AArch64::FP && "Invalid frame pointer!");
        assert(i + 2 < e && "Insufficient CFI instructions to define a frame!");

        const MCCFIInstruction &LRPush = Instrs[++i];
        assert(LRPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Link register not pushed!");
        const MCCFIInstruction &FPPush = Instrs[++i];
        assert(FPPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Frame pointer not pushed!");

        unsigned LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
        unsigned FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        assert(LRReg == AArch64::LR && FPReg == AArch64::FP &&
               "Pushing invalid registers for frame!");

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        assert(StackSize == 0 && "We already have the CFA offset!");
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        unsigned Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        unsigned Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true);

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair   = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
};

} // end anonymous namespace

namespace {

class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64ELFObjectWriter(OSABI, IsILP32);
  }
};

} // end anonymous namespace

namespace {
class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64WinCOFFObjectWriter();
  }
};
} // end anonymous namespace

MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO()) {
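    // Mach-O arm64_32 (e.g. watchOS) is an ILP32 environment; the triple
    // reports it as a 32-bit architecture.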
    const bool IsILP32 = TheTriple.isArch32Bit();
    return new DarwinAArch64AsmBackend(T, TheTriple, MRI, IsILP32);
  }

  if (TheTriple.isOSBinFormatCOFF())
    return new COFFAArch64AsmBackend(T, TheTriple);

  assert(TheTriple.isOSBinFormatELF() && "Invalid target");

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = Options.getABIName() == "ilp32";
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
                                  IsILP32);
}

MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = Options.getABIName() == "ilp32";
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false,
                                  IsILP32);
}
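
// A sketch of how these factory functions are typically hooked up; the actual
// registration lives in AArch64MCTargetDesc.cpp:
//   TargetRegistry::RegisterMCAsmBackend(getTheAArch64leTarget(),
//                                        createAArch64leAsmBackend);
//   TargetRegistry::RegisterMCAsmBackend(getTheAArch64beTarget(),
//                                        createAArch64beAsmBackend);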