//===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMAsmBackend.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMAsmBackendDarwin.h"
#include "MCTargetDesc/ARMAsmBackendELF.h"
#include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
#include "MCTargetDesc/ARMFixupKinds.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {
class ARMELFObjectWriter : public MCELFObjectTargetWriter {
public:
  ARMELFObjectWriter(uint8_t OSABI)
      : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
                                /*HasRelocationAddend*/ false) {}
};
} // end anonymous namespace

std::optional<MCFixupKind> ARMAsmBackend::getFixupKind(StringRef Name) const {
  return std::nullopt;
}

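// This override lets `.reloc` directives refer to ARM relocation types by name
// (e.g. `.reloc ., R_ARM_NONE, foo`), plus a few BFD_RELOC_* aliases for GAS
// compatibility; unrecognised names fall back to std::nullopt.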
std::optional<MCFixupKind>
ARMAsmBackendELF::getFixupKind(StringRef Name) const {
  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/ARM.def"
#undef ELF_RELOC
                      .Case("BFD_RELOC_NONE", ELF::R_ARM_NONE)
                      .Case("BFD_RELOC_8", ELF::R_ARM_ABS8)
                      .Case("BFD_RELOC_16", ELF::R_ARM_ABS16)
                      .Case("BFD_RELOC_32", ELF::R_ARM_ABS32)
                      .Default(-1u);
  if (Type == -1u)
    return std::nullopt;
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}

const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  unsigned IsPCRelConstant =
      MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_Constant;
  const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant},
      {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_ldst_abs_12", 0, 32, 0},
      {"fixup_thumb_adr_pcrel_10", 0, 8,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate scattered into two chunks, bits 0-11 and
      // 16-19 of the instruction.
      {"fixup_arm_movt_hi16", 0, 20, 0},
      {"fixup_arm_movw_lo16", 0, 20, 0},
      {"fixup_t2_movt_hi16", 0, 20, 0},
      {"fixup_t2_movw_lo16", 0, 20, 0},
      {"fixup_arm_mod_imm", 0, 12, 0},
      {"fixup_t2_so_imm", 0, 26, 0},
      {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfcsel_else_target", 0, 32, 0},
      {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}};
  const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant},
      {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_ldst_abs_12", 0, 32, 0},
      {"fixup_thumb_adr_pcrel_10", 8, 8,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate scattered into two chunks, bits 0-11 and
      // 16-19 of the instruction.
      {"fixup_arm_movt_hi16", 12, 20, 0},
      {"fixup_arm_movw_lo16", 12, 20, 0},
      {"fixup_t2_movt_hi16", 12, 20, 0},
      {"fixup_t2_movw_lo16", 12, 20, 0},
      {"fixup_arm_mod_imm", 20, 12, 0},
      {"fixup_t2_so_imm", 26, 6, 0},
      {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfcsel_else_target", 0, 32, 0},
      {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}};

  // Fixup kinds from .reloc directive are like R_ARM_NONE. They do not require
  // any extra processing.
  if (Kind >= FirstLiteralRelocationKind)
    return MCAsmBackend::getFixupKindInfo(FK_NONE);

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return (Endian == support::little ? InfosLE
                                    : InfosBE)[Kind - FirstTargetFixupKind];
}

void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
  switch (Flag) {
  default:
    break;
  case MCAF_Code16:
    setIsThumb(true);
    break;
  case MCAF_Code32:
    setIsThumb(false);
    break;
  }
}

unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op,
                                         const MCSubtargetInfo &STI) const {
  bool HasThumb2 = STI.hasFeature(ARM::FeatureThumb2);
  bool HasV8MBaselineOps = STI.hasFeature(ARM::HasV8MBaselineOps);

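  // Map each narrow (16-bit) opcode to its wide replacement where one exists.
  // For example, a tBcc whose target lies outside the +/-256 byte range of the
  // 16-bit encoding can be rewritten as t2Bcc when Thumb2 is available.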
  switch (Op) {
  default:
    return Op;
  case ARM::tBcc:
    return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
  case ARM::tLDRpci:
    return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
  case ARM::tADR:
    return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
  case ARM::tB:
    return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
  case ARM::tCBZ:
    return ARM::tHINT;
  case ARM::tCBNZ:
    return ARM::tHINT;
  }
}

bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                      const MCSubtargetInfo &STI) const {
  if (getRelaxedOpcode(Inst.getOpcode(), STI) != Inst.getOpcode())
    return true;
  return false;
}

static const char *checkPCRelOffset(uint64_t Value, int64_t Min, int64_t Max) {
  int64_t Offset = int64_t(Value) - 4;
  if (Offset < Min || Offset > Max)
    return "out of range pc-relative fixup value";
  return nullptr;
}

const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
                                                    uint64_t Value) const {
  switch (Fixup.getTargetKind()) {
  case ARM::fixup_arm_thumb_br: {
    // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value doesn't fit in the signed 12-bit displacement.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 2046 || Offset < -2048)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_bcc: {
    // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value doesn't fit in the signed 9-bit displacement.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 254 || Offset < -256)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp: {
    // If the immediate is negative, greater than 1020, or not a multiple
    // of four, the wide version of the instruction must be used.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset & 3)
      return "misaligned pc-relative fixup value";
    else if (Offset > 1020 || Offset < 0)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_cb: {
    // If a Thumb CBZ or CBNZ instruction targets the instruction immediately
    // after it, the offset is actually out of range for the instruction, so
    // it will be changed to a NOP.
    int64_t Offset = (Value & ~1);
    if (Offset == 2)
      return "will be converted to nop";
    break;
  }
  case ARM::fixup_bf_branch:
    return checkPCRelOffset(Value, 0, 30);
  case ARM::fixup_bf_target:
    return checkPCRelOffset(Value, -0x10000, +0xfffe);
  case ARM::fixup_bfl_target:
    return checkPCRelOffset(Value, -0x40000, +0x3fffe);
  case ARM::fixup_bfc_target:
    return checkPCRelOffset(Value, -0x1000, +0xffe);
  case ARM::fixup_wls:
    return checkPCRelOffset(Value, 0, +0xffe);
  case ARM::fixup_le:
    // The offset field in the LE and LETP instructions is an 11-bit
    // value shifted left by 2 (i.e. 0,2,4,...,4094), and it is
    // interpreted as a negative offset from the value read from pc,
    // i.e. from instruction_address+4.
    //
    // So an LE instruction can in principle address the instruction
    // immediately after itself, or (not very usefully) the address
    // half way through the 4-byte LE.
    return checkPCRelOffset(Value, -0xffe, 0);
  case ARM::fixup_bfcsel_else_target: {
    if (Value != 2 && Value != 4)
      return "out of range label-relative fixup value";
    break;
  }

  default:
    llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
  }
  return nullptr;
}

bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  return reasonForFixupRelaxation(Fixup, Value);
}

void ARMAsmBackend::relaxInstruction(MCInst &Inst,
                                     const MCSubtargetInfo &STI) const {
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode(), STI);

  // Report a fatal error if we get here with an instruction we don't know how
  // to relax.
  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
  // have to change the operands too.
  if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
      RelaxedOp == ARM::tHINT) {
    MCInst Res;
    Res.setOpcode(RelaxedOp);
    Res.addOperand(MCOperand::createImm(0));
    Res.addOperand(MCOperand::createImm(14));
    Res.addOperand(MCOperand::createReg(0));
    Inst = std::move(Res);
    return;
  }

  // The rest of instructions we're relaxing have the same operands.
  // We just need to update to the proper opcode.
  Inst.setOpcode(RelaxedOp);
}

bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                 const MCSubtargetInfo *STI) const {
  const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
  const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
  const uint32_t ARMv4_NopEncoding = 0xe1a00000;   // using MOV r0,r0
  const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
  if (isThumb()) {
    const uint16_t nopEncoding =
        hasNOP(STI) ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
    uint64_t NumNops = Count / 2;
    for (uint64_t i = 0; i != NumNops; ++i)
      support::endian::write(OS, nopEncoding, Endian);
    if (Count & 1)
      OS << '\0';
    return true;
  }
  // ARM mode
  const uint32_t nopEncoding =
      hasNOP(STI) ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
  uint64_t NumNops = Count / 4;
  for (uint64_t i = 0; i != NumNops; ++i)
    support::endian::write(OS, nopEncoding, Endian);
  // FIXME: should this function return false when unable to write exactly
  // 'Count' bytes with NOP encodings?
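  // For instance, a 6-byte pad in ARM mode is emitted as one 4-byte NOP
  // followed by two zero bytes of padding.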
  switch (Count % 4) {
  default:
    break; // No leftover bytes to write
  case 1:
    OS << '\0';
    break;
  case 2:
    OS.write("\0\0", 2);
    break;
  case 3:
    OS.write("\0\0\xa0", 3);
    break;
  }

  return true;
}

static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
  if (IsLittleEndian) {
    // Note that the halfwords are stored high first and low second in thumb;
    // so we need to swap the fixup value here to map properly.
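    // For example, a value of 0xAABBCCDD is returned as 0xCCDDAABB.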
    uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
    Swapped |= (Value & 0x0000FFFF) << 16;
    return Swapped;
  } else
    return Value;
}

static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
                              bool IsLittleEndian) {
  uint32_t Value;

  if (IsLittleEndian) {
    Value = (SecondHalf & 0xFFFF) << 16;
    Value |= (FirstHalf & 0xFFFF);
  } else {
    Value = (SecondHalf & 0xFFFF);
    Value |= (FirstHalf & 0xFFFF) << 16;
  }

  return Value;
}

unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
                                         const MCFixup &Fixup,
                                         const MCValue &Target, uint64_t Value,
                                         bool IsResolved, MCContext &Ctx,
                                         const MCSubtargetInfo* STI) const {
  unsigned Kind = Fixup.getKind();

  // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT
  // and .word relocations they put the Thumb bit into the addend if possible.
  // Other relocation types don't want this bit though (branches couldn't encode
  // it if it *was* present, and no other relocations exist) and it can
  // interfere with checking valid expressions.
  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    if (A->hasSubsectionsViaSymbols() && Asm.isThumbFunc(&A->getSymbol()) &&
        A->getSymbol().isExternal() &&
        (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 ||
         Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 ||
         Kind == ARM::fixup_t2_movt_hi16))
      Value |= 1;
  }

  switch (Kind) {
  default:
    return 0;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
    return Value;
  case FK_SecRel_2:
    return Value;
  case FK_SecRel_4:
    return Value;
  case ARM::fixup_arm_movt_hi16:
    assert(STI != nullptr);
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      Value >>= 16;
    [[fallthrough]];
  case ARM::fixup_arm_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned Lo12 = Value & 0x0FFF;
    // inst{19-16} = Hi4;
    // inst{11-0} = Lo12;
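    // e.g. Value = 0x1234 gives Hi4 = 0x1, Lo12 = 0x234, i.e. 0x00010234.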
    Value = (Hi4 << 16) | (Lo12);
    return Value;
  }
  case ARM::fixup_t2_movt_hi16:
    assert(STI != nullptr);
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      Value >>= 16;
    [[fallthrough]];
  case ARM::fixup_t2_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned i = (Value & 0x800) >> 11;
    unsigned Mid3 = (Value & 0x700) >> 8;
    unsigned Lo8 = Value & 0x0FF;
    // inst{19-16} = Hi4;
    // inst{26} = i;
    // inst{14-12} = Mid3;
    // inst{7-0} = Lo8;
    Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
    return swapHalfWords(Value, Endian == support::little);
  }
  case ARM::fixup_arm_ldst_pcrel_12:
    // ARM PC-relative values are offset by 8.
    Value -= 4;
    [[fallthrough]];
  case ARM::fixup_t2_ldst_pcrel_12:
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value -= 4;
    [[fallthrough]];
  case ARM::fixup_arm_ldst_abs_12: {
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    if (Value >= 4096) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10,
    // but with 16-bit halfwords swapped.
    if (Kind == ARM::fixup_t2_ldst_pcrel_12)
      return swapHalfWords(Value, Endian == support::little);

    return Value;
  }
  case ARM::fixup_arm_adr_pcrel_12: {
    // ARM PC-relative values are offset by 8.
    Value -= 8;
    unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 2; // 0b0010
    }
    if (ARM_AM::getSOImmVal(Value) == -1) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Encode the immediate and shift the opcode into place.
    return ARM_AM::getSOImmVal(Value) | (opc << 21);
  }

  case ARM::fixup_t2_adr_pcrel_12: {
    Value -= 4;
    unsigned opc = 0;
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 5;
    }

    uint32_t out = (opc << 21);
    out |= (Value & 0x800) << 15;
    out |= (Value & 0x700) << 4;
    out |= (Value & 0x0FF);

    return swapHalfWords(out, Endian == support::little);
  }

  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
    // These values don't encode the low two bits since they're always zero.
    // Offset by 8 just as above.
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        return 0;
    return 0xffffff & ((Value - 8) >> 2);
  case ARM::fixup_t2_uncondbranch: {
    Value = Value - 4;
    if (!isInt<25>(Value)) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    Value >>= 1; // Low bit is not encoded.

    uint32_t out = 0;
    bool I = Value & 0x800000;
    bool J1 = Value & 0x400000;
    bool J2 = Value & 0x200000;
    J1 ^= I;
    J2 ^= I;

    out |= I << 26;                 // S bit
    out |= !J1 << 13;               // J1 bit
    out |= !J2 << 11;               // J2 bit
    out |= (Value & 0x1FF800) << 5; // imm10 field
    out |= (Value & 0x0007FF);      // imm11 field

    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_t2_condbranch: {
    Value = Value - 4;
    if (!isInt<21>(Value)) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    Value >>= 1; // Low bit is not encoded.

    uint64_t out = 0;
    out |= (Value & 0x80000) << 7; // S bit
    out |= (Value & 0x40000) >> 7; // J2 bit
    out |= (Value & 0x20000) >> 4; // J1 bit
    out |= (Value & 0x1F800) << 5; // imm6 field
    out |= (Value & 0x007FF);      // imm11 field

    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_arm_thumb_bl: {
    if (!isInt<25>(Value - 4) ||
        (!STI->hasFeature(ARM::FeatureThumb2) &&
         !STI->hasFeature(ARM::HasV8MBaselineOps) &&
         !STI->hasFeature(ARM::HasV6MOps) &&
         !isInt<23>(Value - 4))) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    // The value doesn't encode the low bit (always zero) and is offset by
    // four. The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit
    //
    //   BL:  xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
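    // For example (illustrative values), Value = 0x1000 gives offset = 0x7FE,
    // S = 0, I1 = I2 = 0 and hence J1 = J2 = 1, imm10 = 0 and imm11 = 0x7FE.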
    uint32_t offset = (Value - 4) >> 1;
    uint32_t signBit = (offset & 0x800000) >> 23;
    uint32_t I1Bit = (offset & 0x400000) >> 22;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x200000) >> 21;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
    uint32_t imm11Bits = (offset & 0x000007FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           (uint16_t)imm11Bits);
    return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
  }
  case ARM::fixup_arm_thumb_blx: {
    // The value doesn't encode the low two bits (always zero) and is offset by
    // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit, 0 = zero.
    //
    //   BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
    if (Value % 4 != 0) {
      Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
      return 0;
    }

    uint32_t offset = (Value - 4) >> 2;
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        offset = 0;
    uint32_t signBit = (offset & 0x400000) >> 22;
    uint32_t I1Bit = (offset & 0x200000) >> 21;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x100000) >> 20;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
    uint32_t imm10LBits = (offset & 0x3FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           ((uint16_t)imm10LBits) << 1);
    return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp:
    // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
    // could have an error on our hands.
    assert(STI != nullptr);
    if (!STI->hasFeature(ARM::FeatureThumb2) && IsResolved) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    // Offset by 4, and don't encode the low two bits.
    return ((Value - 4) >> 2) & 0xff;
  case ARM::fixup_arm_thumb_cb: {
    // CB instructions can only branch to offsets in [4, 126] in multiples of 2
    // so ensure that the raw value LSB is zero and it lies in [2, 130].
    // An offset of 2 will be relaxed to a NOP.
    if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Offset by 4 and don't encode the lower bit, which is always 0.
    // FIXME: diagnose if no Thumb2
    uint32_t Binary = (Value - 4) >> 1;
    return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
  }
  case ARM::fixup_arm_thumb_br:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    assert(STI != nullptr);
    if (!STI->hasFeature(ARM::FeatureThumb2) &&
        !STI->hasFeature(ARM::HasV8MBaselineOps)) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0x7ff;
  case ARM::fixup_arm_thumb_bcc:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    assert(STI != nullptr);
    if (!STI->hasFeature(ARM::FeatureThumb2)) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0xff;
  case ARM::fixup_arm_pcrel_10_unscaled: {
    Value = Value - 8; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value = (Value & 0xf) | ((Value & 0xf0) << 4);
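    // e.g. an 8-bit offset of 0x34 becomes 0x304 here, before the add/subtract
    // bit is OR'd in at bit 23 below.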
    return Value | (isAdd << 23);
  }
  case ARM::fixup_arm_pcrel_10:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    [[fallthrough]];
  case ARM::fixup_t2_pcrel_10: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low two bits since they're always zero.
    Value >>= 2;
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_10)
      return swapHalfWords(Value, Endian == support::little);

    return Value;
  }
  case ARM::fixup_arm_pcrel_9:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    [[fallthrough]];
  case ARM::fixup_t2_pcrel_9: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low bit since it's always zero.
    if (Value & 1) {
      Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
      return 0;
    }
    Value >>= 1;
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_9)
      return swapHalfWords(Value, Endian == support::little);

    return Value;
  }
  case ARM::fixup_arm_mod_imm:
    Value = ARM_AM::getSOImmVal(Value);
    if (Value >> 12) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    return Value;
  case ARM::fixup_t2_so_imm: {
    Value = ARM_AM::getT2SOImmVal(Value);
    if ((int64_t)Value < 0) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    // Value will contain a 12-bit value broken up into a 4-bit shift in bits
    // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
    // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
    // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
    // half-word.
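    // For example (illustrative), an encoded value of 0x5A7 gives i = 0,
    // imm3 = 0b101 and imm8 = 0xA7, i.e. 0x000050A7 before the half-words
    // are swapped.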
    uint64_t EncValue = 0;
    EncValue |= (Value & 0x800) << 15;
    EncValue |= (Value & 0x700) << 4;
    EncValue |= (Value & 0xff);
    return swapHalfWords(EncValue, Endian == support::little);
  }
  case ARM::fixup_bf_branch: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = (((Value - 4) >> 1) & 0xf) << 23;
    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = 0;
    uint32_t HighBitMask = (Kind == ARM::fixup_bf_target ? 0xf800 :
                            Kind == ARM::fixup_bfl_target ? 0x3f800 : 0x800);
    out |= (((Value - 4) >> 1) & 0x1) << 11;
    out |= (((Value - 4) >> 1) & 0x7fe);
    out |= (((Value - 4) >> 1) & HighBitMask) << 5;
    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_bfcsel_else_target: {
    // If this is a fixup of a branch future's else target then it should be a
    // constant MCExpr representing the distance between the branch's target
    // and the instruction after that same branch.
    Value = Target.getConstant();

    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = ((Value >> 2) & 1) << 17;
    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_wls:
  case ARM::fixup_le: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint64_t real_value = Value - 4;
    uint32_t out = 0;
    if (Kind == ARM::fixup_le)
      real_value = -real_value;
    out |= ((real_value >> 1) & 0x1) << 11;
    out |= ((real_value >> 1) & 0x7fe);
    return swapHalfWords(out, Endian == support::little);
  }
  }
}

bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                          const MCFixup &Fixup,
                                          const MCValue &Target) {
  const MCSymbolRefExpr *A = Target.getSymA();
  const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
  const unsigned FixupKind = Fixup.getKind();
  if (FixupKind >= FirstLiteralRelocationKind)
    return true;
  if (FixupKind == ARM::fixup_arm_thumb_bl) {
    assert(Sym && "How did we resolve this?");

    // If the symbol is external the linker will handle it.
    // FIXME: Should we handle it as an optimization?

    // If the symbol is out of range, produce a relocation and hope the
    // linker can handle it. GNU AS produces an error in this case.
    if (Sym->isExternal())
      return true;
  }
  // Create relocations for unconditional branches to function symbols with
  // a different execution mode in ELF binaries.
  if (Sym && Sym->isELF()) {
    unsigned Type = cast<MCSymbolELF>(Sym)->getType();
    if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
      if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
        return true;
      if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
                                    FixupKind == ARM::fixup_arm_thumb_bl ||
                                    FixupKind == ARM::fixup_t2_condbranch ||
                                    FixupKind == ARM::fixup_t2_uncondbranch))
        return true;
    }
  }
  // We must always generate a relocation for BL/BLX instructions if we have
  // a symbol to reference, as the linker relies on knowing the destination
  // symbol's thumb-ness to get interworking right.
  if (A && (FixupKind == ARM::fixup_arm_thumb_blx ||
            FixupKind == ARM::fixup_arm_blx ||
            FixupKind == ARM::fixup_arm_uncondbl ||
            FixupKind == ARM::fixup_arm_condbl))
    return true;
  return false;
}

/// getFixupKindNumBytes - The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
    return 1;

  case FK_Data_2:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
  case ARM::fixup_arm_mod_imm:
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_pcrel_9:
  case ARM::fixup_arm_ldst_abs_12:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
    return 3;

  case FK_Data_4:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_pcrel_9:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_t2_so_imm:
  case ARM::fixup_bf_branch:
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target:
  case ARM::fixup_bfcsel_else_target:
  case ARM::fixup_wls:
  case ARM::fixup_le:
    return 4;

  case FK_SecRel_2:
    return 2;
  case FK_SecRel_4:
    return 4;
  }
}

/// getFixupKindContainerSizeBytes - The number of bytes of the
/// container involved in big endian.
static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;

  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
    // Instruction size is 2 bytes.
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_pcrel_9:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_pcrel_9:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_arm_mod_imm:
  case ARM::fixup_t2_so_imm:
  case ARM::fixup_bf_branch:
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target:
  case ARM::fixup_bfcsel_else_target:
  case ARM::fixup_wls:
  case ARM::fixup_le:
    // Instruction size is 4 bytes.
    return 4;
  }
}

void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                               const MCValue &Target,
                               MutableArrayRef<char> Data, uint64_t Value,
                               bool IsResolved,
                               const MCSubtargetInfo* STI) const {
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  MCContext &Ctx = Asm.getContext();
  Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx, STI);
  if (!Value)
    return; // Doesn't change encoding.
  const unsigned NumBytes = getFixupKindNumBytes(Kind);

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FullSizeBytes;
  if (Endian == support::big) {
    FullSizeBytes = getFixupKindContainerSizeBytes(Kind);
    assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
  }

  // For each byte of the fragment that the fixup touches, mask in the bits from
  // the fixup value. The Value has been "split up" into the appropriate
  // bitfields above.
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned Idx = Endian == support::little ? i : (FullSizeBytes - 1 - i);
    Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  UNWIND_ARM_MODE_MASK                         = 0x0F000000,
  UNWIND_ARM_MODE_FRAME                        = 0x01000000,
  UNWIND_ARM_MODE_FRAME_D                      = 0x02000000,
  UNWIND_ARM_MODE_DWARF                        = 0x04000000,

  UNWIND_ARM_FRAME_STACK_ADJUST_MASK           = 0x00C00000,

  UNWIND_ARM_FRAME_FIRST_PUSH_R4               = 0x00000001,
  UNWIND_ARM_FRAME_FIRST_PUSH_R5               = 0x00000002,
  UNWIND_ARM_FRAME_FIRST_PUSH_R6               = 0x00000004,

  UNWIND_ARM_FRAME_SECOND_PUSH_R8              = 0x00000008,
  UNWIND_ARM_FRAME_SECOND_PUSH_R9              = 0x00000010,
  UNWIND_ARM_FRAME_SECOND_PUSH_R10             = 0x00000020,
  UNWIND_ARM_FRAME_SECOND_PUSH_R11             = 0x00000040,
  UNWIND_ARM_FRAME_SECOND_PUSH_R12             = 0x00000080,

  UNWIND_ARM_FRAME_D_REG_COUNT_MASK            = 0x00000F00,

  UNWIND_ARM_DWARF_SECTION_OFFSET              = 0x00FFFFFF
};

} // end CU namespace

/// Generate compact unwind encoding for the function based on the CFI
/// instructions. If the CFI instructions describe a frame that cannot be
/// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF, which
/// tells the runtime to fall back and unwind using DWARF.
uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
    const MCDwarfFrameInfo *FI, const MCContext *Ctxt) const {
  DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
  // Only armv7k uses CFI based unwinding.
  if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
    return 0;
  // No .cfi directives means no frame.
  ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
  if (Instrs.empty())
    return 0;
  if (!isDarwinCanonicalPersonality(FI->Personality) &&
      !Ctxt->emitCompactUnwindNonCanonical())
    return CU::UNWIND_ARM_MODE_DWARF;

  // Start off assuming CFA is at SP+0.
  unsigned CFARegister = ARM::SP;
  int CFARegisterOffset = 0;
  // Mark savable registers as initially unsaved
  DenseMap<unsigned, int> RegOffsets;
  int FloatRegCount = 0;
  // Process each .cfi directive and build up compact unwind info.
  for (const MCCFIInstruction &Inst : Instrs) {
    unsigned Reg;
    switch (Inst.getOperation()) {
    case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
      CFARegisterOffset = Inst.getOffset();
      CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
      CFARegisterOffset = Inst.getOffset();
      break;
    case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
      CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpOffset: // DW_CFA_offset
      Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        RegOffsets[Reg] = Inst.getOffset();
      else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
        RegOffsets[Reg] = Inst.getOffset();
        ++FloatRegCount;
      } else {
        DEBUG_WITH_TYPE("compact-unwind",
                        llvm::dbgs() << ".cfi_offset on unknown register="
                                     << Inst.getRegister() << "\n");
        return CU::UNWIND_ARM_MODE_DWARF;
      }
      break;
    case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
      // Ignore
      break;
    default:
      // Directive not convertible to compact unwind, bail out.
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs()
                          << "CFI directive not compatible with compact "
                             "unwind encoding, opcode=" << Inst.getOperation()
                          << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
      break;
    }
  }

  // If no frame set up, return no unwind info.
  if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
    return 0;

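  // In the canonical frame expected here (push {r7, lr}; mov r7, sp, possibly
  // preceded by a small var-args stack adjustment), the CFA ends up at
  // r7 + 8 + StackAdjust, with lr saved at CFA - 4 - StackAdjust and r7 at
  // CFA - 8 - StackAdjust; the checks below verify exactly that layout.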
  // Verify standard frame (lr/r7) was used.
  if (CFARegister != ARM::R7) {
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
                                                   << CFARegister
                                                   << " instead of r7\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  int StackAdjust = CFARegisterOffset - 8;
  if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs()
                        << "LR not saved as standard frame, StackAdjust="
                        << StackAdjust
                        << ", CFARegisterOffset=" << CFARegisterOffset
                        << ", lr save at offset=" << RegOffsets[14] << "\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "r7 not saved as standard frame\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;

  // If var-args are used, there may be a stack adjust required.
  switch (StackAdjust) {
  case 0:
    break;
  case 4:
    CompactUnwindEncoding |= 0x00400000;
    break;
  case 8:
    CompactUnwindEncoding |= 0x00800000;
    break;
  case 12:
    CompactUnwindEncoding |= 0x00C00000;
    break;
  default:
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
                                          << ".cfi_def_cfa stack adjust ("
                                          << StackAdjust << ") out of range\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // If r6 is saved, it must be right below r7.
  static struct {
    unsigned Reg;
    unsigned Encoding;
  } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
                   {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
                   {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
                   {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
                   {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
                   {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
                   {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
                   {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};

  int CurOffset = -8 - StackAdjust;
  for (auto CSReg : GPRCSRegs) {
    auto Offset = RegOffsets.find(CSReg.Reg);
    if (Offset == RegOffsets.end())
      continue;

    int RegOffset = Offset->second;
    if (RegOffset != CurOffset - 4) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
                                   << RegOffset << " but only supported at "
                                   << CurOffset << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CompactUnwindEncoding |= CSReg.Encoding;
    CurOffset -= 4;
  }

  // If no floats saved, we are done.
  if (FloatRegCount == 0)
    return CompactUnwindEncoding;

  // Switch mode to include D register saving.
  CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
  CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;

  // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
  // but needs coordination with the linker and libunwind.
  if (FloatRegCount > 4) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "unsupported number of D registers saved ("
                                 << FloatRegCount << ")\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // Floating point registers must either be saved sequentially, or we defer to
  // DWARF. No gaps allowed here so check that each saved d-register is
  // precisely where it should be.
  static unsigned FPRCSRegs[] = { ARM::D8, ARM::D10, ARM::D12, ARM::D14 };
  for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
    auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
    if (Offset == RegOffsets.end()) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " not saved\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    } else if (Offset->second != CurOffset - 8) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " saved at " << Offset->second
                                   << ", expected at " << CurOffset - 8
                                   << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CurOffset -= 8;
  }

  return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
}

static MCAsmBackend *createARMAsmBackend(const Target &T,
                                         const MCSubtargetInfo &STI,
                                         const MCRegisterInfo &MRI,
                                         const MCTargetOptions &Options,
                                         support::endianness Endian) {
  const Triple &TheTriple = STI.getTargetTriple();
  switch (TheTriple.getObjectFormat()) {
  default:
    llvm_unreachable("unsupported object format");
  case Triple::MachO:
    return new ARMAsmBackendDarwin(T, STI, MRI);
  case Triple::COFF:
    assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
    return new ARMAsmBackendWinCOFF(T, STI.getTargetTriple().isThumb());
  case Triple::ELF:
    assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
    uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
    return new ARMAsmBackendELF(T, STI.getTargetTriple().isThumb(), OSABI,
                                Endian);
  }
}

MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, support::little);
}

MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, support::big);
}