//===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
/// A wrapper for holding a mask of the values from X86::AlignBranchBoundaryKind
class X86AlignBranchKind {
private:
  uint8_t AlignBranchKind = 0;

public:
  void operator=(const std::string &Val) {
    if (Val.empty())
      return;
    SmallVector<StringRef, 6> BranchTypes;
    StringRef(Val).split(BranchTypes, '+', -1, false);
    for (auto BranchType : BranchTypes) {
      if (BranchType == "fused")
        addKind(X86::AlignBranchFused);
      else if (BranchType == "jcc")
        addKind(X86::AlignBranchJcc);
      else if (BranchType == "jmp")
        addKind(X86::AlignBranchJmp);
      else if (BranchType == "call")
        addKind(X86::AlignBranchCall);
      else if (BranchType == "ret")
        addKind(X86::AlignBranchRet);
      else if (BranchType == "indirect")
        addKind(X86::AlignBranchIndirect);
      else {
        errs() << "invalid argument " << BranchType.str()
               << " to -x86-align-branch=; each element must be one of: fused, "
                  "jcc, jmp, call, ret, indirect.(plus separated)\n";
      }
    }
  }

  operator uint8_t() const { return AlignBranchKind; }
  void addKind(X86::AlignBranchBoundaryKind Value) { AlignBranchKind |= Value; }
};

X86AlignBranchKind X86AlignBranchKindLoc;

cl::opt<unsigned> X86AlignBranchBoundary(
    "x86-align-branch-boundary", cl::init(0),
    cl::desc(
        "Control how the assembler should align branches with NOP. If the "
        "boundary's size is not 0, it should be a power of 2 and no less "
        "than 32. Branches will be aligned to prevent from being across or "
        "against the boundary of specified size. The default value 0 does not "
        "align branches."));

cl::opt<X86AlignBranchKind, true, cl::parser<std::string>> X86AlignBranch(
    "x86-align-branch",
    cl::desc(
        "Specify types of branches to align (plus separated list of types):"
        "\njcc      indicates conditional jumps"
        "\nfused    indicates fused conditional jumps"
        "\njmp      indicates direct unconditional jumps"
        "\ncall     indicates direct and indirect calls"
        "\nret      indicates rets"
        "\nindirect indicates indirect unconditional jumps"),
    cl::location(X86AlignBranchKindLoc));
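
// Illustrative use of the two options above when driving the assembler
// directly (the llvm-mc invocation below is an assumption, not taken from
// this file; the option names are the ones defined here):
//
//   llvm-mc -triple=x86_64 -filetype=obj \
//       -x86-align-branch-boundary=32 -x86-align-branch=fused+jcc+jmp in.s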

cl::opt<bool> X86AlignBranchWithin32BBoundaries(
    "x86-branches-within-32B-boundaries", cl::init(false),
    cl::desc(
        "Align selected instructions to mitigate negative performance impact "
        "of Intel's micro code update for errata skx102.  May break "
        "assumptions about labels corresponding to particular instructions, "
        "and should be used with caution."));

cl::opt<unsigned> X86PadMaxPrefixSize(
    "x86-pad-max-prefix-size", cl::init(0),
    cl::desc("Maximum number of prefixes to use for padding"));

cl::opt<bool> X86PadForAlign(
    "x86-pad-for-align", cl::init(false), cl::Hidden,
    cl::desc("Pad previous instructions to implement align directives"));

cl::opt<bool> X86PadForBranchAlign(
    "x86-pad-for-branch-align", cl::init(true), cl::Hidden,
    cl::desc("Pad previous instructions to implement branch alignment"));

class X86ELFObjectWriter : public MCELFObjectTargetWriter {
public:
  X86ELFObjectWriter(bool is64Bit, uint8_t OSABI, uint16_t EMachine,
                     bool HasRelocationAddend, bool foobar)
    : MCELFObjectTargetWriter(is64Bit, OSABI, EMachine, HasRelocationAddend) {}
};

class X86AsmBackend : public MCAsmBackend {
  const MCSubtargetInfo &STI;
  std::unique_ptr<const MCInstrInfo> MCII;
  X86AlignBranchKind AlignBranchType;
  Align AlignBoundary;
  unsigned TargetPrefixMax = 0;

  MCInst PrevInst;
  MCBoundaryAlignFragment *PendingBA = nullptr;
  std::pair<MCFragment *, size_t> PrevInstPosition;
  bool CanPadInst;

  uint8_t determinePaddingPrefix(const MCInst &Inst) const;
  bool isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const;
  bool needAlign(const MCInst &Inst) const;
  bool canPadBranches(MCObjectStreamer &OS) const;
  bool canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const;

public:
  X86AsmBackend(const Target &T, const MCSubtargetInfo &STI)
      : MCAsmBackend(support::little), STI(STI),
        MCII(T.createMCInstrInfo()) {
    if (X86AlignBranchWithin32BBoundaries) {
      // At the moment, this defaults to aligning fused branches, unconditional
      // jumps, and (unfused) conditional jumps with nops.  Both the
      // instructions aligned and the alignment method (nop vs prefix) may
      // change in the future.
      AlignBoundary = assumeAligned(32);
      AlignBranchType.addKind(X86::AlignBranchFused);
      AlignBranchType.addKind(X86::AlignBranchJcc);
      AlignBranchType.addKind(X86::AlignBranchJmp);
    }
    // Allow overriding defaults set by master flag
    if (X86AlignBranchBoundary.getNumOccurrences())
      AlignBoundary = assumeAligned(X86AlignBranchBoundary);
    if (X86AlignBranch.getNumOccurrences())
      AlignBranchType = X86AlignBranchKindLoc;
    if (X86PadMaxPrefixSize.getNumOccurrences())
      TargetPrefixMax = X86PadMaxPrefixSize;
  }

  bool allowAutoPadding() const override;
  bool allowEnhancedRelaxation() const override;
  void emitInstructionBegin(MCObjectStreamer &OS, const MCInst &Inst) override;
  void emitInstructionEnd(MCObjectStreamer &OS, const MCInst &Inst) override;

  unsigned getNumFixupKinds() const override {
    return X86::NumTargetFixupKinds;
  }

  Optional<MCFixupKind> getFixupKind(StringRef Name) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;

  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target) override;

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;

  void relaxInstruction(MCInst &Inst,
                        const MCSubtargetInfo &STI) const override;

  bool padInstructionViaRelaxation(MCRelaxableFragment &RF,
                                   MCCodeEmitter &Emitter,
                                   unsigned &RemainingSize) const;

  bool padInstructionViaPrefix(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
                               unsigned &RemainingSize) const;

  bool padInstructionEncoding(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
                              unsigned &RemainingSize) const;

  void finishLayout(MCAssembler const &Asm, MCAsmLayout &Layout) const override;

  unsigned getMaximumNopSize() const override;

  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;
};
} // end anonymous namespace

static unsigned getRelaxedOpcodeBranch(const MCInst &Inst, bool Is16BitMode) {
  unsigned Op = Inst.getOpcode();
  switch (Op) {
  default:
    return Op;
  case X86::JCC_1:
    return (Is16BitMode) ? X86::JCC_2 : X86::JCC_4;
  case X86::JMP_1:
    return (Is16BitMode) ? X86::JMP_2 : X86::JMP_4;
  }
}

static unsigned getRelaxedOpcodeArith(const MCInst &Inst) {
  unsigned Op = Inst.getOpcode();
  switch (Op) {
  default:
    return Op;

    // IMUL
  case X86::IMUL16rri8: return X86::IMUL16rri;
  case X86::IMUL16rmi8: return X86::IMUL16rmi;
  case X86::IMUL32rri8: return X86::IMUL32rri;
  case X86::IMUL32rmi8: return X86::IMUL32rmi;
  case X86::IMUL64rri8: return X86::IMUL64rri32;
  case X86::IMUL64rmi8: return X86::IMUL64rmi32;

    // AND
  case X86::AND16ri8: return X86::AND16ri;
  case X86::AND16mi8: return X86::AND16mi;
  case X86::AND32ri8: return X86::AND32ri;
  case X86::AND32mi8: return X86::AND32mi;
  case X86::AND64ri8: return X86::AND64ri32;
  case X86::AND64mi8: return X86::AND64mi32;

    // OR
  case X86::OR16ri8: return X86::OR16ri;
  case X86::OR16mi8: return X86::OR16mi;
  case X86::OR32ri8: return X86::OR32ri;
  case X86::OR32mi8: return X86::OR32mi;
  case X86::OR64ri8: return X86::OR64ri32;
  case X86::OR64mi8: return X86::OR64mi32;

    // XOR
  case X86::XOR16ri8: return X86::XOR16ri;
  case X86::XOR16mi8: return X86::XOR16mi;
  case X86::XOR32ri8: return X86::XOR32ri;
  case X86::XOR32mi8: return X86::XOR32mi;
  case X86::XOR64ri8: return X86::XOR64ri32;
  case X86::XOR64mi8: return X86::XOR64mi32;

    // ADD
  case X86::ADD16ri8: return X86::ADD16ri;
  case X86::ADD16mi8: return X86::ADD16mi;
  case X86::ADD32ri8: return X86::ADD32ri;
  case X86::ADD32mi8: return X86::ADD32mi;
  case X86::ADD64ri8: return X86::ADD64ri32;
  case X86::ADD64mi8: return X86::ADD64mi32;

    // ADC
  case X86::ADC16ri8: return X86::ADC16ri;
  case X86::ADC16mi8: return X86::ADC16mi;
  case X86::ADC32ri8: return X86::ADC32ri;
  case X86::ADC32mi8: return X86::ADC32mi;
  case X86::ADC64ri8: return X86::ADC64ri32;
  case X86::ADC64mi8: return X86::ADC64mi32;

    // SUB
  case X86::SUB16ri8: return X86::SUB16ri;
  case X86::SUB16mi8: return X86::SUB16mi;
  case X86::SUB32ri8: return X86::SUB32ri;
  case X86::SUB32mi8: return X86::SUB32mi;
  case X86::SUB64ri8: return X86::SUB64ri32;
  case X86::SUB64mi8: return X86::SUB64mi32;

    // SBB
  case X86::SBB16ri8: return X86::SBB16ri;
  case X86::SBB16mi8: return X86::SBB16mi;
  case X86::SBB32ri8: return X86::SBB32ri;
  case X86::SBB32mi8: return X86::SBB32mi;
  case X86::SBB64ri8: return X86::SBB64ri32;
  case X86::SBB64mi8: return X86::SBB64mi32;

    // CMP
  case X86::CMP16ri8: return X86::CMP16ri;
  case X86::CMP16mi8: return X86::CMP16mi;
  case X86::CMP32ri8: return X86::CMP32ri;
  case X86::CMP32mi8: return X86::CMP32mi;
  case X86::CMP64ri8: return X86::CMP64ri32;
  case X86::CMP64mi8: return X86::CMP64mi32;

    // PUSH
  case X86::PUSH32i8:  return X86::PUSHi32;
  case X86::PUSH16i8:  return X86::PUSHi16;
  case X86::PUSH64i8:  return X86::PUSH64i32;
  }
}

static unsigned getRelaxedOpcode(const MCInst &Inst, bool Is16BitMode) {
  unsigned R = getRelaxedOpcodeArith(Inst);
  if (R != Inst.getOpcode())
    return R;
  return getRelaxedOpcodeBranch(Inst, Is16BitMode);
}

static X86::CondCode getCondFromBranch(const MCInst &MI,
                                       const MCInstrInfo &MCII) {
  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    return X86::COND_INVALID;
  case X86::JCC_1: {
    const MCInstrDesc &Desc = MCII.get(Opcode);
    return static_cast<X86::CondCode>(
        MI.getOperand(Desc.getNumOperands() - 1).getImm());
  }
  }
}

static X86::SecondMacroFusionInstKind
classifySecondInstInMacroFusion(const MCInst &MI, const MCInstrInfo &MCII) {
  X86::CondCode CC = getCondFromBranch(MI, MCII);
  return classifySecondCondCodeInMacroFusion(CC);
}

/// Check if the instruction uses RIP relative addressing.
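/// (e.g. "leaq sym(%rip), %rax", whose memory operand uses RIP as the base
/// register; the mnemonic shown is illustrative).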
static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  uint64_t TSFlags = Desc.TSFlags;
  unsigned CurOp = X86II::getOperandBias(Desc);
  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
  if (MemoryOperand < 0)
    return false;
  unsigned BaseRegNum = MemoryOperand + CurOp + X86::AddrBaseReg;
  unsigned BaseReg = MI.getOperand(BaseRegNum).getReg();
  return (BaseReg == X86::RIP);
}

/// Check if the instruction is a prefix.
static bool isPrefix(const MCInst &MI, const MCInstrInfo &MCII) {
  return X86II::isPrefix(MCII.get(MI.getOpcode()).TSFlags);
}

/// Check if the instruction is valid as the first instruction in macro fusion.
static bool isFirstMacroFusibleInst(const MCInst &Inst,
                                    const MCInstrInfo &MCII) {
  // An Intel instruction with RIP relative addressing is not macro fusible.
  if (isRIPRelative(Inst, MCII))
    return false;
  X86::FirstMacroFusionInstKind FIK =
      X86::classifyFirstOpcodeInMacroFusion(Inst.getOpcode());
  return FIK != X86::FirstMacroFusionInstKind::Invalid;
}

/// X86 can reduce the bytes of NOP by padding instructions with prefixes to
/// get better performance in some cases. Here, we determine which prefix is
/// the most suitable.
///
/// If the instruction has a segment override prefix, use the existing one.
/// If the target is 64-bit, use CS.
/// If the target is 32-bit,
///   - If the instruction has an ESP/EBP base register, use SS.
///   - Otherwise use DS.
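///
/// A sketch of the resulting prefix bytes (the operands shown are
/// illustrative; the encodings are the standard x86 segment-override
/// prefixes):
///   64-bit, any instruction              -> 0x2E (CS)
///   32-bit, ESP/EBP base, e.g. 8(%ebp)   -> 0x36 (SS)
///   32-bit, other base,   e.g. (%ecx)    -> 0x3E (DS)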
uint8_t X86AsmBackend::determinePaddingPrefix(const MCInst &Inst) const {
  assert((STI.hasFeature(X86::Mode32Bit) || STI.hasFeature(X86::Mode64Bit)) &&
         "Prefixes can be added only in 32-bit or 64-bit mode.");
  const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
  uint64_t TSFlags = Desc.TSFlags;

  // Determine where the memory operand starts, if present.
  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
  if (MemoryOperand != -1)
    MemoryOperand += X86II::getOperandBias(Desc);

  unsigned SegmentReg = 0;
  if (MemoryOperand >= 0) {
    // Check for explicit segment override on memory operand.
    SegmentReg = Inst.getOperand(MemoryOperand + X86::AddrSegmentReg).getReg();
  }

  switch (TSFlags & X86II::FormMask) {
  default:
    break;
  case X86II::RawFrmDstSrc: {
    // Check segment override opcode prefix as needed (not for %ds).
    if (Inst.getOperand(2).getReg() != X86::DS)
      SegmentReg = Inst.getOperand(2).getReg();
    break;
  }
  case X86II::RawFrmSrc: {
    // Check segment override opcode prefix as needed (not for %ds).
    if (Inst.getOperand(1).getReg() != X86::DS)
      SegmentReg = Inst.getOperand(1).getReg();
    break;
  }
  case X86II::RawFrmMemOffs: {
    // Check segment override opcode prefix as needed.
    SegmentReg = Inst.getOperand(1).getReg();
    break;
  }
  }

  if (SegmentReg != 0)
    return X86::getSegmentOverridePrefixForReg(SegmentReg);

  if (STI.hasFeature(X86::Mode64Bit))
    return X86::CS_Encoding;

  if (MemoryOperand >= 0) {
    unsigned BaseRegNum = MemoryOperand + X86::AddrBaseReg;
    unsigned BaseReg = Inst.getOperand(BaseRegNum).getReg();
    if (BaseReg == X86::ESP || BaseReg == X86::EBP)
      return X86::SS_Encoding;
  }
  return X86::DS_Encoding;
}

/// Check if the two instructions will be macro-fused on the target cpu.
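/// (e.g. a "cmp %rsi, %rdi; je .L" pair may decode as a single fused uop on
/// cores that support CMP/JCC fusion; the pairing shown is illustrative).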
bool X86AsmBackend::isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const {
  const MCInstrDesc &InstDesc = MCII->get(Jcc.getOpcode());
  if (!InstDesc.isConditionalBranch())
    return false;
  if (!isFirstMacroFusibleInst(Cmp, *MCII))
    return false;
  const X86::FirstMacroFusionInstKind CmpKind =
      X86::classifyFirstOpcodeInMacroFusion(Cmp.getOpcode());
  const X86::SecondMacroFusionInstKind BranchKind =
      classifySecondInstInMacroFusion(Jcc, *MCII);
  return X86::isMacroFused(CmpKind, BranchKind);
}

/// Check if the instruction has a variant symbol operand.
static bool hasVariantSymbol(const MCInst &MI) {
  for (auto &Operand : MI) {
    if (!Operand.isExpr())
      continue;
    const MCExpr &Expr = *Operand.getExpr();
    if (Expr.getKind() == MCExpr::SymbolRef &&
        cast<MCSymbolRefExpr>(Expr).getKind() != MCSymbolRefExpr::VK_None)
      return true;
  }
  return false;
}

bool X86AsmBackend::allowAutoPadding() const {
  return (AlignBoundary != Align(1) && AlignBranchType != X86::AlignBranchNone);
}

bool X86AsmBackend::allowEnhancedRelaxation() const {
  return allowAutoPadding() && TargetPrefixMax != 0 && X86PadForBranchAlign;
}

/// X86 has certain instructions which enable interrupts exactly one
/// instruction *after* the instruction which stores to SS.  Return true if the
/// given instruction has such an interrupt delay slot.
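/// (For example, in "mov %ax, %ss; mov %bp, %sp" interrupts stay inhibited
/// until the second instruction retires; a nop inserted between the two would
/// fall inside that protected window. The sequence is illustrative.)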
static bool hasInterruptDelaySlot(const MCInst &Inst) {
  switch (Inst.getOpcode()) {
  case X86::POPSS16:
  case X86::POPSS32:
  case X86::STI:
    return true;

  case X86::MOV16sr:
  case X86::MOV32sr:
  case X86::MOV64sr:
  case X86::MOV16sm:
    if (Inst.getOperand(0).getReg() == X86::SS)
      return true;
    break;
  }
  return false;
}

/// Check if the instruction to be emitted is right after any data.
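/// (For example, after ".byte 0x90" the byte lives in the current data
/// fragment, so the next instruction starts right after data and must not be
/// padded; the directive shown is illustrative.)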
static bool
isRightAfterData(MCFragment *CurrentFragment,
                 const std::pair<MCFragment *, size_t> &PrevInstPosition) {
  MCFragment *F = CurrentFragment;
  // Empty data fragments may be created to prevent further data being
  // added into the previous fragment; we need to skip them since they
  // have no contents.
  for (; isa_and_nonnull<MCDataFragment>(F); F = F->getPrevNode())
    if (cast<MCDataFragment>(F)->getContents().size() != 0)
      break;

  // Since data is always emitted into a DataFragment, our check strategy is
  // simple here.
  //   - If the fragment is a DataFragment
  //     - If it's not the fragment where the previous instruction is,
  //       returns true.
  //     - If it's the fragment holding the previous instruction but its
  //       size changed since the previous instruction was emitted into
  //       it, returns true.
  //     - Otherwise returns false.
  //   - If the fragment is not a DataFragment, returns false.
  if (auto *DF = dyn_cast_or_null<MCDataFragment>(F))
    return DF != PrevInstPosition.first ||
           DF->getContents().size() != PrevInstPosition.second;

  return false;
}

/// \returns the fragment size if it has instructions, otherwise returns 0.
static size_t getSizeForInstFragment(const MCFragment *F) {
  if (!F || !F->hasInstructions())
    return 0;
  // MCEncodedFragmentWithContents being templated makes this tricky.
  switch (F->getKind()) {
  default:
    llvm_unreachable("Unknown fragment with instructions!");
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(*F).getContents().size();
  case MCFragment::FT_Relaxable:
    return cast<MCRelaxableFragment>(*F).getContents().size();
  case MCFragment::FT_CompactEncodedInst:
    return cast<MCCompactEncodedInstFragment>(*F).getContents().size();
  }
}

/// Return true if we can insert NOP or prefixes automatically before the
/// instruction to be emitted.
bool X86AsmBackend::canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const {
  if (hasVariantSymbol(Inst))
    // Linker may rewrite the instruction with a variant symbol operand (e.g.
    // TLSCALL).
    return false;

  if (hasInterruptDelaySlot(PrevInst))
    // If this instruction follows an interrupt enabling instruction with a one
    // instruction delay, inserting a nop would change behavior.
    return false;

  if (isPrefix(PrevInst, *MCII))
    // If this instruction follows a prefix, inserting a nop/prefix would
    // change semantics.
    return false;

  if (isPrefix(Inst, *MCII))
    // If this instruction is a prefix, inserting a prefix would change
    // semantics.
    return false;

  if (isRightAfterData(OS.getCurrentFragment(), PrevInstPosition))
    // If this instruction follows any data, there is no clear instruction
    // boundary, and inserting a nop/prefix would change semantics.
    return false;

  return true;
}

bool X86AsmBackend::canPadBranches(MCObjectStreamer &OS) const {
  if (!OS.getAllowAutoPadding())
    return false;
  assert(allowAutoPadding() && "incorrect initialization!");

  // We only pad in the text section.
  if (!OS.getCurrentSectionOnly()->getKind().isText())
    return false;

  // TODO: Currently we don't deal with bundle cases.
  if (OS.getAssembler().isBundlingEnabled())
    return false;

  // Branches only need to be aligned in 32-bit or 64-bit mode.
  if (!(STI.hasFeature(X86::Mode64Bit) || STI.hasFeature(X86::Mode32Bit)))
    return false;

  return true;
}

/// Check if the instruction needs to be aligned.
bool X86AsmBackend::needAlign(const MCInst &Inst) const {
  const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
  return (Desc.isConditionalBranch() &&
          (AlignBranchType & X86::AlignBranchJcc)) ||
         (Desc.isUnconditionalBranch() &&
          (AlignBranchType & X86::AlignBranchJmp)) ||
         (Desc.isCall() && (AlignBranchType & X86::AlignBranchCall)) ||
         (Desc.isReturn() && (AlignBranchType & X86::AlignBranchRet)) ||
         (Desc.isIndirectBranch() &&
          (AlignBranchType & X86::AlignBranchIndirect));
}

/// Insert BoundaryAlignFragment before instructions to align branches.
void X86AsmBackend::emitInstructionBegin(MCObjectStreamer &OS,
                                         const MCInst &Inst) {
  CanPadInst = canPadInst(Inst, OS);

  if (!canPadBranches(OS))
    return;

  if (!isMacroFused(PrevInst, Inst))
    // Macro fusion doesn't happen here, so clear the pending fragment.
    PendingBA = nullptr;

  if (!CanPadInst)
    return;

  if (PendingBA && OS.getCurrentFragment()->getPrevNode() == PendingBA) {
    // Macro fusion actually happens and there is no other fragment inserted
    // after the previous instruction.
    //
    // Do nothing here since we already inserted a BoundaryAlign fragment when
    // we met the first instruction in the fused pair and we'll tie them
    // together in emitInstructionEnd.
    //
    // Note: When there is at least one fragment, such as an MCAlignFragment,
    // inserted after the previous instruction, e.g.
    //
    // \code
    //   cmp %rax, %rcx
    //   .align 16
    //   je .Label0
    // \endcode
    //
    // we will treat the JCC as an unfused branch although it may be fused
    // with the CMP.
    return;
  }

  if (needAlign(Inst) || ((AlignBranchType & X86::AlignBranchFused) &&
                          isFirstMacroFusibleInst(Inst, *MCII))) {
    // If we meet an unfused branch or the first instruction in a fusible pair,
    // insert a BoundaryAlign fragment.
    OS.insert(PendingBA = new MCBoundaryAlignFragment(AlignBoundary));
  }
}

/// Set the last fragment to be aligned for the BoundaryAlignFragment.
void X86AsmBackend::emitInstructionEnd(MCObjectStreamer &OS,
                                       const MCInst &Inst) {
  PrevInst = Inst;
  MCFragment *CF = OS.getCurrentFragment();
  PrevInstPosition = std::make_pair(CF, getSizeForInstFragment(CF));
  if (auto *F = dyn_cast_or_null<MCRelaxableFragment>(CF))
    F->setAllowAutoPadding(CanPadInst);

  if (!canPadBranches(OS))
    return;

  if (!needAlign(Inst) || !PendingBA)
    return;

  // Tie the aligned instructions into the pending BoundaryAlign.
  PendingBA->setLastFragment(CF);
  PendingBA = nullptr;

  // We need to ensure that further data isn't added to the current
  // DataFragment, so that we can get the size of instructions later in
  // MCAssembler::relaxBoundaryAlign. The easiest way is to insert a new empty
  // DataFragment.
  if (isa_and_nonnull<MCDataFragment>(CF))
    OS.insert(new MCDataFragment());

  // Update the maximum alignment on the current section if necessary.
  MCSection *Sec = OS.getCurrentSectionOnly();
  if (AlignBoundary.value() > Sec->getAlignment())
    Sec->setAlignment(AlignBoundary);
}

Optional<MCFixupKind> X86AsmBackend::getFixupKind(StringRef Name) const {
  if (STI.getTargetTriple().isOSBinFormatELF()) {
    unsigned Type;
    if (STI.getTargetTriple().getArch() == Triple::x86_64) {
      Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/x86_64.def"
#undef ELF_RELOC
                 .Case("BFD_RELOC_NONE", ELF::R_X86_64_NONE)
                 .Case("BFD_RELOC_8", ELF::R_X86_64_8)
                 .Case("BFD_RELOC_16", ELF::R_X86_64_16)
                 .Case("BFD_RELOC_32", ELF::R_X86_64_32)
                 .Case("BFD_RELOC_64", ELF::R_X86_64_64)
                 .Default(-1u);
    } else {
      Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/i386.def"
#undef ELF_RELOC
                 .Case("BFD_RELOC_NONE", ELF::R_386_NONE)
                 .Case("BFD_RELOC_8", ELF::R_386_8)
                 .Case("BFD_RELOC_16", ELF::R_386_16)
                 .Case("BFD_RELOC_32", ELF::R_386_32)
                 .Default(-1u);
    }
    if (Type == -1u)
      return None;
    return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
  }
  return MCAsmBackend::getFixupKind(Name);
}

const MCFixupKindInfo &X86AsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
      {"reloc_riprel_4byte", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"reloc_riprel_4byte_movq_load", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"reloc_riprel_4byte_relax", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"reloc_riprel_4byte_relax_rex", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"reloc_signed_4byte", 0, 32, 0},
      {"reloc_signed_4byte_relax", 0, 32, 0},
      {"reloc_global_offset_table", 0, 32, 0},
      {"reloc_global_offset_table8", 0, 64, 0},
      {"reloc_branch_4byte_pcrel", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
  };

  // Fixup kinds from .reloc directive are like R_386_NONE/R_X86_64_NONE. They
  // do not require any extra processing.
  if (Kind >= FirstLiteralRelocationKind)
    return MCAsmBackend::getFixupKindInfo(FK_NONE);

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  assert(Infos[Kind - FirstTargetFixupKind].Name && "Empty fixup name!");
  return Infos[Kind - FirstTargetFixupKind];
}

bool X86AsmBackend::shouldForceRelocation(const MCAssembler &,
                                          const MCFixup &Fixup,
                                          const MCValue &) {
  return Fixup.getKind() >= FirstLiteralRelocationKind;
}

static unsigned getFixupKindSize(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("invalid fixup kind!");
  case FK_NONE:
    return 0;
  case FK_PCRel_1:
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_PCRel_2:
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_PCRel_4:
  case X86::reloc_riprel_4byte:
  case X86::reloc_riprel_4byte_relax:
  case X86::reloc_riprel_4byte_relax_rex:
  case X86::reloc_riprel_4byte_movq_load:
  case X86::reloc_signed_4byte:
  case X86::reloc_signed_4byte_relax:
  case X86::reloc_global_offset_table:
  case X86::reloc_branch_4byte_pcrel:
  case FK_SecRel_4:
  case FK_Data_4:
    return 4;
  case FK_PCRel_8:
  case FK_SecRel_8:
  case FK_Data_8:
  case X86::reloc_global_offset_table8:
    return 8;
  }
}

void X86AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                               const MCValue &Target,
                               MutableArrayRef<char> Data,
                               uint64_t Value, bool IsResolved,
                               const MCSubtargetInfo *STI) const {
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  unsigned Size = getFixupKindSize(Kind);

  assert(Fixup.getOffset() + Size <= Data.size() && "Invalid fixup offset!");

  int64_t SignedValue = static_cast<int64_t>(Value);
  if ((Target.isAbsolute() || IsResolved) &&
      getFixupKindInfo(Fixup.getKind()).Flags &
      MCFixupKindInfo::FKF_IsPCRel) {
    // Check that the PC-relative fixup fits into the fixup size.
    if (Size > 0 && !isIntN(Size * 8, SignedValue))
      Asm.getContext().reportError(
          Fixup.getLoc(), "value of " + Twine(SignedValue) +
                              " is too large for field of " + Twine(Size) +
                              ((Size == 1) ? " byte." : " bytes."));
  } else {
    // Check that the upper bits are either all zeros or all ones.
    // Specifically ignore overflow/underflow as long as the leakage is
    // limited to the lower bits. This is to remain compatible with
    // other assemblers.
    assert((Size == 0 || isIntN(Size * 8 + 1, SignedValue)) &&
           "Value does not fit in the Fixup field");
  }

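  // Write the fixup value into the instruction bytes least-significant byte
  // first (x86 is little-endian), touching only the Size bytes at the fixup's
  // offset.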
  for (unsigned i = 0; i != Size; ++i)
    Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
}

bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                      const MCSubtargetInfo &STI) const {
  // Branches can always be relaxed in either mode.
  if (getRelaxedOpcodeBranch(Inst, false) != Inst.getOpcode())
    return true;

  // Check if this instruction is ever relaxable.
  if (getRelaxedOpcodeArith(Inst) == Inst.getOpcode())
    return false;

  // Check if the relaxable operand has an expression. For the current set of
  // relaxable instructions, the relaxable operand is always the last operand.
  unsigned RelaxableOp = Inst.getNumOperands() - 1;
  if (Inst.getOperand(RelaxableOp).isExpr())
    return true;

  return false;
}

bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                         uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  // Relax if the value is too big for a (signed) i8.
  return !isInt<8>(Value);
}

// FIXME: Can tblgen help at all here to verify there aren't other instructions
// we can relax?
void X86AsmBackend::relaxInstruction(MCInst &Inst,
                                     const MCSubtargetInfo &STI) const {
  // The only relaxation X86 does is from a 1-byte pcrel to a 4-byte pcrel.
  bool Is16BitMode = STI.getFeatureBits()[X86::Mode16Bit];
  unsigned RelaxedOp = getRelaxedOpcode(Inst, Is16BitMode);

  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  Inst.setOpcode(RelaxedOp);
}

/// Return true if this instruction has been fully relaxed into its most
/// general available form.
static bool isFullyRelaxed(const MCRelaxableFragment &RF) {
  auto &Inst = RF.getInst();
  auto &STI = *RF.getSubtargetInfo();
  bool Is16BitMode = STI.getFeatureBits()[X86::Mode16Bit];
  return getRelaxedOpcode(Inst, Is16BitMode) == Inst.getOpcode();
}

bool X86AsmBackend::padInstructionViaPrefix(MCRelaxableFragment &RF,
                                            MCCodeEmitter &Emitter,
                                            unsigned &RemainingSize) const {
  if (!RF.getAllowAutoPadding())
    return false;
  // If the instruction isn't fully relaxed, shifting it around might require a
  // larger value for one of the fixups than can be encoded.  The outer loop
  // will also catch this before moving to the next instruction, but we need to
  // prevent padding this single instruction as well.
  if (!isFullyRelaxed(RF))
    return false;

  const unsigned OldSize = RF.getContents().size();
  if (OldSize == 15)
    return false;

  const unsigned MaxPossiblePad = std::min(15 - OldSize, RemainingSize);
  const unsigned RemainingPrefixSize = [&]() -> unsigned {
    SmallString<15> Code;
    raw_svector_ostream VecOS(Code);
    Emitter.emitPrefix(RF.getInst(), VecOS, STI);
    assert(Code.size() < 15 && "The number of prefixes must be less than 15.");

    // TODO: It turns out we need a decent amount of plumbing for the target
    // specific bits to determine the number of prefixes it's safe to add.
    // Various targets (older chips mostly, but also Atom family) encounter
    // decoder stalls with too many prefixes.  For testing purposes, we set
    // the value externally for the moment.
    unsigned ExistingPrefixSize = Code.size();
    if (TargetPrefixMax <= ExistingPrefixSize)
      return 0;
    return TargetPrefixMax - ExistingPrefixSize;
  }();
  const unsigned PrefixBytesToAdd =
      std::min(MaxPossiblePad, RemainingPrefixSize);
  if (PrefixBytesToAdd == 0)
    return false;

  const uint8_t Prefix = determinePaddingPrefix(RF.getInst());

  SmallString<256> Code;
  Code.append(PrefixBytesToAdd, Prefix);
  Code.append(RF.getContents().begin(), RF.getContents().end());
  RF.getContents() = Code;

  // Adjust the fixups for the change in offsets.
  for (auto &F : RF.getFixups()) {
    F.setOffset(F.getOffset() + PrefixBytesToAdd);
  }

  RemainingSize -= PrefixBytesToAdd;
  return true;
}

bool X86AsmBackend::padInstructionViaRelaxation(MCRelaxableFragment &RF,
                                                MCCodeEmitter &Emitter,
                                                unsigned &RemainingSize) const {
  if (isFullyRelaxed(RF))
    // TODO: There are lots of other tricks we could apply for increasing
    // encoding size without impacting performance.
    return false;

  MCInst Relaxed = RF.getInst();
  relaxInstruction(Relaxed, *RF.getSubtargetInfo());

  SmallVector<MCFixup, 4> Fixups;
  SmallString<15> Code;
  raw_svector_ostream VecOS(Code);
  Emitter.encodeInstruction(Relaxed, VecOS, Fixups, *RF.getSubtargetInfo());
  const unsigned OldSize = RF.getContents().size();
  const unsigned NewSize = Code.size();
  assert(NewSize >= OldSize && "size decrease during relaxation?");
  unsigned Delta = NewSize - OldSize;
  if (Delta > RemainingSize)
    return false;
  RF.setInst(Relaxed);
  RF.getContents() = Code;
  RF.getFixups() = Fixups;
  RemainingSize -= Delta;
  return true;
}

bool X86AsmBackend::padInstructionEncoding(MCRelaxableFragment &RF,
                                           MCCodeEmitter &Emitter,
                                           unsigned &RemainingSize) const {
  bool Changed = false;
  if (RemainingSize != 0)
    Changed |= padInstructionViaRelaxation(RF, Emitter, RemainingSize);
  if (RemainingSize != 0)
    Changed |= padInstructionViaPrefix(RF, Emitter, RemainingSize);
  return Changed;
}

void X86AsmBackend::finishLayout(MCAssembler const &Asm,
                                 MCAsmLayout &Layout) const {
  // See if we can further relax some instructions to cut down on the number of
  // nop bytes required for code alignment.  The actual win is in reducing
  // instruction count, not number of bytes.  Modern X86-64 can easily end up
  // decode limited.  It is often better to reduce the number of instructions
  // (i.e. eliminate nops) even at the cost of increasing the size and
  // complexity of others.
  if (!X86PadForAlign && !X86PadForBranchAlign)
    return;

  // The processed regions are delimited by LabeledFragments. -g may have more
  // MCSymbols and therefore different relaxation results. X86PadForAlign is
  // disabled by default to eliminate the -g vs non -g difference.
972*82d56013Sjoerg   DenseSet<MCFragment *> LabeledFragments;
973*82d56013Sjoerg   for (const MCSymbol &S : Asm.symbols())
974*82d56013Sjoerg     LabeledFragments.insert(S.getFragment(false));

  for (MCSection &Sec : Asm) {
    if (!Sec.getKind().isText())
      continue;

    SmallVector<MCRelaxableFragment *, 4> Relaxable;
    for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) {
      MCFragment &F = *I;

      if (LabeledFragments.count(&F))
        Relaxable.clear();

      if (F.getKind() == MCFragment::FT_Data ||
          F.getKind() == MCFragment::FT_CompactEncodedInst)
        // Skip and ignore
        continue;

      if (F.getKind() == MCFragment::FT_Relaxable) {
        auto &RF = cast<MCRelaxableFragment>(*I);
        Relaxable.push_back(&RF);
        continue;
      }

      auto canHandle = [](MCFragment &F) -> bool {
        switch (F.getKind()) {
        default:
          return false;
        case MCFragment::FT_Align:
          return X86PadForAlign;
        case MCFragment::FT_BoundaryAlign:
          return X86PadForBranchAlign;
        }
      };
      // For any unhandled kind, assume we can't change layout.
      if (!canHandle(F)) {
        Relaxable.clear();
        continue;
      }

#ifndef NDEBUG
      const uint64_t OrigOffset = Layout.getFragmentOffset(&F);
#endif
      const uint64_t OrigSize = Asm.computeFragmentSize(Layout, F);

      // To keep the effects local, prefer to relax instructions closest to
      // the align directive.  This is purely about human understandability
      // of the resulting code.  If we later find a reason to expand
      // particular instructions over others, we can adjust.
      MCFragment *FirstChangedFragment = nullptr;
      unsigned RemainingSize = OrigSize;
      while (!Relaxable.empty() && RemainingSize != 0) {
        auto &RF = *Relaxable.pop_back_val();
        // Give the backend a chance to play any tricks it wishes to increase
        // the encoding size of the given instruction.  Target-independent code
        // will try further relaxation, but targets may play further tricks.
        if (padInstructionEncoding(RF, Asm.getEmitter(), RemainingSize))
          FirstChangedFragment = &RF;

        // If we have an instruction which hasn't been fully relaxed, we can't
        // skip past it and insert bytes before it.  Changing its starting
        // offset might require a larger negative offset than it can encode.
        // We don't need to worry about larger positive offsets as none of the
        // possible offsets between this and our align are visible, and the
        // ones afterwards aren't changing.
        if (!isFullyRelaxed(RF))
          break;
      }
      Relaxable.clear();

      if (FirstChangedFragment) {
        // Make sure the offsets for any fragments in the affected range get
        // updated.  Note that this (conservatively) invalidates the offsets of
        // those following, but this is not required.
        Layout.invalidateFragmentsFrom(FirstChangedFragment);
      }

      // BoundaryAlign explicitly tracks its size (unlike align).
      if (F.getKind() == MCFragment::FT_BoundaryAlign)
        cast<MCBoundaryAlignFragment>(F).setSize(RemainingSize);

#ifndef NDEBUG
      const uint64_t FinalOffset = Layout.getFragmentOffset(&F);
      const uint64_t FinalSize = Asm.computeFragmentSize(Layout, F);
      assert(OrigOffset + OrigSize == FinalOffset + FinalSize &&
             "can't move start of next fragment!");
      assert(FinalSize == RemainingSize && "inconsistent size computation?");
#endif

      // If we're looking at a boundary align, make sure we don't try to pad
      // its target instructions for some following directive.  Doing so would
      // break the alignment of the current boundary align.
      if (auto *BF = dyn_cast<MCBoundaryAlignFragment>(&F)) {
        const MCFragment *LastFragment = BF->getLastFragment();
        if (!LastFragment)
          continue;
        while (&*I != LastFragment)
          ++I;
      }
    }
  }

  // The layout is done. Mark every fragment as valid.
  for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
    MCSection &Section = *Layout.getSectionOrder()[i];
    Layout.getFragmentOffset(&*Section.getFragmentList().rbegin());
    Asm.computeFragmentSize(Layout, *Section.getFragmentList().rbegin());
  }
}

unsigned X86AsmBackend::getMaximumNopSize() const {
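  // Explanatory note (an inference, not original commentary): this value
  // caps ThisNopLength in writeNopData below; 4 matches the longest entry
  // in the 16-bit nop table, and 1 is returned when multi-byte NOPL forms
  // may not be available.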
  if (STI.hasFeature(X86::Mode16Bit))
    return 4;
  if (!STI.hasFeature(X86::FeatureNOPL) && !STI.hasFeature(X86::Mode64Bit))
    return 1;
  if (STI.getFeatureBits()[X86::FeatureFast7ByteNOP])
    return 7;
  if (STI.getFeatureBits()[X86::FeatureFast15ByteNOP])
    return 15;
  if (STI.getFeatureBits()[X86::FeatureFast11ByteNOP])
    return 11;
  // FIXME: handle 32-bit mode
  // 15 bytes is the longest single NOP instruction, but 10 bytes is
  // commonly the longest that can be efficiently decoded.
  return 10;
}

/// Write a sequence of optimal nops to the output, covering \p Count
/// bytes.
/// \return - true on success, false on failure
bool X86AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  static const char Nops32Bit[10][11] = {
      // nop
      "\x90",
      // xchg %ax,%ax
      "\x66\x90",
      // nopl (%[re]ax)
      "\x0f\x1f\x00",
      // nopl 0(%[re]ax)
      "\x0f\x1f\x40\x00",
      // nopl 0(%[re]ax,%[re]ax,1)
      "\x0f\x1f\x44\x00\x00",
      // nopw 0(%[re]ax,%[re]ax,1)
      "\x66\x0f\x1f\x44\x00\x00",
      // nopl 0L(%[re]ax)
      "\x0f\x1f\x80\x00\x00\x00\x00",
      // nopl 0L(%[re]ax,%[re]ax,1)
      "\x0f\x1f\x84\x00\x00\x00\x00\x00",
      // nopw 0L(%[re]ax,%[re]ax,1)
      "\x66\x0f\x1f\x84\x00\x00\x00\x00\x00",
      // nopw %cs:0L(%[re]ax,%[re]ax,1)
      "\x66\x2e\x0f\x1f\x84\x00\x00\x00\x00\x00",
  };

  // 16-bit mode uses different nop patterns than 32-bit.
  static const char Nops16Bit[4][11] = {
      // nop
      "\x90",
      // xchg %eax,%eax
      "\x66\x90",
      // lea 0(%si),%si
      "\x8d\x74\x00",
      // lea 0w(%si),%si
      "\x8d\xb4\x00\x00",
  };

  const char(*Nops)[11] =
      STI.getFeatureBits()[X86::Mode16Bit] ? Nops16Bit : Nops32Bit;

  uint64_t MaxNopLength = (uint64_t)getMaximumNopSize();

  // Emit as many MaxNopLength NOPs as needed, then emit a NOP of the remaining
  // length.
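  // Worked example (illustrative): with MaxNopLength == 15 and Count == 17,
  // the first pass emits five 0x66 prefixes plus the 10-byte nop from the
  // table above (15 bytes total); the second pass emits the 2-byte
  // "xchg %ax,%ax", leaving Count == 0.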
  do {
    const uint8_t ThisNopLength = (uint8_t) std::min(Count, MaxNopLength);
    const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
    for (uint8_t i = 0; i < Prefixes; i++)
      OS << '\x66';
    const uint8_t Rest = ThisNopLength - Prefixes;
    if (Rest != 0)
      OS.write(Nops[Rest - 1], Rest);
    Count -= ThisNopLength;
  } while (Count != 0);

  return true;
}

/* *** */

namespace {

class ELFX86AsmBackend : public X86AsmBackend {
public:
  uint8_t OSABI;
  ELFX86AsmBackend(const Target &T, uint8_t OSABI, const MCSubtargetInfo &STI)
      : X86AsmBackend(T, STI), OSABI(OSABI) {}
};

class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI,
                      const MCSubtargetInfo &STI)
    : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI, ELF::EM_386);
  }
};

class ELFX86_X32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_X32AsmBackend(const Target &T, uint8_t OSABI,
                       const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
                                    ELF::EM_X86_64);
  }
};

class ELFX86_IAMCUAsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_IAMCUAsmBackend(const Target &T, uint8_t OSABI,
                         const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
                                    ELF::EM_IAMCU);
  }
};

class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI,
                      const MCSubtargetInfo &STI)
    : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ true, OSABI, ELF::EM_X86_64);
  }
};

class WindowsX86AsmBackend : public X86AsmBackend {
  bool Is64Bit;

public:
  WindowsX86AsmBackend(const Target &T, bool is64Bit,
                       const MCSubtargetInfo &STI)
    : X86AsmBackend(T, STI)
    , Is64Bit(is64Bit) {
  }

  Optional<MCFixupKind> getFixupKind(StringRef Name) const override {
    return StringSwitch<Optional<MCFixupKind>>(Name)
        .Case("dir32", FK_Data_4)
        .Case("secrel32", FK_SecRel_4)
        .Case("secidx", FK_SecRel_2)
        .Default(MCAsmBackend::getFixupKind(Name));
  }

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86WinCOFFObjectWriter(Is64Bit);
  }
};

namespace CU {

  /// Compact unwind encoding values.
  enum CompactUnwindEncodings {
    /// [RE]BP based frame where [RE]BP is pushed on the stack immediately
    /// after the return address, then [RE]SP is moved to [RE]BP.
    UNWIND_MODE_BP_FRAME                   = 0x01000000,

    /// A frameless function with a small constant stack size.
    UNWIND_MODE_STACK_IMMD                 = 0x02000000,

    /// A frameless function with a large constant stack size.
    UNWIND_MODE_STACK_IND                  = 0x03000000,

    /// No compact unwind encoding is available.
    UNWIND_MODE_DWARF                      = 0x04000000,

    /// Mask for encoding the frame registers.
    UNWIND_BP_FRAME_REGISTERS              = 0x00007FFF,

    /// Mask for encoding the frameless registers.
    UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
  };

} // namespace CU

class DarwinX86AsmBackend : public X86AsmBackend {
  const MCRegisterInfo &MRI;

  /// Number of registers that can be saved in a compact unwind encoding.
  enum { CU_NUM_SAVED_REGS = 6 };

  mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
  Triple TT;
  bool Is64Bit;

  unsigned OffsetSize;                   ///< Size of a "push"ed stack slot.
  unsigned MoveInstrSize;                ///< Size of a "move" instruction.
  unsigned StackDivide;                  ///< Amount to adjust stack size by.
protected:
  /// Size of a "push" instruction for the given register.
  unsigned PushInstrSize(unsigned Reg) const {
    switch (Reg) {
      case X86::EBX:
      case X86::ECX:
      case X86::EDX:
      case X86::EDI:
      case X86::ESI:
      case X86::EBP:
      case X86::RBX:
      case X86::RBP:
        return 1;
      case X86::R12:
      case X86::R13:
      case X86::R14:
      case X86::R15:
        return 2;
    }
    return 1;
  }

private:
  /// Get the compact unwind number for a given register. The number
  /// corresponds to the enum lists in compact_unwind_encoding.h.
  int getCompactUnwindRegNum(unsigned Reg) const {
    static const MCPhysReg CU32BitRegs[7] = {
      X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
    };
    static const MCPhysReg CU64BitRegs[] = {
      X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
    };
    const MCPhysReg *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
    for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
      if (*CURegs == Reg)
        return Idx;

    return -1;
  }
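
  // For illustration: with Is64Bit, the table above yields 1 for X86::RBX
  // and 2 for X86::R12, while a register not in the table (e.g. X86::RAX)
  // yields -1, which callers translate into a DWARF fallback.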

  /// Return the registers encoded for a compact encoding with a frame
  /// pointer.
  uint32_t encodeCompactUnwindRegistersWithFrame() const {
    // Encode the registers in the order they were saved, 3 bits per
    // register. The list of saved registers is assumed to be in reverse
    // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
    uint32_t RegEnc = 0;
    for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
      unsigned Reg = SavedRegs[i];
      if (Reg == 0) break;

      int CURegNum = getCompactUnwindRegNum(Reg);
      if (CURegNum == -1) return ~0U;

      // Encode the 3-bit register number in order, skipping over 3 bits for
      // each register.
      RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
    }

    assert((RegEnc & 0x3FFFF) == RegEnc &&
           "Invalid compact register encoding!");
    return RegEnc;
  }
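
  // Worked example (illustrative): if RBX then R12 were saved, RegEnc is
  // (1 << 0) | (2 << 3) == 0x11; register numbers are packed three bits at
  // a time, least-significant field first.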

  /// Create the permutation encoding used with frameless stacks. It is
  /// passed the number of registers to be saved and an array of the registers
  /// saved.
  uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
    // The saved registers are numbered from 1 to 6. In order to encode the
    // order in which they were saved, we re-number them according to their
    // place in the register order. The re-numbering is relative to the last
    // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
    // that order:
    //
    //    Orig  Re-Num
    //    ----  ------
    //     6       6
    //     2       2
    //     4       3
    //     5       3
    //
    for (unsigned i = 0; i < RegCount; ++i) {
      int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
      if (CUReg == -1) return ~0U;
      SavedRegs[i] = CUReg;
    }

    // Reverse the list.
    std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);

    uint32_t RenumRegs[CU_NUM_SAVED_REGS];
    for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i) {
      unsigned Countless = 0;
      for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
        if (SavedRegs[j] < SavedRegs[i])
          ++Countless;

      RenumRegs[i] = SavedRegs[i] - Countless - 1;
    }

    // Take the renumbered values and encode them into a 10-bit number.
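    // (Explanatory note: the coefficients below form a variable-base,
    // Lehmer-code style encoding; for six registers the digit weights are
    // 5!, 4!, 3!, 2! and 1, so each distinct saved-register permutation
    // maps to a unique value that fits in 10 bits.)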
    uint32_t permutationEncoding = 0;
    switch (RegCount) {
    case 6:
      permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
                             + 6 * RenumRegs[2] +  2 * RenumRegs[3]
                             +     RenumRegs[4];
      break;
    case 5:
      permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
                             + 6 * RenumRegs[3] +  2 * RenumRegs[4]
                             +     RenumRegs[5];
      break;
    case 4:
      permutationEncoding |=  60 * RenumRegs[2] + 12 * RenumRegs[3]
                             + 3 * RenumRegs[4] +      RenumRegs[5];
      break;
    case 3:
      permutationEncoding |=  20 * RenumRegs[3] +  4 * RenumRegs[4]
                             +     RenumRegs[5];
      break;
    case 2:
      permutationEncoding |=   5 * RenumRegs[4] +      RenumRegs[5];
      break;
    case 1:
      permutationEncoding |=       RenumRegs[5];
      break;
    }
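
    // Worked trace (illustrative, assuming the CU register numbers map to
    // themselves): for the {6, 2, 4, 5} example above, RegCount == 4.
    // After the reversal, SavedRegs[2..5] == {5, 4, 2, 6}; the loop
    // renumbers them to {4, 3, 1, 2}, and the RegCount == 4 case yields
    // 60*4 + 12*3 + 3*1 + 2 == 281.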

    assert((permutationEncoding & 0x3FF) == permutationEncoding &&
           "Invalid compact register encoding!");
    return permutationEncoding;
  }

public:
  DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                      const MCSubtargetInfo &STI)
      : X86AsmBackend(T, STI), MRI(MRI), TT(STI.getTargetTriple()),
        Is64Bit(TT.isArch64Bit()) {
    memset(SavedRegs, 0, sizeof(SavedRegs));
    OffsetSize = Is64Bit ? 8 : 4;
    MoveInstrSize = Is64Bit ? 3 : 2;
    StackDivide = Is64Bit ? 8 : 4;
  }

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    uint32_t CPUType = cantFail(MachO::getCPUType(TT));
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TT));
    return createX86MachObjectWriter(Is64Bit, CPUType, CPUSubType);
  }

  /// Implementation of the algorithm to generate the compact unwind encoding
  /// for the CFI instructions.
  uint32_t
  generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const override {
    if (Instrs.empty()) return 0;

    // Reset the saved registers.
    unsigned SavedRegIdx = 0;
    memset(SavedRegs, 0, sizeof(SavedRegs));

    bool HasFP = false;

    // The compact unwind encoding built up below.
    uint32_t CompactUnwindEncoding = 0;

    unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
    unsigned InstrOffset = 0;
    unsigned StackAdjust = 0;
    unsigned StackSize = 0;
    unsigned NumDefCFAOffsets = 0;
    int MinAbsOffset = std::numeric_limits<int>::max();

    for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Any other CFI directives indicate a frame that we aren't prepared
        // to represent via compact unwind, so just bail out.
        return 0;
      case MCCFIInstruction::OpDefCfaRegister: {
        // Defines a frame pointer. E.g.
        //
        //     movq %rsp, %rbp
        //  L0:
        //     .cfi_def_cfa_register %rbp
        //
        HasFP = true;

        // If the frame pointer is other than ebp/rbp, we do not have a way to
        // generate a compact unwinding representation, so bail out.
        if (*MRI.getLLVMRegNum(Inst.getRegister(), true) !=
            (Is64Bit ? X86::RBP : X86::EBP))
          return 0;

        // Reset the counts.
        memset(SavedRegs, 0, sizeof(SavedRegs));
        StackAdjust = 0;
        SavedRegIdx = 0;
        MinAbsOffset = std::numeric_limits<int>::max();
        InstrOffset += MoveInstrSize;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        // Defines a new offset for the CFA. E.g.
        //
        //  With frame:
        //
        //     pushq %rbp
        //  L0:
        //     .cfi_def_cfa_offset 16
        //
        //  Without frame:
        //
        //     subq $72, %rsp
        //  L0:
        //     .cfi_def_cfa_offset 80
        //
        StackSize = Inst.getOffset() / StackDivide;
        ++NumDefCFAOffsets;
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Defines a "push" of a callee-saved register. E.g.
        //
        //     pushq %r15
        //     pushq %r14
        //     pushq %rbx
        //  L0:
        //     subq $120, %rsp
        //  L1:
        //     .cfi_offset %rbx, -40
        //     .cfi_offset %r14, -32
        //     .cfi_offset %r15, -24
        //
        if (SavedRegIdx == CU_NUM_SAVED_REGS)
          // If there are too many saved registers, we cannot use a compact
          // unwind encoding.
          return CU::UNWIND_MODE_DWARF;

        unsigned Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        SavedRegs[SavedRegIdx++] = Reg;
        StackAdjust += OffsetSize;
        MinAbsOffset = std::min(MinAbsOffset, abs(Inst.getOffset()));
        InstrOffset += PushInstrSize(Reg);
        break;
      }
      }
    }

    StackAdjust /= StackDivide;

    if (HasFP) {
      if ((StackAdjust & 0xFF) != StackAdjust)
        // Offset was too big for a compact unwind encoding.
        return CU::UNWIND_MODE_DWARF;

      // We don't attempt to track a real StackAdjust, so if the saved
      // registers aren't adjacent to rbp we can't cope.
      if (SavedRegIdx != 0 && MinAbsOffset != 3 * (int)OffsetSize)
        return CU::UNWIND_MODE_DWARF;

      // Get the encoding of the saved registers when we have a frame pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
      CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
      CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
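      // Resulting layout, as read off the masks above: the mode selector in
      // the top byte, the scaled stack adjustment in bits [23:16], and the
      // frame register list in the low 15 bits.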
    } else {
      SubtractInstrIdx += InstrOffset;
      ++StackAdjust;

      if ((StackSize & 0xFF) == StackSize) {
        // Frameless stack with a small stack size.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;

        // Encode the stack size.
        CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
      } else {
        if ((StackAdjust & 0x7) != StackAdjust)
          // The extra stack adjustments are too big for us to handle.
          return CU::UNWIND_MODE_DWARF;

        // Frameless stack with an offset too large for us to encode compactly.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;

        // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
        // instruction.
        CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;

        // Encode any extra stack adjustments (done via push instructions).
        CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
      }

      // Encode the number of registers saved. (Reverse the list first.)
      std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
      CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;

      // Get the encoding of the saved registers when we don't have a frame
      // pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      // Encode the register encoding.
      CompactUnwindEncoding |=
        RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
    }

    return CompactUnwindEncoding;
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO())
    return new DarwinX86AsmBackend(T, MRI, STI);

  if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
    return new WindowsX86AsmBackend(T, false, STI);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());

  if (TheTriple.isOSIAMCU())
    return new ELFX86_IAMCUAsmBackend(T, OSABI, STI);

  return new ELFX86_32AsmBackend(T, OSABI, STI);
}

MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO())
    return new DarwinX86AsmBackend(T, MRI, STI);

  if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
    return new WindowsX86AsmBackend(T, true, STI);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());

  if (TheTriple.getEnvironment() == Triple::GNUX32)
    return new ELFX86_X32AsmBackend(T, OSABI, STI);
  return new ELFX86_64AsmBackend(T, OSABI, STI);
}