xref: /llvm-project/bolt/lib/Target/X86/X86MCPlusBuilder.cpp (revision c09cd64e5c6dea6e97ef7d6cee5f689df2b408d7)
1 //===- bolt/Target/X86/X86MCPlusBuilder.cpp -------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file provides X86-specific MCPlus builder.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "MCTargetDesc/X86BaseInfo.h"
14 #include "MCTargetDesc/X86InstrRelaxTables.h"
15 #include "MCTargetDesc/X86MCTargetDesc.h"
16 #include "bolt/Core/MCPlus.h"
17 #include "bolt/Core/MCPlusBuilder.h"
18 #include "llvm/BinaryFormat/ELF.h"
19 #include "llvm/MC/MCContext.h"
20 #include "llvm/MC/MCFixupKindInfo.h"
21 #include "llvm/MC/MCInst.h"
22 #include "llvm/MC/MCInstBuilder.h"
23 #include "llvm/MC/MCInstrInfo.h"
24 #include "llvm/MC/MCRegister.h"
25 #include "llvm/MC/MCRegisterInfo.h"
26 #include "llvm/Support/CommandLine.h"
27 #include "llvm/Support/DataExtractor.h"
28 #include "llvm/Support/Debug.h"
29 #include "llvm/Support/Errc.h"
30 #include "llvm/Support/ErrorHandling.h"
31 #include "llvm/Support/ErrorOr.h"
32 #include <set>
33 
34 #define DEBUG_TYPE "mcplus"
35 
36 using namespace llvm;
37 using namespace bolt;
38 
39 namespace opts {
40 
41 extern cl::OptionCategory BoltOptCategory;
42 
43 static cl::opt<bool> X86StripRedundantAddressSize(
44     "x86-strip-redundant-address-size",
45     cl::desc("Remove redundant Address-Size override prefix"), cl::init(true),
46     cl::ZeroOrMore, cl::cat(BoltOptCategory));
47 
48 } // namespace opts
49 
50 namespace {
51 
52 unsigned getShortBranchOpcode(unsigned Opcode) {
53   switch (Opcode) {
54   default:
55     return Opcode;
56   case X86::JMP_2: return X86::JMP_1;
57   case X86::JMP_4: return X86::JMP_1;
58   case X86::JCC_2: return X86::JCC_1;
59   case X86::JCC_4: return X86::JCC_1;
60   }
61 }
62 
63 unsigned getShortArithOpcode(unsigned Opcode) {
64   return X86::getShortOpcodeArith(Opcode);
65 }
66 
67 bool isMOVSX64rm32(const MCInst &Inst) {
68   return Inst.getOpcode() == X86::MOVSX64rm32;
69 }
70 
71 bool isADD64rr(const MCInst &Inst) { return Inst.getOpcode() == X86::ADD64rr; }
72 
73 bool isADDri(const MCInst &Inst) {
74   return Inst.getOpcode() == X86::ADD64ri32 ||
75          Inst.getOpcode() == X86::ADD64ri8;
76 }
77 
78 class X86MCPlusBuilder : public MCPlusBuilder {
79 public:
80   X86MCPlusBuilder(const MCInstrAnalysis *Analysis, const MCInstrInfo *Info,
81                    const MCRegisterInfo *RegInfo)
82       : MCPlusBuilder(Analysis, Info, RegInfo) {}
83 
84   bool isBranch(const MCInst &Inst) const override {
85     return Analysis->isBranch(Inst) && !isTailCall(Inst);
86   }
87 
88   bool isNoop(const MCInst &Inst) const override {
89     return X86::isNOP(Inst.getOpcode());
90   }
91 
92   unsigned getCondCode(const MCInst &Inst) const override {
93     unsigned Opcode = Inst.getOpcode();
94     if (X86::isJCC(Opcode))
95       return Inst.getOperand(Info->get(Opcode).NumOperands - 1).getImm();
96     return X86::COND_INVALID;
97   }
98 
99   unsigned getInvertedCondCode(unsigned CC) const override {
100     switch (CC) {
101     default: return X86::COND_INVALID;
102     case X86::COND_E:  return X86::COND_NE;
103     case X86::COND_NE: return X86::COND_E;
104     case X86::COND_L:  return X86::COND_GE;
105     case X86::COND_LE: return X86::COND_G;
106     case X86::COND_G:  return X86::COND_LE;
107     case X86::COND_GE: return X86::COND_L;
108     case X86::COND_B:  return X86::COND_AE;
109     case X86::COND_BE: return X86::COND_A;
110     case X86::COND_A:  return X86::COND_BE;
111     case X86::COND_AE: return X86::COND_B;
112     case X86::COND_S:  return X86::COND_NS;
113     case X86::COND_NS: return X86::COND_S;
114     case X86::COND_P:  return X86::COND_NP;
115     case X86::COND_NP: return X86::COND_P;
116     case X86::COND_O:  return X86::COND_NO;
117     case X86::COND_NO: return X86::COND_O;
118     }
119   }
120 
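  /// Compute a condition code equivalent to the logical OR of CC1 and CC2,
  /// or return COND_INVALID if no single flag check covers both. For
  /// example, combining COND_L with COND_E yields COND_LE, while mixing a
  /// signed and an unsigned comparison has no equivalent.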
121   unsigned getCondCodesLogicalOr(unsigned CC1, unsigned CC2) const override {
122     enum DecodedCondCode : uint8_t {
123       DCC_EQUAL = 0x1,
124       DCC_GREATER = 0x2,
125       DCC_LESSER = 0x4,
126       DCC_GREATER_OR_LESSER = 0x6,
127       DCC_UNSIGNED = 0x8,
128       DCC_SIGNED = 0x10,
129       DCC_INVALID = 0x20,
130     };
131 
132     auto decodeCondCode = [&](unsigned CC) -> uint8_t {
133       switch (CC) {
134       default: return DCC_INVALID;
135       case X86::COND_E: return DCC_EQUAL;
136       case X86::COND_NE: return DCC_GREATER | DCC_LESSER;
137       case X86::COND_L: return DCC_LESSER | DCC_SIGNED;
138       case X86::COND_LE: return DCC_EQUAL | DCC_LESSER | DCC_SIGNED;
139       case X86::COND_G: return DCC_GREATER | DCC_SIGNED;
140       case X86::COND_GE: return DCC_GREATER | DCC_EQUAL | DCC_SIGNED;
141       case X86::COND_B: return DCC_LESSER | DCC_UNSIGNED;
142       case X86::COND_BE: return DCC_EQUAL | DCC_LESSER | DCC_UNSIGNED;
143       case X86::COND_A: return DCC_GREATER | DCC_UNSIGNED;
144       case X86::COND_AE: return DCC_GREATER | DCC_EQUAL | DCC_UNSIGNED;
145       }
146     };
147 
148     uint8_t DCC = decodeCondCode(CC1) | decodeCondCode(CC2);
149 
150     if (DCC & DCC_INVALID)
151       return X86::COND_INVALID;
152 
153     if (DCC & DCC_SIGNED && DCC & DCC_UNSIGNED)
154       return X86::COND_INVALID;
155 
156     switch (DCC) {
157     default: return X86::COND_INVALID;
158     case DCC_EQUAL | DCC_LESSER | DCC_SIGNED: return X86::COND_LE;
159     case DCC_EQUAL | DCC_LESSER | DCC_UNSIGNED: return X86::COND_BE;
160     case DCC_EQUAL | DCC_GREATER | DCC_SIGNED: return X86::COND_GE;
161     case DCC_EQUAL | DCC_GREATER | DCC_UNSIGNED: return X86::COND_AE;
162     case DCC_GREATER | DCC_LESSER | DCC_SIGNED: return X86::COND_NE;
163     case DCC_GREATER | DCC_LESSER | DCC_UNSIGNED: return X86::COND_NE;
164     case DCC_GREATER | DCC_LESSER: return X86::COND_NE;
165     case DCC_EQUAL | DCC_SIGNED: return X86::COND_E;
166     case DCC_EQUAL | DCC_UNSIGNED: return X86::COND_E;
167     case DCC_EQUAL: return X86::COND_E;
168     case DCC_LESSER | DCC_SIGNED: return X86::COND_L;
169     case DCC_LESSER | DCC_UNSIGNED: return X86::COND_B;
170     case DCC_GREATER | DCC_SIGNED: return X86::COND_G;
171     case DCC_GREATER | DCC_UNSIGNED: return X86::COND_A;
172     }
173   }
174 
175   bool isValidCondCode(unsigned CC) const override {
176     return (CC != X86::COND_INVALID);
177   }
178 
179   bool isBreakpoint(const MCInst &Inst) const override {
180     return Inst.getOpcode() == X86::INT3;
181   }
182 
183   bool isPrefix(const MCInst &Inst) const override {
184     const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
185     return X86II::isPrefix(Desc.TSFlags);
186   }
187 
188   bool isRep(const MCInst &Inst) const override {
189     return Inst.getFlags() == X86::IP_HAS_REPEAT;
190   }
191 
192   bool deleteREPPrefix(MCInst &Inst) const override {
193     if (Inst.getFlags() == X86::IP_HAS_REPEAT) {
194       Inst.setFlags(0);
195       return true;
196     }
197     return false;
198   }
199 
200   // FIXME: For compatibility with old LLVM only!
201   bool isTerminator(const MCInst &Inst) const override {
202     unsigned Opcode = Inst.getOpcode();
203     return Info->get(Opcode).isTerminator() || X86::isUD1(Opcode) ||
204            X86::isUD2(Opcode);
205   }
206 
207   bool isIndirectCall(const MCInst &Inst) const override {
208     return isCall(Inst) &&
209            ((getMemoryOperandNo(Inst) != -1) || Inst.getOperand(0).isReg());
210   }
211 
212   bool isPop(const MCInst &Inst) const override {
213     return getPopSize(Inst) != 0;
214   }
215 
216   bool isTerminateBranch(const MCInst &Inst) const override {
217     return Inst.getOpcode() == X86::ENDBR32 || Inst.getOpcode() == X86::ENDBR64;
218   }
219 
220   int getPopSize(const MCInst &Inst) const override {
221     switch (Inst.getOpcode()) {
222     case X86::POP16r:
223     case X86::POP16rmm:
224     case X86::POP16rmr:
225     case X86::POPF16:
226     case X86::POPA16:
227     case X86::POPDS16:
228     case X86::POPES16:
229     case X86::POPFS16:
230     case X86::POPGS16:
231     case X86::POPSS16:
232       return 2;
233     case X86::POP32r:
234     case X86::POP32rmm:
235     case X86::POP32rmr:
236     case X86::POPA32:
237     case X86::POPDS32:
238     case X86::POPES32:
239     case X86::POPF32:
240     case X86::POPFS32:
241     case X86::POPGS32:
242     case X86::POPSS32:
243       return 4;
244     case X86::POP64r:
245     case X86::POP64rmm:
246     case X86::POP64rmr:
247     case X86::POPF64:
248     case X86::POPFS64:
249     case X86::POPGS64:
250       return 8;
251     }
252     return 0;
253   }
254 
255   bool isPush(const MCInst &Inst) const override {
256     return getPushSize(Inst) != 0;
257   }
258 
259   int getPushSize(const MCInst &Inst) const override {
260     switch (Inst.getOpcode()) {
261     case X86::PUSH16i8:
262     case X86::PUSH16r:
263     case X86::PUSH16rmm:
264     case X86::PUSH16rmr:
265     case X86::PUSHA16:
266     case X86::PUSHCS16:
267     case X86::PUSHDS16:
268     case X86::PUSHES16:
269     case X86::PUSHF16:
270     case X86::PUSHFS16:
271     case X86::PUSHGS16:
272     case X86::PUSHSS16:
273     case X86::PUSHi16:
274       return 2;
275     case X86::PUSH32i8:
276     case X86::PUSH32r:
277     case X86::PUSH32rmm:
278     case X86::PUSH32rmr:
279     case X86::PUSHA32:
280     case X86::PUSHCS32:
281     case X86::PUSHDS32:
282     case X86::PUSHES32:
283     case X86::PUSHF32:
284     case X86::PUSHFS32:
285     case X86::PUSHGS32:
286     case X86::PUSHSS32:
287     case X86::PUSHi32:
288       return 4;
289     case X86::PUSH64i32:
290     case X86::PUSH64i8:
291     case X86::PUSH64r:
292     case X86::PUSH64rmm:
293     case X86::PUSH64rmr:
294     case X86::PUSHF64:
295     case X86::PUSHFS64:
296     case X86::PUSHGS64:
297       return 8;
298     }
299     return 0;
300   }
301 
302   bool isSUB(const MCInst &Inst) const override {
303     return X86::isSUB(Inst.getOpcode());
304   }
305 
306   bool isLEA64r(const MCInst &Inst) const override {
307     return Inst.getOpcode() == X86::LEA64r;
308   }
309 
310   bool isLeave(const MCInst &Inst) const override {
311     return Inst.getOpcode() == X86::LEAVE || Inst.getOpcode() == X86::LEAVE64;
312   }
313 
314   bool isMoveMem2Reg(const MCInst &Inst) const override {
315     switch (Inst.getOpcode()) {
316     case X86::MOV16rm:
317     case X86::MOV32rm:
318     case X86::MOV64rm:
319       return true;
320     }
321     return false;
322   }
323 
324   bool isUnsupportedBranch(unsigned Opcode) const override {
325     switch (Opcode) {
326     default:
327       return false;
328     case X86::LOOP:
329     case X86::LOOPE:
330     case X86::LOOPNE:
331     case X86::JECXZ:
332     case X86::JRCXZ:
333       return true;
334     }
335   }
336 
337   bool isLoad(const MCInst &Inst) const override {
338     if (isPop(Inst))
339       return true;
340 
341     int MemOpNo = getMemoryOperandNo(Inst);
342     const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
343 
344     if (MemOpNo == -1)
345       return false;
346 
347     return MCII.mayLoad();
348   }
349 
350   bool isStore(const MCInst &Inst) const override {
351     if (isPush(Inst))
352       return true;
353 
354     int MemOpNo = getMemoryOperandNo(Inst);
355     const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
356 
357     if (MemOpNo == -1)
358       return false;
359 
360     return MCII.mayStore();
361   }
362 
363   bool isCleanRegXOR(const MCInst &Inst) const override {
364     switch (Inst.getOpcode()) {
365     case X86::XOR16rr:
366     case X86::XOR32rr:
367     case X86::XOR64rr:
368       break;
369     default:
370       return false;
371     }
372     return (Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg());
373   }
374 
375   bool isPacked(const MCInst &Inst) const override {
376     const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
377     return (Desc.TSFlags & X86II::OpPrefixMask) == X86II::PD;
378   }
379 
380   unsigned getTrapFillValue() const override { return 0xCC; }
381 
382   struct IndJmpMatcherFrag1 : MCInstMatcher {
383     std::unique_ptr<MCInstMatcher> Base;
384     std::unique_ptr<MCInstMatcher> Scale;
385     std::unique_ptr<MCInstMatcher> Index;
386     std::unique_ptr<MCInstMatcher> Offset;
387 
388     IndJmpMatcherFrag1(std::unique_ptr<MCInstMatcher> Base,
389                        std::unique_ptr<MCInstMatcher> Scale,
390                        std::unique_ptr<MCInstMatcher> Index,
391                        std::unique_ptr<MCInstMatcher> Offset)
392         : Base(std::move(Base)), Scale(std::move(Scale)),
393           Index(std::move(Index)), Offset(std::move(Offset)) {}
394 
395     bool match(const MCRegisterInfo &MRI, MCPlusBuilder &MIB,
396                MutableArrayRef<MCInst> InInstrWindow, int OpNum) override {
397       if (!MCInstMatcher::match(MRI, MIB, InInstrWindow, OpNum))
398         return false;
399 
400       if (CurInst->getOpcode() != X86::JMP64m)
401         return false;
402 
403       int MemOpNo = MIB.getMemoryOperandNo(*CurInst);
404       if (MemOpNo == -1)
405         return false;
406 
407       if (!Base->match(MRI, MIB, this->InstrWindow, MemOpNo + X86::AddrBaseReg))
408         return false;
409       if (!Scale->match(MRI, MIB, this->InstrWindow,
410                         MemOpNo + X86::AddrScaleAmt))
411         return false;
412       if (!Index->match(MRI, MIB, this->InstrWindow,
413                         MemOpNo + X86::AddrIndexReg))
414         return false;
415       if (!Offset->match(MRI, MIB, this->InstrWindow, MemOpNo + X86::AddrDisp))
416         return false;
417       return true;
418     }
419 
420     void annotate(MCPlusBuilder &MIB, StringRef Annotation) override {
421       MIB.addAnnotation(*CurInst, Annotation, true);
422       Base->annotate(MIB, Annotation);
423       Scale->annotate(MIB, Annotation);
424       Index->annotate(MIB, Annotation);
425       Offset->annotate(MIB, Annotation);
426     }
427   };
428 
429   std::unique_ptr<MCInstMatcher>
430   matchIndJmp(std::unique_ptr<MCInstMatcher> Base,
431               std::unique_ptr<MCInstMatcher> Scale,
432               std::unique_ptr<MCInstMatcher> Index,
433               std::unique_ptr<MCInstMatcher> Offset) const override {
434     return std::unique_ptr<MCInstMatcher>(
435         new IndJmpMatcherFrag1(std::move(Base), std::move(Scale),
436                                std::move(Index), std::move(Offset)));
437   }
438 
439   struct IndJmpMatcherFrag2 : MCInstMatcher {
440     std::unique_ptr<MCInstMatcher> Reg;
441 
442     IndJmpMatcherFrag2(std::unique_ptr<MCInstMatcher> Reg)
443         : Reg(std::move(Reg)) {}
444 
445     bool match(const MCRegisterInfo &MRI, MCPlusBuilder &MIB,
446                MutableArrayRef<MCInst> InInstrWindow, int OpNum) override {
447       if (!MCInstMatcher::match(MRI, MIB, InInstrWindow, OpNum))
448         return false;
449 
450       if (CurInst->getOpcode() != X86::JMP64r)
451         return false;
452 
453       return Reg->match(MRI, MIB, this->InstrWindow, 0);
454     }
455 
456     void annotate(MCPlusBuilder &MIB, StringRef Annotation) override {
457       MIB.addAnnotation(*CurInst, Annotation, true);
458       Reg->annotate(MIB, Annotation);
459     }
460   };
461 
462   std::unique_ptr<MCInstMatcher>
463   matchIndJmp(std::unique_ptr<MCInstMatcher> Target) const override {
464     return std::unique_ptr<MCInstMatcher>(
465         new IndJmpMatcherFrag2(std::move(Target)));
466   }
467 
468   struct LoadMatcherFrag1 : MCInstMatcher {
469     std::unique_ptr<MCInstMatcher> Base;
470     std::unique_ptr<MCInstMatcher> Scale;
471     std::unique_ptr<MCInstMatcher> Index;
472     std::unique_ptr<MCInstMatcher> Offset;
473 
474     LoadMatcherFrag1(std::unique_ptr<MCInstMatcher> Base,
475                      std::unique_ptr<MCInstMatcher> Scale,
476                      std::unique_ptr<MCInstMatcher> Index,
477                      std::unique_ptr<MCInstMatcher> Offset)
478         : Base(std::move(Base)), Scale(std::move(Scale)),
479           Index(std::move(Index)), Offset(std::move(Offset)) {}
480 
481     bool match(const MCRegisterInfo &MRI, MCPlusBuilder &MIB,
482                MutableArrayRef<MCInst> InInstrWindow, int OpNum) override {
483       if (!MCInstMatcher::match(MRI, MIB, InInstrWindow, OpNum))
484         return false;
485 
486       if (CurInst->getOpcode() != X86::MOV64rm &&
487           CurInst->getOpcode() != X86::MOVSX64rm32)
488         return false;
489 
490       int MemOpNo = MIB.getMemoryOperandNo(*CurInst);
491       if (MemOpNo == -1)
492         return false;
493 
494       if (!Base->match(MRI, MIB, this->InstrWindow, MemOpNo + X86::AddrBaseReg))
495         return false;
496       if (!Scale->match(MRI, MIB, this->InstrWindow,
497                         MemOpNo + X86::AddrScaleAmt))
498         return false;
499       if (!Index->match(MRI, MIB, this->InstrWindow,
500                         MemOpNo + X86::AddrIndexReg))
501         return false;
502       if (!Offset->match(MRI, MIB, this->InstrWindow, MemOpNo + X86::AddrDisp))
503         return false;
504       return true;
505     }
506 
507     void annotate(MCPlusBuilder &MIB, StringRef Annotation) override {
508       MIB.addAnnotation(*CurInst, Annotation, true);
509       Base->annotate(MIB, Annotation);
510       Scale->annotate(MIB, Annotation);
511       Index->annotate(MIB, Annotation);
512       Offset->annotate(MIB, Annotation);
513     }
514   };
515 
516   std::unique_ptr<MCInstMatcher>
517   matchLoad(std::unique_ptr<MCInstMatcher> Base,
518             std::unique_ptr<MCInstMatcher> Scale,
519             std::unique_ptr<MCInstMatcher> Index,
520             std::unique_ptr<MCInstMatcher> Offset) const override {
521     return std::unique_ptr<MCInstMatcher>(
522         new LoadMatcherFrag1(std::move(Base), std::move(Scale),
523                              std::move(Index), std::move(Offset)));
524   }
525 
526   struct AddMatcher : MCInstMatcher {
527     std::unique_ptr<MCInstMatcher> A;
528     std::unique_ptr<MCInstMatcher> B;
529 
530     AddMatcher(std::unique_ptr<MCInstMatcher> A,
531                std::unique_ptr<MCInstMatcher> B)
532         : A(std::move(A)), B(std::move(B)) {}
533 
534     bool match(const MCRegisterInfo &MRI, MCPlusBuilder &MIB,
535                MutableArrayRef<MCInst> InInstrWindow, int OpNum) override {
536       if (!MCInstMatcher::match(MRI, MIB, InInstrWindow, OpNum))
537         return false;
538 
539       if (CurInst->getOpcode() == X86::ADD64rr ||
540           CurInst->getOpcode() == X86::ADD64rr_DB ||
541           CurInst->getOpcode() == X86::ADD64rr_REV) {
542         if (!A->match(MRI, MIB, this->InstrWindow, 1)) {
543           if (!B->match(MRI, MIB, this->InstrWindow, 1))
544             return false;
545           return A->match(MRI, MIB, this->InstrWindow, 2);
546         }
547 
548         if (B->match(MRI, MIB, this->InstrWindow, 2))
549           return true;
550 
551         if (!B->match(MRI, MIB, this->InstrWindow, 1))
552           return false;
553         return A->match(MRI, MIB, this->InstrWindow, 2);
554       }
555 
556       return false;
557     }
558 
559     void annotate(MCPlusBuilder &MIB, StringRef Annotation) override {
560       MIB.addAnnotation(*CurInst, Annotation, true);
561       A->annotate(MIB, Annotation);
562       B->annotate(MIB, Annotation);
563     }
564   };
565 
566   virtual std::unique_ptr<MCInstMatcher>
567   matchAdd(std::unique_ptr<MCInstMatcher> A,
568            std::unique_ptr<MCInstMatcher> B) const override {
569     return std::unique_ptr<MCInstMatcher>(
570         new AddMatcher(std::move(A), std::move(B)));
571   }
572 
573   struct LEAMatcher : MCInstMatcher {
574     std::unique_ptr<MCInstMatcher> Target;
575 
576     LEAMatcher(std::unique_ptr<MCInstMatcher> Target)
577         : Target(std::move(Target)) {}
578 
579     bool match(const MCRegisterInfo &MRI, MCPlusBuilder &MIB,
580                MutableArrayRef<MCInst> InInstrWindow, int OpNum) override {
581       if (!MCInstMatcher::match(MRI, MIB, InInstrWindow, OpNum))
582         return false;
583 
584       if (CurInst->getOpcode() != X86::LEA64r)
585         return false;
586 
587       if (CurInst->getOperand(1 + X86::AddrScaleAmt).getImm() != 1 ||
588           CurInst->getOperand(1 + X86::AddrIndexReg).getReg() !=
589               X86::NoRegister ||
590           (CurInst->getOperand(1 + X86::AddrBaseReg).getReg() !=
591                X86::NoRegister &&
592            CurInst->getOperand(1 + X86::AddrBaseReg).getReg() != X86::RIP))
593         return false;
594 
595       return Target->match(MRI, MIB, this->InstrWindow, 1 + X86::AddrDisp);
596     }
597 
598     void annotate(MCPlusBuilder &MIB, StringRef Annotation) override {
599       MIB.addAnnotation(*CurInst, Annotation, true);
600       Target->annotate(MIB, Annotation);
601     }
602   };
603 
604   virtual std::unique_ptr<MCInstMatcher>
605   matchLoadAddr(std::unique_ptr<MCInstMatcher> Target) const override {
606     return std::unique_ptr<MCInstMatcher>(new LEAMatcher(std::move(Target)));
607   }
608 
609   bool hasPCRelOperand(const MCInst &Inst) const override {
610     for (const MCOperand &Operand : Inst)
611       if (Operand.isReg() && Operand.getReg() == X86::RIP)
612         return true;
613     return false;
614   }
615 
616   int getMemoryOperandNo(const MCInst &Inst) const override {
617     unsigned Opcode = Inst.getOpcode();
618     const MCInstrDesc &Desc = Info->get(Opcode);
619     int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);
620     if (MemOpNo >= 0)
621       MemOpNo += X86II::getOperandBias(Desc);
622     return MemOpNo;
623   }
624 
625   bool hasEVEXEncoding(const MCInst &Inst) const override {
626     const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
627     return (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
628   }
629 
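  /// Check whether the instructions in Insts form a macro-op fusion pair:
  /// prefixes are skipped, the first real instruction must be a fusible
  /// compare/test/ALU op without a RIP-relative operand, and the second
  /// must be a conditional branch whose condition code fuses with it.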
630   bool isMacroOpFusionPair(ArrayRef<MCInst> Insts) const override {
631     const auto *I = Insts.begin();
632     while (I != Insts.end() && isPrefix(*I))
633       ++I;
634     if (I == Insts.end())
635       return false;
636 
637     const MCInst &FirstInst = *I;
638     ++I;
639     while (I != Insts.end() && isPrefix(*I))
640       ++I;
641     if (I == Insts.end())
642       return false;
643     const MCInst &SecondInst = *I;
644 
645     if (!isConditionalBranch(SecondInst))
646       return false;
647     // Cannot fuse if the first instruction uses RIP-relative memory.
648     if (hasPCRelOperand(FirstInst))
649       return false;
650 
651     const X86::FirstMacroFusionInstKind CmpKind =
652         X86::classifyFirstOpcodeInMacroFusion(FirstInst.getOpcode());
653     if (CmpKind == X86::FirstMacroFusionInstKind::Invalid)
654       return false;
655 
656     X86::CondCode CC = static_cast<X86::CondCode>(getCondCode(SecondInst));
657     X86::SecondMacroFusionInstKind BranchKind =
658         X86::classifySecondCondCodeInMacroFusion(CC);
659     if (BranchKind == X86::SecondMacroFusionInstKind::Invalid)
660       return false;
661     return X86::isMacroFused(CmpKind, BranchKind);
662   }
663 
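  /// Extract the components of an x86 memory operand, which occupies five
  /// consecutive MCOperands starting at getMemoryOperandNo(Inst): base
  /// register, scale, index register, displacement (immediate or
  /// expression), and segment register. Returns false if the memory
  /// operand is missing or malformed.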
664   bool
665   evaluateX86MemoryOperand(const MCInst &Inst, unsigned *BaseRegNum,
666                            int64_t *ScaleImm, unsigned *IndexRegNum,
667                            int64_t *DispImm, unsigned *SegmentRegNum,
668                            const MCExpr **DispExpr = nullptr) const override {
669     assert(BaseRegNum && ScaleImm && IndexRegNum && SegmentRegNum &&
670            "one of the input pointers is null");
671     int MemOpNo = getMemoryOperandNo(Inst);
672     if (MemOpNo < 0)
673       return false;
674     unsigned MemOpOffset = static_cast<unsigned>(MemOpNo);
675 
676     if (MemOpOffset + X86::AddrSegmentReg >= MCPlus::getNumPrimeOperands(Inst))
677       return false;
678 
679     const MCOperand &Base = Inst.getOperand(MemOpOffset + X86::AddrBaseReg);
680     const MCOperand &Scale = Inst.getOperand(MemOpOffset + X86::AddrScaleAmt);
681     const MCOperand &Index = Inst.getOperand(MemOpOffset + X86::AddrIndexReg);
682     const MCOperand &Disp = Inst.getOperand(MemOpOffset + X86::AddrDisp);
683     const MCOperand &Segment =
684         Inst.getOperand(MemOpOffset + X86::AddrSegmentReg);
685 
686     // Make sure it is a well-formed memory operand.
687     if (!Base.isReg() || !Scale.isImm() || !Index.isReg() ||
688         (!Disp.isImm() && !Disp.isExpr()) || !Segment.isReg())
689       return false;
690 
691     *BaseRegNum = Base.getReg();
692     *ScaleImm = Scale.getImm();
693     *IndexRegNum = Index.getReg();
694     if (Disp.isImm()) {
695       assert(DispImm && "DispImm needs to be set");
696       *DispImm = Disp.getImm();
697       if (DispExpr)
698         *DispExpr = nullptr;
699     } else {
700       assert(DispExpr && "DispExpr needs to be set");
701       *DispExpr = Disp.getExpr();
702       if (DispImm)
703         *DispImm = 0;
704     }
705     *SegmentRegNum = Segment.getReg();
706     return true;
707   }
708 
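  /// Statically resolve the target address of a memory operand. Only
  /// absolute and RIP-relative forms with an immediate displacement are
  /// handled; for RIP-relative addressing the result is
  /// Disp + Address + Size, i.e. the displacement added to the address of
  /// the next instruction.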
709   bool evaluateMemOperandTarget(const MCInst &Inst, uint64_t &Target,
710                                 uint64_t Address,
711                                 uint64_t Size) const override {
712     unsigned      BaseRegNum;
713     int64_t       ScaleValue;
714     unsigned      IndexRegNum;
715     int64_t       DispValue;
716     unsigned      SegRegNum;
717     const MCExpr *DispExpr = nullptr;
718     if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, &ScaleValue, &IndexRegNum,
719                                   &DispValue, &SegRegNum, &DispExpr))
720       return false;
721 
722     // Make sure it's well-formed addressing that we can statically evaluate.
723     if ((BaseRegNum != X86::RIP && BaseRegNum != X86::NoRegister) ||
724         IndexRegNum != X86::NoRegister || SegRegNum != X86::NoRegister ||
725         DispExpr)
726       return false;
727 
728     Target = DispValue;
729     if (BaseRegNum == X86::RIP) {
730       assert(Size != 0 && "instruction size required in order to statically "
731                           "evaluate RIP-relative address");
732       Target += Address + Size;
733     }
734     return true;
735   }
736 
737   MCInst::iterator getMemOperandDisp(MCInst &Inst) const override {
738     int MemOpNo = getMemoryOperandNo(Inst);
739     if (MemOpNo < 0)
740       return Inst.end();
741     return Inst.begin() + (MemOpNo + X86::AddrDisp);
742   }
743 
744   bool replaceMemOperandDisp(MCInst &Inst, MCOperand Operand) const override {
745     MCOperand *OI = getMemOperandDisp(Inst);
746     if (OI == Inst.end())
747       return false;
748     *OI = Operand;
749     return true;
750   }
751 
752   /// Get the registers used as function parameters.
753   /// This function is specific to the x86_64 ABI on Linux.
754   BitVector getRegsUsedAsParams() const override {
755     BitVector Regs = BitVector(RegInfo->getNumRegs(), false);
756     Regs |= getAliases(X86::RSI);
757     Regs |= getAliases(X86::RDI);
758     Regs |= getAliases(X86::RDX);
759     Regs |= getAliases(X86::RCX);
760     Regs |= getAliases(X86::R8);
761     Regs |= getAliases(X86::R9);
762     return Regs;
763   }
764 
765   void getCalleeSavedRegs(BitVector &Regs) const override {
766     Regs |= getAliases(X86::RBX);
767     Regs |= getAliases(X86::RBP);
768     Regs |= getAliases(X86::R12);
769     Regs |= getAliases(X86::R13);
770     Regs |= getAliases(X86::R14);
771     Regs |= getAliases(X86::R15);
772   }
773 
774   void getDefaultDefIn(BitVector &Regs) const override {
775     assert(Regs.size() >= RegInfo->getNumRegs() &&
776            "The size of BitVector is less than RegInfo->getNumRegs().");
777     Regs.set(X86::RAX);
778     Regs.set(X86::RCX);
779     Regs.set(X86::RDX);
780     Regs.set(X86::RSI);
781     Regs.set(X86::RDI);
782     Regs.set(X86::R8);
783     Regs.set(X86::R9);
784     Regs.set(X86::XMM0);
785     Regs.set(X86::XMM1);
786     Regs.set(X86::XMM2);
787     Regs.set(X86::XMM3);
788     Regs.set(X86::XMM4);
789     Regs.set(X86::XMM5);
790     Regs.set(X86::XMM6);
791     Regs.set(X86::XMM7);
792   }
793 
794   void getDefaultLiveOut(BitVector &Regs) const override {
795     assert(Regs.size() >= RegInfo->getNumRegs() &&
796            "The size of BitVector is less than RegInfo->getNumRegs().");
797     Regs |= getAliases(X86::RAX);
798     Regs |= getAliases(X86::RDX);
799     Regs |= getAliases(X86::RCX);
800     Regs |= getAliases(X86::XMM0);
801     Regs |= getAliases(X86::XMM1);
802   }
803 
804   void getGPRegs(BitVector &Regs, bool IncludeAlias) const override {
805     if (IncludeAlias) {
806       Regs |= getAliases(X86::RAX);
807       Regs |= getAliases(X86::RBX);
808       Regs |= getAliases(X86::RBP);
809       Regs |= getAliases(X86::RSI);
810       Regs |= getAliases(X86::RDI);
811       Regs |= getAliases(X86::RDX);
812       Regs |= getAliases(X86::RCX);
813       Regs |= getAliases(X86::R8);
814       Regs |= getAliases(X86::R9);
815       Regs |= getAliases(X86::R10);
816       Regs |= getAliases(X86::R11);
817       Regs |= getAliases(X86::R12);
818       Regs |= getAliases(X86::R13);
819       Regs |= getAliases(X86::R14);
820       Regs |= getAliases(X86::R15);
821       return;
822     }
823     Regs.set(X86::RAX);
824     Regs.set(X86::RBX);
825     Regs.set(X86::RBP);
826     Regs.set(X86::RSI);
827     Regs.set(X86::RDI);
828     Regs.set(X86::RDX);
829     Regs.set(X86::RCX);
830     Regs.set(X86::R8);
831     Regs.set(X86::R9);
832     Regs.set(X86::R10);
833     Regs.set(X86::R11);
834     Regs.set(X86::R12);
835     Regs.set(X86::R13);
836     Regs.set(X86::R14);
837     Regs.set(X86::R15);
838   }
839 
840   void getClassicGPRegs(BitVector &Regs) const override {
841     Regs |= getAliases(X86::RAX);
842     Regs |= getAliases(X86::RBX);
843     Regs |= getAliases(X86::RBP);
844     Regs |= getAliases(X86::RSI);
845     Regs |= getAliases(X86::RDI);
846     Regs |= getAliases(X86::RDX);
847     Regs |= getAliases(X86::RCX);
848   }
849 
850   void getRepRegs(BitVector &Regs) const override {
851     Regs |= getAliases(X86::RCX);
852   }
853 
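  /// Map a general-purpose register to its alias of the requested byte
  /// width, e.g. (X86::EAX, 8) yields X86::RAX and (X86::R10, 2) yields
  /// X86::R10W.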
854   MCPhysReg getAliasSized(MCPhysReg Reg, uint8_t Size) const override {
855     switch (Reg) {
856     case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: case X86::AH:
857       switch (Size) {
858       case 8: return X86::RAX;       case 4: return X86::EAX;
859       case 2: return X86::AX;        case 1: return X86::AL;
860       default: llvm_unreachable("Unexpected size");
861       }
862     case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: case X86::BH:
863       switch (Size) {
864       case 8: return X86::RBX;       case 4: return X86::EBX;
865       case 2: return X86::BX;        case 1: return X86::BL;
866       default: llvm_unreachable("Unexpected size");
867       }
868     case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: case X86::DH:
869       switch (Size) {
870       case 8: return X86::RDX;       case 4: return X86::EDX;
871       case 2: return X86::DX;        case 1: return X86::DL;
872       default: llvm_unreachable("Unexpected size");
873       }
874     case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL:
875       switch (Size) {
876       case 8: return X86::RDI;       case 4: return X86::EDI;
877       case 2: return X86::DI;        case 1: return X86::DIL;
878       default: llvm_unreachable("Unexpected size");
879       }
880     case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL:
881       switch (Size) {
882       case 8: return X86::RSI;       case 4: return X86::ESI;
883       case 2: return X86::SI;        case 1: return X86::SIL;
884       default: llvm_unreachable("Unexpected size");
885       }
886     case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: case X86::CH:
887       switch (Size) {
888       case 8: return X86::RCX;       case 4: return X86::ECX;
889       case 2: return X86::CX;        case 1: return X86::CL;
890       default: llvm_unreachable("Unexpected size");
891       }
892     case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL:
893       switch (Size) {
894       case 8: return X86::RSP;       case 4: return X86::ESP;
895       case 2: return X86::SP;        case 1: return X86::SPL;
896       default: llvm_unreachable("Unexpected size");
897       }
898     case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL:
899       switch (Size) {
900       case 8: return X86::RBP;       case 4: return X86::EBP;
901       case 2: return X86::BP;        case 1: return X86::BPL;
902       default: llvm_unreachable("Unexpected size");
903       }
904     case X86::R8: case X86::R8D: case X86::R8W: case X86::R8B:
905       switch (Size) {
906       case 8: return X86::R8;        case 4: return X86::R8D;
907       case 2: return X86::R8W;       case 1: return X86::R8B;
908       default: llvm_unreachable("Unexpected size");
909       }
910     case X86::R9: case X86::R9D: case X86::R9W: case X86::R9B:
911       switch (Size) {
912       case 8: return X86::R9;        case 4: return X86::R9D;
913       case 2: return X86::R9W;       case 1: return X86::R9B;
914       default: llvm_unreachable("Unexpected size");
915       }
916     case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
917       switch (Size) {
918       case 8: return X86::R10;        case 4: return X86::R10D;
919       case 2: return X86::R10W;       case 1: return X86::R10B;
920       default: llvm_unreachable("Unexpected size");
921       }
922     case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
923       switch (Size) {
924       case 8: return X86::R11;        case 4: return X86::R11D;
925       case 2: return X86::R11W;       case 1: return X86::R11B;
926       default: llvm_unreachable("Unexpected size");
927       }
928     case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
929       switch (Size) {
930       case 8: return X86::R12;        case 4: return X86::R12D;
931       case 2: return X86::R12W;       case 1: return X86::R12B;
932       default: llvm_unreachable("Unexpected size");
933       }
934     case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
935       switch (Size) {
936       case 8: return X86::R13;        case 4: return X86::R13D;
937       case 2: return X86::R13W;       case 1: return X86::R13B;
938       default: llvm_unreachable("Unexpected size");
939       }
940     case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
941       switch (Size) {
942       case 8: return X86::R14;        case 4: return X86::R14D;
943       case 2: return X86::R14W;       case 1: return X86::R14B;
944       default: llvm_unreachable("Unexpected size");
945       }
946     case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
947       switch (Size) {
948       case 8: return X86::R15;        case 4: return X86::R15D;
949       case 2: return X86::R15W;       case 1: return X86::R15B;
950       default: llvm_unreachable("Unexpected size");
951       }
952     default:
953       dbgs() << Reg << " (get alias sized)\n";
954       llvm_unreachable("Unexpected reg number");
955       break;
956     }
957   }
958 
959   bool isUpper8BitReg(MCPhysReg Reg) const override {
960     switch (Reg) {
961     case X86::AH:
962     case X86::BH:
963     case X86::CH:
964     case X86::DH:
965       return true;
966     default:
967       return false;
968     }
969   }
970 
971   bool cannotUseREX(const MCInst &Inst) const override {
972     switch (Inst.getOpcode()) {
973     case X86::MOV8mr_NOREX:
974     case X86::MOV8rm_NOREX:
975     case X86::MOV8rr_NOREX:
976     case X86::MOVSX32rm8_NOREX:
977     case X86::MOVSX32rr8_NOREX:
978     case X86::MOVZX32rm8_NOREX:
979     case X86::MOVZX32rr8_NOREX:
980     case X86::MOV8mr:
981     case X86::MOV8rm:
982     case X86::MOV8rr:
983     case X86::MOVSX32rm8:
984     case X86::MOVSX32rr8:
985     case X86::MOVZX32rm8:
986     case X86::MOVZX32rr8:
987     case X86::TEST8ri:
988       for (const MCOperand &Operand : MCPlus::primeOperands(Inst)) {
989         if (!Operand.isReg())
990           continue;
991         if (isUpper8BitReg(Operand.getReg()))
992           return true;
993       }
994       LLVM_FALLTHROUGH;
995     default:
996       return false;
997     }
998   }
999 
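  /// Determine whether Inst accesses the stack and, if so, describe the
  /// access through the output parameters: direction (load/store), source
  /// register or immediate, stack pointer register used (RSP or RBP),
  /// offset, access size, whether the access is simple enough to be
  /// rewritten (IsSimple), and whether it uses an index or segment
  /// register (IsIndexed). Pushes and pops are recognized as special cases.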
1000   bool isStackAccess(const MCInst &Inst, bool &IsLoad, bool &IsStore,
1001                      bool &IsStoreFromReg, MCPhysReg &Reg, int32_t &SrcImm,
1002                      uint16_t &StackPtrReg, int64_t &StackOffset, uint8_t &Size,
1003                      bool &IsSimple, bool &IsIndexed) const override {
1004     // Detect simple push/pop cases first
1005     if (int Sz = getPushSize(Inst)) {
1006       IsLoad = false;
1007       IsStore = true;
1008       IsStoreFromReg = true;
1009       StackPtrReg = X86::RSP;
1010       StackOffset = -Sz;
1011       Size = Sz;
1012       IsSimple = true;
1013       if (Inst.getOperand(0).isImm())
1014         SrcImm = Inst.getOperand(0).getImm();
1015       else if (Inst.getOperand(0).isReg())
1016         Reg = Inst.getOperand(0).getReg();
1017       else
1018         IsSimple = false;
1019 
1020       return true;
1021     }
1022     if (int Sz = getPopSize(Inst)) {
1023       IsLoad = true;
1024       IsStore = false;
1025       if (Inst.getNumOperands() == 0 || !Inst.getOperand(0).isReg()) {
1026         IsSimple = false;
1027       } else {
1028         Reg = Inst.getOperand(0).getReg();
1029         IsSimple = true;
1030       }
1031       StackPtrReg = X86::RSP;
1032       StackOffset = 0;
1033       Size = Sz;
1034       return true;
1035     }
1036 
1037     struct InstInfo {
1038       // Size in bytes that Inst loads from or stores to memory.
1039       uint8_t DataSize;
1040       bool IsLoad;
1041       bool IsStore;
1042       bool StoreFromReg;
1043       bool Simple;
1044     };
1045 
1046     InstInfo I;
1047     int MemOpNo = getMemoryOperandNo(Inst);
1048     const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
1049     // If the instruction does not reference a memory operand (or is a call), discard it.
1050     if (MemOpNo == -1 || MCII.isCall())
1051       return false;
1052 
1053     switch (Inst.getOpcode()) {
1054     default: {
1055       uint8_t Sz = 0;
1056       bool IsLoad = MCII.mayLoad();
1057       bool IsStore = MCII.mayStore();
1058       // Is it LEA? (references memory but neither loads nor stores)
1059       if (!IsLoad && !IsStore)
1060         return false;
1061 
1062       // Try to guess data size involved in the load/store by looking at the
1063       // register size. If there's no reg involved, return 0 as size, meaning
1064       // we don't know.
1065       for (unsigned I = 0, E = MCII.getNumOperands(); I != E; ++I) {
1066         if (MCII.OpInfo[I].OperandType != MCOI::OPERAND_REGISTER)
1067           continue;
1068         if (static_cast<int>(I) >= MemOpNo && static_cast<int>(I) < MemOpNo + X86::AddrNumOperands)
1069           continue;
1070         Sz = RegInfo->getRegClass(MCII.OpInfo[I].RegClass).getSizeInBits() / 8;
1071         break;
1072       }
1073       I = {Sz, IsLoad, IsStore, false, false};
1074       break;
1075     }
1076     case X86::MOV16rm: I = {2, true, false, false, true}; break;
1077     case X86::MOV32rm: I = {4, true, false, false, true}; break;
1078     case X86::MOV64rm: I = {8, true, false, false, true}; break;
1079     case X86::MOV16mr: I = {2, false, true, true, true};  break;
1080     case X86::MOV32mr: I = {4, false, true, true, true};  break;
1081     case X86::MOV64mr: I = {8, false, true, true, true};  break;
1082     case X86::MOV16mi: I = {2, false, true, false, true}; break;
1083     case X86::MOV32mi: I = {4, false, true, false, true}; break;
1084     } // end switch (Inst.getOpcode())
1085 
1086     unsigned BaseRegNum;
1087     int64_t ScaleValue;
1088     unsigned IndexRegNum;
1089     int64_t DispValue;
1090     unsigned SegRegNum;
1091     const MCExpr *DispExpr;
1092     if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, &ScaleValue, &IndexRegNum,
1093                                   &DispValue, &SegRegNum, &DispExpr)) {
1094       LLVM_DEBUG(dbgs() << "Evaluate failed on ");
1095       LLVM_DEBUG(Inst.dump());
1096       return false;
1097     }
1098 
1099     // Make sure it's a stack access
1100     if (BaseRegNum != X86::RBP && BaseRegNum != X86::RSP)
1101       return false;
1102 
1103     IsLoad = I.IsLoad;
1104     IsStore = I.IsStore;
1105     IsStoreFromReg = I.StoreFromReg;
1106     Size = I.DataSize;
1107     IsSimple = I.Simple;
1108     StackPtrReg = BaseRegNum;
1109     StackOffset = DispValue;
1110     IsIndexed = IndexRegNum != X86::NoRegister || SegRegNum != X86::NoRegister;
1111 
1112     if (!I.Simple)
1113       return true;
1114 
1115     // Retrieve the register involved in simple MOV to/from stack operations.
1116     unsigned MemOpOffset = static_cast<unsigned>(MemOpNo);
1117     if (I.IsLoad) {
1118       MCOperand RegOpnd = Inst.getOperand(0);
1119       assert(RegOpnd.isReg() && "unexpected destination operand");
1120       Reg = RegOpnd.getReg();
1121     } else if (I.IsStore) {
1122       MCOperand SrcOpnd =
1123           Inst.getOperand(MemOpOffset + X86::AddrSegmentReg + 1);
1124       if (I.StoreFromReg) {
1125         assert(SrcOpnd.isReg() && "unexpected source operand");
1126         Reg = SrcOpnd.getReg();
1127       } else {
1128         assert(SrcOpnd.isImm() && "unexpected source operand");
1129         SrcImm = SrcOpnd.getImm();
1130       }
1131     }
1132 
1133     return true;
1134   }
1135 
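  /// Convert a simple RSP/RBP-based MOV to or from the stack into the
  /// equivalent push or pop, e.g. a MOV64mr store of a register becomes
  /// PUSH64r and a MOV64rm load becomes POP64r.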
1136   void changeToPushOrPop(MCInst &Inst) const override {
1137     assert(!isPush(Inst) && !isPop(Inst));
1138 
1139     struct InstInfo {
1140       // Size in bytes that Inst loads from or stores to memory.
1141       uint8_t DataSize;
1142       bool IsLoad;
1143       bool StoreFromReg;
1144     };
1145 
1146     InstInfo I;
1147     switch (Inst.getOpcode()) {
1148     default: {
1149       llvm_unreachable("Unhandled opcode");
1150       return;
1151     }
1152     case X86::MOV16rm: I = {2, true, false}; break;
1153     case X86::MOV32rm: I = {4, true, false}; break;
1154     case X86::MOV64rm: I = {8, true, false}; break;
1155     case X86::MOV16mr: I = {2, false, true};  break;
1156     case X86::MOV32mr: I = {4, false, true};  break;
1157     case X86::MOV64mr: I = {8, false, true};  break;
1158     case X86::MOV16mi: I = {2, false, false}; break;
1159     case X86::MOV32mi: I = {4, false, false}; break;
1160     } // end switch (Inst.getOpcode())
1161 
1162     unsigned BaseRegNum;
1163     int64_t ScaleValue;
1164     unsigned IndexRegNum;
1165     int64_t DispValue;
1166     unsigned SegRegNum;
1167     const MCExpr *DispExpr;
1168     if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, &ScaleValue, &IndexRegNum,
1169                                   &DispValue, &SegRegNum, &DispExpr)) {
1170       llvm_unreachable("Evaluate failed");
1171       return;
1172     }
1173     // Make sure it's a stack access
1174     if (BaseRegNum != X86::RBP && BaseRegNum != X86::RSP) {
1175       llvm_unreachable("Not a stack access");
1176       return;
1177     }
1178 
1179     unsigned MemOpOffset = getMemoryOperandNo(Inst);
1180     unsigned NewOpcode = 0;
1181     if (I.IsLoad) {
1182       switch (I.DataSize) {
1183       case 2: NewOpcode = X86::POP16r; break;
1184       case 4: NewOpcode = X86::POP32r; break;
1185       case 8: NewOpcode = X86::POP64r; break;
1186       default:
1187         llvm_unreachable("Unexpected size");
1188       }
1189       unsigned RegOpndNum = Inst.getOperand(0).getReg();
1190       Inst.clear();
1191       Inst.setOpcode(NewOpcode);
1192       Inst.addOperand(MCOperand::createReg(RegOpndNum));
1193     } else {
1194       MCOperand SrcOpnd =
1195           Inst.getOperand(MemOpOffset + X86::AddrSegmentReg + 1);
1196       if (I.StoreFromReg) {
1197         switch (I.DataSize) {
1198         case 2: NewOpcode = X86::PUSH16r; break;
1199         case 4: NewOpcode = X86::PUSH32r; break;
1200         case 8: NewOpcode = X86::PUSH64r; break;
1201         default:
1202           llvm_unreachable("Unexpected size");
1203         }
1204         assert(SrcOpnd.isReg() && "Unexpected source operand");
1205         unsigned RegOpndNum = SrcOpnd.getReg();
1206         Inst.clear();
1207         Inst.setOpcode(NewOpcode);
1208         Inst.addOperand(MCOperand::createReg(RegOpndNum));
1209       } else {
1210         switch (I.DataSize) {
1211         case 2: NewOpcode = X86::PUSH16i8; break;
1212         case 4: NewOpcode = X86::PUSH32i8; break;
1213         case 8: NewOpcode = X86::PUSH64i32; break;
1214         default:
1215           llvm_unreachable("Unexpected size");
1216         }
1217         assert(SrcOpnd.isImm() && "Unexpected source operand");
1218         int64_t SrcImm = SrcOpnd.getImm();
1219         Inst.clear();
1220         Inst.setOpcode(NewOpcode);
1221         Inst.addOperand(MCOperand::createImm(SrcImm));
1222       }
1223     }
1224   }
1225 
1226   bool isStackAdjustment(const MCInst &Inst) const override {
1227     switch (Inst.getOpcode()) {
1228     default:
1229       return false;
1230     case X86::SUB64ri32:
1231     case X86::SUB64ri8:
1232     case X86::ADD64ri32:
1233     case X86::ADD64ri8:
1234     case X86::LEA64r:
1235       break;
1236     }
1237 
1238     const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
1239     for (int I = 0, E = MCII.getNumDefs(); I != E; ++I) {
1240       const MCOperand &Operand = Inst.getOperand(I);
1241       if (Operand.isReg() && Operand.getReg() == X86::RSP)
1242         return true;
1243     }
1244     return false;
1245   }
1246 
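  /// Given known values for up to two registers (Input1 and Input2),
  /// compute the value written by Inst to its output register; this is
  /// used to track stack offsets. For example, with Input1 = {X86::RSP, 0},
  /// a "subq $0x20, %rsp" yields Output = -32.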
1247   bool
1248   evaluateStackOffsetExpr(const MCInst &Inst, int64_t &Output,
1249                           std::pair<MCPhysReg, int64_t> Input1,
1250                           std::pair<MCPhysReg, int64_t> Input2) const override {
1251 
1252     auto getOperandVal = [&](MCPhysReg Reg) -> ErrorOr<int64_t> {
1253       if (Reg == Input1.first)
1254         return Input1.second;
1255       if (Reg == Input2.first)
1256         return Input2.second;
1257       return make_error_code(errc::result_out_of_range);
1258     };
1259 
1260     switch (Inst.getOpcode()) {
1261     default:
1262       return false;
1263 
1264     case X86::SUB64ri32:
1265     case X86::SUB64ri8:
1266       if (!Inst.getOperand(2).isImm())
1267         return false;
1268       if (ErrorOr<int64_t> InputVal =
1269               getOperandVal(Inst.getOperand(1).getReg()))
1270         Output = *InputVal - Inst.getOperand(2).getImm();
1271       else
1272         return false;
1273       break;
1274     case X86::ADD64ri32:
1275     case X86::ADD64ri8:
1276       if (!Inst.getOperand(2).isImm())
1277         return false;
1278       if (ErrorOr<int64_t> InputVal =
1279               getOperandVal(Inst.getOperand(1).getReg()))
1280         Output = *InputVal + Inst.getOperand(2).getImm();
1281       else
1282         return false;
1283       break;
1284     case X86::ADD64i32:
1285       if (!Inst.getOperand(0).isImm())
1286         return false;
1287       if (ErrorOr<int64_t> InputVal = getOperandVal(X86::RAX))
1288         Output = *InputVal + Inst.getOperand(0).getImm();
1289       else
1290         return false;
1291       break;
1292 
1293     case X86::LEA64r: {
1294       unsigned BaseRegNum;
1295       int64_t ScaleValue;
1296       unsigned IndexRegNum;
1297       int64_t DispValue;
1298       unsigned SegRegNum;
1299       const MCExpr *DispExpr = nullptr;
1300       if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, &ScaleValue,
1301                                     &IndexRegNum, &DispValue, &SegRegNum,
1302                                     &DispExpr))
1303         return false;
1304 
1305       if (BaseRegNum == X86::NoRegister || IndexRegNum != X86::NoRegister ||
1306           SegRegNum != X86::NoRegister || DispExpr)
1307         return false;
1308 
1309       if (ErrorOr<int64_t> InputVal = getOperandVal(BaseRegNum))
1310         Output = *InputVal + DispValue;
1311       else
1312         return false;
1313 
1314       break;
1315     }
1316     }
1317     return true;
1318   }
1319 
1320   bool isRegToRegMove(const MCInst &Inst, MCPhysReg &From,
1321                       MCPhysReg &To) const override {
1322     switch (Inst.getOpcode()) {
1323     default:
1324       return false;
1325     case X86::LEAVE:
1326     case X86::LEAVE64:
1327       To = getStackPointer();
1328       From = getFramePointer();
1329       return true;
1330     case X86::MOV64rr:
1331       To = Inst.getOperand(0).getReg();
1332       From = Inst.getOperand(1).getReg();
1333       return true;
1334     }
1335   }
1336 
1337   MCPhysReg getStackPointer() const override { return X86::RSP; }
1338   MCPhysReg getFramePointer() const override { return X86::RBP; }
1339   MCPhysReg getFlagsReg() const override { return X86::EFLAGS; }
1340 
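  /// Conservatively determine whether Inst may leak the address of a stack
  /// variable, i.e. whether SP/BP (or, when a frame pointer is present, any
  /// of their aliases) appears as a source operand outside of a plain
  /// memory dereference or a write back to SP/BP itself.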
1341   bool escapesVariable(const MCInst &Inst,
1342                        bool HasFramePointer) const override {
1343     int MemOpNo = getMemoryOperandNo(Inst);
1344     const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
1345     const unsigned NumDefs = MCII.getNumDefs();
1346     static BitVector SPBPAliases(BitVector(getAliases(X86::RSP)) |=
1347                                  getAliases(X86::RBP));
1348     static BitVector SPAliases(getAliases(X86::RSP));
1349 
1350     // FIXME: PUSH can technically be a leak, but we ignore it for now
1351     // because a lot of harmless prologue code will spill SP to the stack.
1352     // The exception is a push that is clearly pushing an object address
1353     // onto the stack, as indicated by it having a memory operand.
1354     bool IsPush = isPush(Inst);
1355     if (IsPush && MemOpNo == -1)
1356       return false;
1357 
1358     // We use this to detect LEA (has a memory operand but does not access memory)
1359     bool AccessMem = MCII.mayLoad() || MCII.mayStore();
1360     bool DoesLeak = false;
1361     for (int I = 0, E = MCPlus::getNumPrimeOperands(Inst); I != E; ++I) {
1362       // Ignore if SP/BP is used to dereference memory -- that's fine
1363       if (MemOpNo != -1 && !IsPush && AccessMem && I >= MemOpNo &&
1364           I <= MemOpNo + 5)
1365         continue;
1366       // Ignore if someone is writing to SP/BP
1367       if (I < static_cast<int>(NumDefs))
1368         continue;
1369 
1370       const MCOperand &Operand = Inst.getOperand(I);
1371       if (HasFramePointer && Operand.isReg() && SPBPAliases[Operand.getReg()]) {
1372         DoesLeak = true;
1373         break;
1374       }
1375       if (!HasFramePointer && Operand.isReg() && SPAliases[Operand.getReg()]) {
1376         DoesLeak = true;
1377         break;
1378       }
1379     }
1380 
1381     // If this is a potential leak, check whether it is merely writing to itself/SP/BP
1382     if (DoesLeak) {
1383       for (int I = 0, E = NumDefs; I != E; ++I) {
1384         const MCOperand &Operand = Inst.getOperand(I);
1385         if (HasFramePointer && Operand.isReg() &&
1386             SPBPAliases[Operand.getReg()]) {
1387           DoesLeak = false;
1388           break;
1389         }
1390         if (!HasFramePointer && Operand.isReg() &&
1391             SPAliases[Operand.getReg()]) {
1392           DoesLeak = false;
1393           break;
1394         }
1395       }
1396     }
1397     return DoesLeak;
1398   }
1399 
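  /// Add Amt to the instruction's immediate operand (the displacement for
  /// memory operands), relaxing SUB64ri8/ADD64ri8 to their ri32 forms when
  /// the new value no longer fits in a signed 8-bit immediate.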
1400   bool addToImm(MCInst &Inst, int64_t &Amt, MCContext *Ctx) const override {
1401     unsigned ImmOpNo = -1U;
1402     int MemOpNo = getMemoryOperandNo(Inst);
1403     if (MemOpNo != -1)
1404       ImmOpNo = MemOpNo + X86::AddrDisp;
1405     else
1406       for (unsigned Index = 0; Index < MCPlus::getNumPrimeOperands(Inst);
1407            ++Index)
1408         if (Inst.getOperand(Index).isImm())
1409           ImmOpNo = Index;
1410     if (ImmOpNo == -1U)
1411       return false;
1412 
1413     MCOperand &Operand = Inst.getOperand(ImmOpNo);
1414     Amt += Operand.getImm();
1415     Operand.setImm(Amt);
1416     // Check for the need for relaxation
1417     if (int64_t(Amt) == int64_t(int8_t(Amt)))
1418       return true;
1419 
1420     // Relax instruction
1421     switch (Inst.getOpcode()) {
1422     case X86::SUB64ri8:
1423       Inst.setOpcode(X86::SUB64ri32);
1424       break;
1425     case X86::ADD64ri8:
1426       Inst.setOpcode(X86::ADD64ri32);
1427       break;
1428     default:
1429       // No need for relaxation
1430       break;
1431     }
1432     return true;
1433   }
1434 
1435   /// TODO: this implementation currently works for the most common opcodes that
1436   /// load from memory. It can be extended to work with memory store opcodes as
1437   /// well as more memory load opcodes.
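  /// For example, a MOV64rm load of 8 bytes of known constant data can be
  /// rewritten as MOV64ri32 when the value fits in a signed 32-bit
  /// immediate; otherwise the original opcode is kept.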
1438   bool replaceMemOperandWithImm(MCInst &Inst, StringRef ConstantData,
1439                                 uint64_t Offset) const override {
1440     enum CheckSignExt : uint8_t {
1441       NOCHECK = 0,
1442       CHECK8,
1443       CHECK32,
1444     };
1445 
1446     using CheckList = std::vector<std::pair<CheckSignExt, unsigned>>;
1447     struct InstInfo {
1448       // Size in bytes that Inst loads from memory.
1449       uint8_t DataSize;
1450 
1451       // True when the target operand has to be duplicated because the opcode
1452       // expects a LHS operand.
1453       bool HasLHS;
1454 
1455       // List of checks and corresponding opcodes to be used. We try to use the
1456       // smallest possible immediate value when various sizes are available,
1457       // hence we may need to check whether a larger constant fits in a smaller
1458       // immediate.
1459       CheckList Checks;
1460     };
1461 
1462     InstInfo I;
1463 
1464     switch (Inst.getOpcode()) {
1465     default: {
1466       switch (getPopSize(Inst)) {
1467       case 2:            I = {2, false, {{NOCHECK, X86::MOV16ri}}};  break;
1468       case 4:            I = {4, false, {{NOCHECK, X86::MOV32ri}}};  break;
1469       case 8:            I = {8, false, {{CHECK32, X86::MOV64ri32},
1470                                          {NOCHECK, X86::MOV64rm}}};  break;
1471       default:           return false;
1472       }
1473       break;
1474     }
1475 
1476     // MOV
1477     case X86::MOV8rm:      I = {1, false, {{NOCHECK, X86::MOV8ri}}};   break;
1478     case X86::MOV16rm:     I = {2, false, {{NOCHECK, X86::MOV16ri}}};  break;
1479     case X86::MOV32rm:     I = {4, false, {{NOCHECK, X86::MOV32ri}}};  break;
1480     case X86::MOV64rm:     I = {8, false, {{CHECK32, X86::MOV64ri32},
1481                                            {NOCHECK, X86::MOV64rm}}};  break;
1482 
1483     // MOVZX
1484     case X86::MOVZX16rm8:  I = {1, false, {{NOCHECK, X86::MOV16ri}}};  break;
1485     case X86::MOVZX32rm8:  I = {1, false, {{NOCHECK, X86::MOV32ri}}};  break;
1486     case X86::MOVZX32rm16: I = {2, false, {{NOCHECK, X86::MOV32ri}}};  break;
1487 
1488     // CMP
1489     case X86::CMP8rm:      I = {1, false, {{NOCHECK, X86::CMP8ri}}};   break;
1490     case X86::CMP16rm:     I = {2, false, {{CHECK8,  X86::CMP16ri8},
1491                                            {NOCHECK, X86::CMP16ri}}};  break;
1492     case X86::CMP32rm:     I = {4, false, {{CHECK8,  X86::CMP32ri8},
1493                                            {NOCHECK, X86::CMP32ri}}};  break;
1494     case X86::CMP64rm:     I = {8, false, {{CHECK8,  X86::CMP64ri8},
1495                                            {CHECK32, X86::CMP64ri32},
1496                                            {NOCHECK, X86::CMP64rm}}};  break;
1497 
1498     // TEST
1499     case X86::TEST8mr:     I = {1, false, {{NOCHECK, X86::TEST8ri}}};  break;
1500     case X86::TEST16mr:    I = {2, false, {{NOCHECK, X86::TEST16ri}}}; break;
1501     case X86::TEST32mr:    I = {4, false, {{NOCHECK, X86::TEST32ri}}}; break;
1502     case X86::TEST64mr:    I = {8, false, {{CHECK32, X86::TEST64ri32},
1503                                            {NOCHECK, X86::TEST64mr}}}; break;
1504 
1505     // ADD
1506     case X86::ADD8rm:      I = {1, true,  {{NOCHECK, X86::ADD8ri}}};   break;
1507     case X86::ADD16rm:     I = {2, true,  {{CHECK8,  X86::ADD16ri8},
1508                                            {NOCHECK, X86::ADD16ri}}};  break;
1509     case X86::ADD32rm:     I = {4, true,  {{CHECK8,  X86::ADD32ri8},
1510                                            {NOCHECK, X86::ADD32ri}}};  break;
1511     case X86::ADD64rm:     I = {8, true,  {{CHECK8,  X86::ADD64ri8},
1512                                            {CHECK32, X86::ADD64ri32},
1513                                            {NOCHECK, X86::ADD64rm}}};  break;
1514 
1515     // SUB
1516     case X86::SUB8rm:      I = {1, true,  {{NOCHECK, X86::SUB8ri}}};   break;
1517     case X86::SUB16rm:     I = {2, true,  {{CHECK8,  X86::SUB16ri8},
1518                                            {NOCHECK, X86::SUB16ri}}};  break;
1519     case X86::SUB32rm:     I = {4, true,  {{CHECK8,  X86::SUB32ri8},
1520                                            {NOCHECK, X86::SUB32ri}}};  break;
1521     case X86::SUB64rm:     I = {8, true,  {{CHECK8,  X86::SUB64ri8},
1522                                            {CHECK32, X86::SUB64ri32},
1523                                            {NOCHECK, X86::SUB64rm}}};  break;
1524 
1525     // AND
1526     case X86::AND8rm:      I = {1, true,  {{NOCHECK, X86::AND8ri}}};   break;
1527     case X86::AND16rm:     I = {2, true,  {{CHECK8,  X86::AND16ri8},
1528                                            {NOCHECK, X86::AND16ri}}};  break;
1529     case X86::AND32rm:     I = {4, true,  {{CHECK8,  X86::AND32ri8},
1530                                            {NOCHECK, X86::AND32ri}}};  break;
1531     case X86::AND64rm:     I = {8, true,  {{CHECK8,  X86::AND64ri8},
1532                                            {CHECK32, X86::AND64ri32},
1533                                            {NOCHECK, X86::AND64rm}}};  break;
1534 
1535     // OR
1536     case X86::OR8rm:       I = {1, true,  {{NOCHECK, X86::OR8ri}}};    break;
1537     case X86::OR16rm:      I = {2, true,  {{CHECK8,  X86::OR16ri8},
1538                                            {NOCHECK, X86::OR16ri}}};   break;
1539     case X86::OR32rm:      I = {4, true,  {{CHECK8,  X86::OR32ri8},
1540                                            {NOCHECK, X86::OR32ri}}};   break;
1541     case X86::OR64rm:      I = {8, true,  {{CHECK8,  X86::OR64ri8},
1542                                            {CHECK32, X86::OR64ri32},
1543                                            {NOCHECK, X86::OR64rm}}};   break;
1544 
1545     // XOR
1546     case X86::XOR8rm:      I = {1, true,  {{NOCHECK, X86::XOR8ri}}};   break;
1547     case X86::XOR16rm:     I = {2, true,  {{CHECK8,  X86::XOR16ri8},
1548                                            {NOCHECK, X86::XOR16ri}}};  break;
1549     case X86::XOR32rm:     I = {4, true,  {{CHECK8,  X86::XOR32ri8},
1550                                            {NOCHECK, X86::XOR32ri}}};  break;
1551     case X86::XOR64rm:     I = {8, true,  {{CHECK8,  X86::XOR64ri8},
1552                                            {CHECK32, X86::XOR64ri32},
1553                                            {NOCHECK, X86::XOR64rm}}};  break;
1554     }
1555 
1556     // Compute the immediate value.
1557     assert(Offset + I.DataSize <= ConstantData.size() &&
1558            "invalid offset for given constant data");
1559     int64_t ImmVal =
1560         DataExtractor(ConstantData, true, 8).getSigned(&Offset, I.DataSize);
1561 
1562     // Compute the new opcode.
1563     unsigned NewOpcode = 0;
1564     for (const std::pair<CheckSignExt, unsigned> &Check : I.Checks) {
1565       NewOpcode = Check.second;
1566       if (Check.first == NOCHECK)
1567         break;
1568       if (Check.first == CHECK8 && isInt<8>(ImmVal))
1569         break;
1570       if (Check.first == CHECK32 && isInt<32>(ImmVal))
1571         break;
1572     }
1573     if (NewOpcode == Inst.getOpcode())
1574       return false;
1575 
1576     // Modify the instruction.
1577     MCOperand ImmOp = MCOperand::createImm(ImmVal);
1578     uint32_t TargetOpNum = 0;
1579     // The TEST instruction does not follow the regular pattern of putting the
1580     // memory reference of a load (5 MCOperands) last in the list of operands.
1581     // Since it does not modify the register operand, that operand is not
1582     // treated as a destination and is not the first operand, unlike the
1583     // other instructions handled here.
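    // For illustration (assumed operands, not from the source): folding a
    // constant load into "testl %eax, 0x10(%rbp)" produces "testl $Imm, %eax";
    // because the register follows the five memory operands, TargetOpNum is
    // advanced past the memory reference instead of staying at operand 0.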
1584     if (NewOpcode == X86::TEST8ri || NewOpcode == X86::TEST16ri ||
1585         NewOpcode == X86::TEST32ri || NewOpcode == X86::TEST64ri32)
1586       TargetOpNum = getMemoryOperandNo(Inst) + X86::AddrNumOperands;
1587 
1588     MCOperand TargetOp = Inst.getOperand(TargetOpNum);
1589     Inst.clear();
1590     Inst.setOpcode(NewOpcode);
1591     Inst.addOperand(TargetOp);
1592     if (I.HasLHS)
1593       Inst.addOperand(TargetOp);
1594     Inst.addOperand(ImmOp);
1595 
1596     return true;
1597   }
1598 
1599   /// TODO: this implementation currently works for the most common opcodes that
1600   /// load from memory. It can be extended to work with memory store opcodes as
1601   /// well as more memory load opcodes.
1602   bool replaceMemOperandWithReg(MCInst &Inst, MCPhysReg RegNum) const override {
1603     unsigned NewOpcode;
1604 
1605     switch (Inst.getOpcode()) {
1606     default: {
1607       switch (getPopSize(Inst)) {
1608       case 2:            NewOpcode = X86::MOV16rr; break;
1609       case 4:            NewOpcode = X86::MOV32rr; break;
1610       case 8:            NewOpcode = X86::MOV64rr; break;
1611       default:           return false;
1612       }
1613       break;
1614     }
1615 
1616     // MOV
1617     case X86::MOV8rm:      NewOpcode = X86::MOV8rr;   break;
1618     case X86::MOV16rm:     NewOpcode = X86::MOV16rr;  break;
1619     case X86::MOV32rm:     NewOpcode = X86::MOV32rr;  break;
1620     case X86::MOV64rm:     NewOpcode = X86::MOV64rr;  break;
1621     }
1622 
1623     // Modify the instruction.
1624     MCOperand RegOp = MCOperand::createReg(RegNum);
1625     MCOperand TargetOp = Inst.getOperand(0);
1626     Inst.clear();
1627     Inst.setOpcode(NewOpcode);
1628     Inst.addOperand(TargetOp);
1629     Inst.addOperand(RegOp);
1630 
1631     return true;
1632   }
1633 
1634   bool isRedundantMove(const MCInst &Inst) const override {
1635     switch (Inst.getOpcode()) {
1636     default:
1637       return false;
1638 
1639     // MOV
1640     case X86::MOV8rr:
1641     case X86::MOV16rr:
1642     case X86::MOV32rr:
1643     case X86::MOV64rr:
1644       break;
1645     }
1646 
1647     assert(Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg());
1648     return Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg();
1649   }
1650 
1651   bool requiresAlignedAddress(const MCInst &Inst) const override {
1652     const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
1653     for (unsigned int I = 0; I < Desc.getNumOperands(); ++I) {
1654       const MCOperandInfo &Op = Desc.OpInfo[I];
1655       if (Op.OperandType != MCOI::OPERAND_REGISTER)
1656         continue;
1657       if (Op.RegClass == X86::VR128RegClassID)
1658         return true;
1659     }
1660     return false;
1661   }
1662 
1663   bool convertJmpToTailCall(MCInst &Inst) override {
1664     if (isTailCall(Inst))
1665       return false;
1666 
1667     int NewOpcode;
1668     switch (Inst.getOpcode()) {
1669     default:
1670       return false;
1671     case X86::JMP_1:
1672     case X86::JMP_2:
1673     case X86::JMP_4:
1674       NewOpcode = X86::JMP_4;
1675       break;
1676     case X86::JMP16m:
1677     case X86::JMP32m:
1678     case X86::JMP64m:
1679       NewOpcode = X86::JMP32m;
1680       break;
1681     case X86::JMP16r:
1682     case X86::JMP32r:
1683     case X86::JMP64r:
1684       NewOpcode = X86::JMP32r;
1685       break;
1686     }
1687 
1688     Inst.setOpcode(NewOpcode);
1689     setTailCall(Inst);
1690     return true;
1691   }
1692 
1693   bool convertTailCallToJmp(MCInst &Inst) override {
1694     int NewOpcode;
1695     switch (Inst.getOpcode()) {
1696     default:
1697       return false;
1698     case X86::JMP_4:
1699       NewOpcode = X86::JMP_1;
1700       break;
1701     case X86::JMP32m:
1702       NewOpcode = X86::JMP64m;
1703       break;
1704     case X86::JMP32r:
1705       NewOpcode = X86::JMP64r;
1706       break;
1707     }
1708 
1709     Inst.setOpcode(NewOpcode);
1710     removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
1711     clearOffset(Inst);
1712     return true;
1713   }
1714 
1715   bool convertTailCallToCall(MCInst &Inst) override {
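    // Illustrative example (assumed symbol name): a direct tail call
    // "jmp foo" (JMP_4) becomes "callq foo" (CALL64pcrel32); the indirect
    // forms below are converted analogously.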
1716     int NewOpcode;
1717     switch (Inst.getOpcode()) {
1718     default:
1719       return false;
1720     case X86::JMP_4:
1721       NewOpcode = X86::CALL64pcrel32;
1722       break;
1723     case X86::JMP32m:
1724       NewOpcode = X86::CALL64m;
1725       break;
1726     case X86::JMP32r:
1727       NewOpcode = X86::CALL64r;
1728       break;
1729     }
1730 
1731     Inst.setOpcode(NewOpcode);
1732     removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
1733     return true;
1734   }
1735 
1736   bool convertCallToIndirectCall(MCInst &Inst, const MCSymbol *TargetLocation,
1737                                  MCContext *Ctx) override {
1738     assert((Inst.getOpcode() == X86::CALL64pcrel32 ||
1739             (Inst.getOpcode() == X86::JMP_4 && isTailCall(Inst))) &&
1740            "64-bit direct (tail) call instruction expected");
1741     const auto NewOpcode =
1742         (Inst.getOpcode() == X86::CALL64pcrel32) ? X86::CALL64m : X86::JMP32m;
1743     Inst.setOpcode(NewOpcode);
1744 
1745     // Replace the first operand and preserve auxiliary operands of
1746     // the instruction.
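    // Illustrative example (assumed symbol): "callq foo" becomes
    // "callq *TargetLocation(%rip)". The inserts below build the memory
    // operand in the order BaseReg, ScaleAmt, IndexReg, Displacement,
    // AddrSegmentReg.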
1747     Inst.erase(Inst.begin());
1748     Inst.insert(Inst.begin(),
1749                 MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
1750     Inst.insert(Inst.begin(),
1751                 MCOperand::createExpr(                  // Displacement
1752                     MCSymbolRefExpr::create(TargetLocation,
1753                                             MCSymbolRefExpr::VK_None, *Ctx)));
1754     Inst.insert(Inst.begin(),
1755                 MCOperand::createReg(X86::NoRegister)); // IndexReg
1756     Inst.insert(Inst.begin(),
1757                 MCOperand::createImm(1));               // ScaleAmt
1758     Inst.insert(Inst.begin(),
1759                 MCOperand::createReg(X86::RIP));        // BaseReg
1760 
1761     return true;
1762   }
1763 
1764   void convertIndirectCallToLoad(MCInst &Inst, MCPhysReg Reg) override {
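    // Illustrative rewrites (assumed operands): "callq *8(%rbx)" becomes
    // "movq 8(%rbx), %Reg", and "callq *%rbx" becomes "movq %rbx, %Reg".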
1765     bool IsTailCall = isTailCall(Inst);
1766     if (IsTailCall)
1767       removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
1768     if (Inst.getOpcode() == X86::CALL64m ||
1769         (Inst.getOpcode() == X86::JMP32m && IsTailCall)) {
1770       Inst.setOpcode(X86::MOV64rm);
1771       Inst.insert(Inst.begin(), MCOperand::createReg(Reg));
1772       return;
1773     }
1774     if (Inst.getOpcode() == X86::CALL64r ||
1775         (Inst.getOpcode() == X86::JMP32r && IsTailCall)) {
1776       Inst.setOpcode(X86::MOV64rr);
1777       Inst.insert(Inst.begin(), MCOperand::createReg(Reg));
1778       return;
1779     }
1780     LLVM_DEBUG(Inst.dump());
1781     llvm_unreachable("not implemented");
1782   }
1783 
1784   bool shortenInstruction(MCInst &Inst,
1785                           const MCSubtargetInfo &STI) const override {
1786     unsigned OldOpcode = Inst.getOpcode();
1787     unsigned NewOpcode = OldOpcode;
1788 
1789     int MemOpNo = getMemoryOperandNo(Inst);
1790 
1791     // Check and remove redundant Address-Size override prefix.
1792     if (opts::X86StripRedundantAddressSize) {
1793       uint64_t TSFlags = Info->get(OldOpcode).TSFlags;
1794       unsigned Flags = Inst.getFlags();
1795 
1796       if (!X86_MC::needsAddressSizeOverride(Inst, STI, MemOpNo, TSFlags) &&
1797           Flags & X86::IP_HAS_AD_SIZE)
1798         Inst.setFlags(Flags ^ X86::IP_HAS_AD_SIZE);
1799     }
1800 
1801     // Check for and remove EIZ/RIZ. These represent ambiguous cases where a
1802     // SIB byte is present but no index is used, so ModRM alone would have
1803     // been enough. Converting the index to NoRegister removes the SIB byte.
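    // Illustrative example (assumed instruction): "mov (%rax,%riz,1), %rbx"
    // encodes a SIB byte even though it is equivalent to "mov (%rax), %rbx";
    // dropping %riz lets the encoder pick the shorter ModRM-only form.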
1804     if (MemOpNo >= 0) {
1805       MCOperand &IndexOp =
1806           Inst.getOperand(static_cast<unsigned>(MemOpNo) + X86::AddrIndexReg);
1807       if (IndexOp.getReg() == X86::EIZ || IndexOp.getReg() == X86::RIZ)
1808         IndexOp = MCOperand::createReg(X86::NoRegister);
1809     }
1810 
1811     if (isBranch(Inst)) {
1812       NewOpcode = getShortBranchOpcode(OldOpcode);
1813     } else if (OldOpcode == X86::MOV64ri) {
1814       if (Inst.getOperand(MCPlus::getNumPrimeOperands(Inst) - 1).isImm()) {
1815         const int64_t Imm =
1816             Inst.getOperand(MCPlus::getNumPrimeOperands(Inst) - 1).getImm();
1817         if (int64_t(Imm) == int64_t(int32_t(Imm)))
1818           NewOpcode = X86::MOV64ri32;
1819       }
1820     } else {
1821       // If it's an arithmetic instruction, check if the signed operand fits in one byte.
1822       const unsigned ShortOpcode = getShortArithOpcode(OldOpcode);
1823       if (ShortOpcode != OldOpcode &&
1824           Inst.getOperand(MCPlus::getNumPrimeOperands(Inst) - 1).isImm()) {
1825         int64_t Imm =
1826             Inst.getOperand(MCPlus::getNumPrimeOperands(Inst) - 1).getImm();
1827         if (int64_t(Imm) == int64_t(int8_t(Imm)))
1828           NewOpcode = ShortOpcode;
1829       }
1830     }
1831 
1832     if (NewOpcode == OldOpcode)
1833       return false;
1834 
1835     Inst.setOpcode(NewOpcode);
1836     return true;
1837   }
1838 
1839   bool
1840   convertMoveToConditionalMove(MCInst &Inst, unsigned CC, bool AllowStackMemOp,
1841                                bool AllowBasePtrStackMemOp) const override {
1842     // - Register-register moves are OK
1843     // - Stores are filtered out by opcode (no store CMOV)
1844     // - Non-stack loads are prohibited (generally unsafe)
1845     // - Stack loads are OK if AllowStackMemOp is true
1846     // - Stack loads with RBP are OK if AllowBasePtrStackMemOp is true
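    // Illustrative example (assumed condition code): with CC == X86::COND_E,
    // "movq %rbx, %rax" becomes "cmoveq %rbx, %rax", leaving %rax unchanged
    // when the condition does not hold.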
1847     if (isLoad(Inst)) {
1848       // If stack memory operands are not allowed, no loads are allowed
1849       if (!AllowStackMemOp)
1850         return false;
1851 
1852       // If stack memory operands are allowed, check if it's a load from stack
1853       bool IsLoad, IsStore, IsStoreFromReg, IsSimple, IsIndexed;
1854       MCPhysReg Reg;
1855       int32_t SrcImm;
1856       uint16_t StackPtrReg;
1857       int64_t StackOffset;
1858       uint8_t Size;
1859       bool IsStackAccess =
1860           isStackAccess(Inst, IsLoad, IsStore, IsStoreFromReg, Reg, SrcImm,
1861                         StackPtrReg, StackOffset, Size, IsSimple, IsIndexed);
1862       // Prohibit non-stack-based loads
1863       if (!IsStackAccess)
1864         return false;
1865       // If stack memory operands are allowed, check if it's RBP-based
1866       if (!AllowBasePtrStackMemOp &&
1867           RegInfo->isSubRegisterEq(X86::RBP, StackPtrReg))
1868         return false;
1869     }
1870 
1871     unsigned NewOpcode = 0;
1872     switch (Inst.getOpcode()) {
1873     case X86::MOV16rr:
1874       NewOpcode = X86::CMOV16rr;
1875       break;
1876     case X86::MOV16rm:
1877       NewOpcode = X86::CMOV16rm;
1878       break;
1879     case X86::MOV32rr:
1880       NewOpcode = X86::CMOV32rr;
1881       break;
1882     case X86::MOV32rm:
1883       NewOpcode = X86::CMOV32rm;
1884       break;
1885     case X86::MOV64rr:
1886       NewOpcode = X86::CMOV64rr;
1887       break;
1888     case X86::MOV64rm:
1889       NewOpcode = X86::CMOV64rm;
1890       break;
1891     default:
1892       return false;
1893     }
1894     Inst.setOpcode(NewOpcode);
1895     // Insert CC at the end of prime operands, before annotations
1896     Inst.insert(Inst.begin() + MCPlus::getNumPrimeOperands(Inst),
1897                 MCOperand::createImm(CC));
1898     // CMOV is a 3-operand MCInst, so duplicate the destination as src1
1899     Inst.insert(Inst.begin(), Inst.getOperand(0));
1900     return true;
1901   }
1902 
1903   bool lowerTailCall(MCInst &Inst) override {
1904     if (Inst.getOpcode() == X86::JMP_4 && isTailCall(Inst)) {
1905       Inst.setOpcode(X86::JMP_1);
1906       removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
1907       return true;
1908     }
1909     return false;
1910   }
1911 
1912   const MCSymbol *getTargetSymbol(const MCInst &Inst,
1913                                   unsigned OpNum = 0) const override {
1914     if (OpNum >= MCPlus::getNumPrimeOperands(Inst))
1915       return nullptr;
1916 
1917     const MCOperand &Op = Inst.getOperand(OpNum);
1918     if (!Op.isExpr())
1919       return nullptr;
1920 
1921     auto *SymExpr = dyn_cast<MCSymbolRefExpr>(Op.getExpr());
1922     if (!SymExpr || SymExpr->getKind() != MCSymbolRefExpr::VK_None)
1923       return nullptr;
1924 
1925     return &SymExpr->getSymbol();
1926   }
1927 
1928   // This is the same as the base class, but since we are overriding one of
1929   // getTargetSymbol's signatures above, we need to override all of them.
1930   const MCSymbol *getTargetSymbol(const MCExpr *Expr) const override {
1931     return &cast<const MCSymbolRefExpr>(Expr)->getSymbol();
1932   }
1933 
1934   bool analyzeBranch(InstructionIterator Begin, InstructionIterator End,
1935                      const MCSymbol *&TBB, const MCSymbol *&FBB,
1936                      MCInst *&CondBranch,
1937                      MCInst *&UncondBranch) const override {
1938     auto I = End;
1939 
1940     // Bottom-up analysis
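    // Illustrative example (assumed labels): for a block ending in
    //    jne  .L1
    //    jmp  .L2
    // the jmp is visited first and sets TBB = .L2; the jne then becomes
    // CondBranch, with TBB = .L1 and FBB = .L2.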
1941     while (I != Begin) {
1942       --I;
1943 
1944       // Ignore nops and CFIs
1945       if (isPseudo(*I))
1946         continue;
1947 
1948       // Stop when we find the first non-terminator
1949       if (!isTerminator(*I))
1950         break;
1951 
1952       if (!isBranch(*I))
1953         break;
1954 
1955       // Handle unconditional branches.
1956       if ((I->getOpcode() == X86::JMP_1 || I->getOpcode() == X86::JMP_2 ||
1957            I->getOpcode() == X86::JMP_4) &&
1958           !isTailCall(*I)) {
1959         // Any code seen after this unconditional branch is unreachable.
1960         // Ignore it.
1961         CondBranch = nullptr;
1962         UncondBranch = &*I;
1963         const MCSymbol *Sym = getTargetSymbol(*I);
1964         assert(Sym != nullptr &&
1965                "Couldn't extract BB symbol from jump operand");
1966         TBB = Sym;
1967         continue;
1968       }
1969 
1970       // Handle conditional branches; give up on indirect branches.
1971       if (!isUnsupportedBranch(I->getOpcode()) &&
1972           getCondCode(*I) == X86::COND_INVALID) {
1973         // Indirect branch
1974         return false;
1975       }
1976 
1977       if (CondBranch == nullptr) {
1978         const MCSymbol *TargetBB = getTargetSymbol(*I);
1979         if (TargetBB == nullptr) {
1980           // Unrecognized branch target
1981           return false;
1982         }
1983         FBB = TBB;
1984         TBB = TargetBB;
1985         CondBranch = &*I;
1986         continue;
1987       }
1988 
1989       llvm_unreachable("multiple conditional branches in one BB");
1990     }
1991     return true;
1992   }
1993 
1994   template <typename Itr>
1995   std::pair<IndirectBranchType, MCInst *>
1996   analyzePICJumpTable(Itr II, Itr IE, MCPhysReg R1, MCPhysReg R2) const {
1997     // Analyze PIC-style jump table code template:
1998     //
1999     //    lea PIC_JUMP_TABLE(%rip), {%r1|%r2}     <- MemLocInstr
2000     //    mov ({%r1|%r2}, %index, 4), {%r2|%r1}
2001     //    add %r2, %r1
2002     //    jmp *%r1
2003     //
2004     // (with any irrelevant instructions in-between)
2005     //
2006     // When we call this helper we've already determined %r1 and %r2, and
2007     // reverse instruction iterator \p II is pointing to the ADD instruction.
2008     //
2009     // A PIC jump table looks like the following:
2010     //
2011     //   JT:  ----------
2012     //    E1:| L1 - JT  |
2013     //       |----------|
2014     //    E2:| L2 - JT  |
2015     //       |----------|
2016     //       |          |
2017     //          ......
2018     //    En:| Ln - JT  |
2019     //        ----------
2020     //
2021     // Where L1, L2, ..., Ln represent labels in the function.
2022     //
2023     // The actual relocations in the table will be of the form:
2024     //
2025     //   Ln - JT
2026     //    = (Ln - En) + (En - JT)
2027     //    = R_X86_64_PC32(Ln) + En - JT
2028     //    = R_X86_64_PC32(Ln + offsetof(En))
2029     //
2030     LLVM_DEBUG(dbgs() << "Checking for PIC jump table\n");
2031     MCInst *MemLocInstr = nullptr;
2032     const MCInst *MovInstr = nullptr;
2033     while (++II != IE) {
2034       MCInst &Instr = *II;
2035       const MCInstrDesc &InstrDesc = Info->get(Instr.getOpcode());
2036       if (!InstrDesc.hasDefOfPhysReg(Instr, R1, *RegInfo) &&
2037           !InstrDesc.hasDefOfPhysReg(Instr, R2, *RegInfo)) {
2038         // Ignore instructions that don't affect R1, R2 registers.
2039         continue;
2040       }
2041       if (!MovInstr) {
2042         // Expect to see MOV instruction.
2043         if (!isMOVSX64rm32(Instr)) {
2044           LLVM_DEBUG(dbgs() << "MOV instruction expected.\n");
2045           break;
2046         }
2047 
2048         // Check if it's setting %r1 or %r2. In canonical form it sets %r2.
2049         // If it sets %r1, rename the registers so we only have to check
2050         // a single form.
2051         unsigned MovDestReg = Instr.getOperand(0).getReg();
2052         if (MovDestReg != R2)
2053           std::swap(R1, R2);
2054         if (MovDestReg != R2) {
2055           LLVM_DEBUG(dbgs() << "MOV instruction expected to set %r2\n");
2056           break;
2057         }
2058 
2059         // Verify operands for MOV.
2060         unsigned  BaseRegNum;
2061         int64_t   ScaleValue;
2062         unsigned  IndexRegNum;
2063         int64_t   DispValue;
2064         unsigned  SegRegNum;
2065         if (!evaluateX86MemoryOperand(Instr, &BaseRegNum, &ScaleValue,
2066                                       &IndexRegNum, &DispValue, &SegRegNum))
2067           break;
2068         if (BaseRegNum != R1 || ScaleValue != 4 ||
2069             IndexRegNum == X86::NoRegister || DispValue != 0 ||
2070             SegRegNum != X86::NoRegister)
2071           break;
2072         MovInstr = &Instr;
2073       } else {
2074         if (!InstrDesc.hasDefOfPhysReg(Instr, R1, *RegInfo))
2075           continue;
2076         if (!isLEA64r(Instr)) {
2077           LLVM_DEBUG(dbgs() << "LEA instruction expected\n");
2078           break;
2079         }
2080         if (Instr.getOperand(0).getReg() != R1) {
2081           LLVM_DEBUG(dbgs() << "LEA instruction expected to set %r1\n");
2082           break;
2083         }
2084 
2085         // Verify operands for LEA.
2086         unsigned      BaseRegNum;
2087         int64_t       ScaleValue;
2088         unsigned      IndexRegNum;
2089         const MCExpr *DispExpr = nullptr;
2090         int64_t       DispValue;
2091         unsigned      SegRegNum;
2092         if (!evaluateX86MemoryOperand(Instr, &BaseRegNum, &ScaleValue,
2093                                       &IndexRegNum, &DispValue, &SegRegNum,
2094                                       &DispExpr))
2095           break;
2096         if (BaseRegNum != RegInfo->getProgramCounter() ||
2097             IndexRegNum != X86::NoRegister || SegRegNum != X86::NoRegister ||
2098             DispExpr == nullptr)
2099           break;
2100         MemLocInstr = &Instr;
2101         break;
2102       }
2103     }
2104 
2105     if (!MemLocInstr)
2106       return std::make_pair(IndirectBranchType::UNKNOWN, nullptr);
2107 
2108     LLVM_DEBUG(dbgs() << "checking potential PIC jump table\n");
2109     return std::make_pair(IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE,
2110                           MemLocInstr);
2111   }
2112 
2113   IndirectBranchType analyzeIndirectBranch(
2114       MCInst &Instruction, InstructionIterator Begin, InstructionIterator End,
2115       const unsigned PtrSize, MCInst *&MemLocInstrOut, unsigned &BaseRegNumOut,
2116       unsigned &IndexRegNumOut, int64_t &DispValueOut,
2117       const MCExpr *&DispExprOut, MCInst *&PCRelBaseOut) const override {
2118     // Try to find a (base) memory location from where the address for
2119     // the indirect branch is loaded. For X86-64 the memory will be specified
2120     // in the following format:
2121     //
2122     //   {%rip}/{%basereg} + Imm + IndexReg * Scale
2123     //
2124     // We are interested in the cases where Scale == sizeof(uintptr_t) and
2125     // the contents of the memory are presumably an array of pointers to code.
2126     //
2127     // Normal jump table:
2128     //
2129     //    jmp *(JUMP_TABLE, %index, Scale)        <- MemLocInstr
2130     //
2131     //    or
2132     //
2133     //    mov (JUMP_TABLE, %index, Scale), %r1    <- MemLocInstr
2134     //    ...
2135     //    jmp %r1
2136     //
2137     // We handle PIC-style jump tables separately.
2138     //
2139     MemLocInstrOut = nullptr;
2140     BaseRegNumOut = X86::NoRegister;
2141     IndexRegNumOut = X86::NoRegister;
2142     DispValueOut = 0;
2143     DispExprOut = nullptr;
2144 
2145     std::reverse_iterator<InstructionIterator> II(End);
2146     std::reverse_iterator<InstructionIterator> IE(Begin);
2147 
2148     IndirectBranchType Type = IndirectBranchType::UNKNOWN;
2149 
2150     // An instruction referencing the memory used by the jump instruction
2151     // (directly or via a register). This location could be an array of function
2152     // pointers in the case of an indirect tail call, or a jump table.
2153     MCInst *MemLocInstr = nullptr;
2154 
2155     if (MCPlus::getNumPrimeOperands(Instruction) == 1) {
2156       // If the indirect jump is on register - try to detect if the
2157       // register value is loaded from a memory location.
2158       assert(Instruction.getOperand(0).isReg() && "register operand expected");
2159       const unsigned R1 = Instruction.getOperand(0).getReg();
2160       // Check if one of the previous instructions defines the jump-on register.
2161       for (auto PrevII = II; PrevII != IE; ++PrevII) {
2162         MCInst &PrevInstr = *PrevII;
2163         const MCInstrDesc &PrevInstrDesc = Info->get(PrevInstr.getOpcode());
2164 
2165         if (!PrevInstrDesc.hasDefOfPhysReg(PrevInstr, R1, *RegInfo))
2166           continue;
2167 
2168         if (isMoveMem2Reg(PrevInstr)) {
2169           MemLocInstr = &PrevInstr;
2170           break;
2171         }
2172         if (isADD64rr(PrevInstr)) {
2173           unsigned R2 = PrevInstr.getOperand(2).getReg();
2174           if (R1 == R2)
2175             return IndirectBranchType::UNKNOWN;
2176           std::tie(Type, MemLocInstr) = analyzePICJumpTable(PrevII, IE, R1, R2);
2177           break;
2178         }
2179         return IndirectBranchType::UNKNOWN;
2180       }
2181       if (!MemLocInstr) {
2182         // No definition of the register has been seen in this function so far.
2183         // It could be an input parameter, which would make this an external
2184         // code reference, or the definition could be in code that we haven't
2185         // processed yet. Since we have to be conservative, return the UNKNOWN
2186         // case.
2187         return IndirectBranchType::UNKNOWN;
2188       }
2189     } else {
2190       MemLocInstr = &Instruction;
2191     }
2192 
2193     const MCRegister RIPRegister = RegInfo->getProgramCounter();
2194 
2195     // Analyze the memory location.
2196     unsigned BaseRegNum, IndexRegNum, SegRegNum;
2197     int64_t ScaleValue, DispValue;
2198     const MCExpr *DispExpr;
2199 
2200     if (!evaluateX86MemoryOperand(*MemLocInstr, &BaseRegNum, &ScaleValue,
2201                                   &IndexRegNum, &DispValue, &SegRegNum,
2202                                   &DispExpr))
2203       return IndirectBranchType::UNKNOWN;
2204 
2205     BaseRegNumOut = BaseRegNum;
2206     IndexRegNumOut = IndexRegNum;
2207     DispValueOut = DispValue;
2208     DispExprOut = DispExpr;
2209 
2210     if ((BaseRegNum != X86::NoRegister && BaseRegNum != RIPRegister) ||
2211         SegRegNum != X86::NoRegister)
2212       return IndirectBranchType::UNKNOWN;
2213 
2214     if (MemLocInstr == &Instruction &&
2215         (!ScaleValue || IndexRegNum == X86::NoRegister)) {
2216       MemLocInstrOut = MemLocInstr;
2217       return IndirectBranchType::POSSIBLE_FIXED_BRANCH;
2218     }
2219 
2220     if (Type == IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE &&
2221         (ScaleValue != 1 || BaseRegNum != RIPRegister))
2222       return IndirectBranchType::UNKNOWN;
2223 
2224     if (Type != IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE &&
2225         ScaleValue != PtrSize)
2226       return IndirectBranchType::UNKNOWN;
2227 
2228     MemLocInstrOut = MemLocInstr;
2229 
2230     return Type;
2231   }
2232 
2233   /// Analyze a callsite to see if it could be a virtual method call.  This only
2234   /// checks whether the overall pattern is satisfied; it does not guarantee
2235   /// that the callsite is a true virtual method call.
2236   /// The format of virtual method calls that are recognized is one of the
2237   /// following:
2238   ///
2239   ///  Form 1: (found in debug code)
2240   ///    add METHOD_OFFSET, %VtableReg
2241   ///    mov (%VtableReg), %MethodReg
2242   ///    ...
2243   ///    call or jmp *%MethodReg
2244   ///
2245   ///  Form 2:
2246   ///    mov METHOD_OFFSET(%VtableReg), %MethodReg
2247   ///    ...
2248   ///    call or jmp *%MethodReg
2249   ///
2250   ///  Form 3:
2251   ///    ...
2252   ///    call or jmp *METHOD_OFFSET(%VtableReg)
2253   ///
2254   bool analyzeVirtualMethodCall(InstructionIterator ForwardBegin,
2255                                 InstructionIterator ForwardEnd,
2256                                 std::vector<MCInst *> &MethodFetchInsns,
2257                                 unsigned &VtableRegNum, unsigned &MethodRegNum,
2258                                 uint64_t &MethodOffset) const override {
2259     VtableRegNum = X86::NoRegister;
2260     MethodRegNum = X86::NoRegister;
2261     MethodOffset = 0;
2262 
2263     std::reverse_iterator<InstructionIterator> Itr(ForwardEnd);
2264     std::reverse_iterator<InstructionIterator> End(ForwardBegin);
2265 
2266     MCInst &CallInst = *Itr++;
2267     assert(isIndirectBranch(CallInst) || isCall(CallInst));
2268 
2269     unsigned BaseReg, IndexReg, SegmentReg;
2270     int64_t Scale, Disp;
2271     const MCExpr *DispExpr;
2272 
2273     // The call itself may simply be "jmp *offset(%reg)".
2274     if (evaluateX86MemoryOperand(CallInst, &BaseReg, &Scale, &IndexReg, &Disp,
2275                                  &SegmentReg, &DispExpr)) {
2276       if (!DispExpr && BaseReg != X86::RIP && BaseReg != X86::RBP &&
2277           BaseReg != X86::NoRegister) {
2278         MethodRegNum = BaseReg;
2279         if (Scale == 1 && IndexReg == X86::NoRegister &&
2280             SegmentReg == X86::NoRegister) {
2281           VtableRegNum = MethodRegNum;
2282           MethodOffset = Disp;
2283           MethodFetchInsns.push_back(&CallInst);
2284           return true;
2285         }
2286       }
2287       return false;
2288     }
2289     if (CallInst.getOperand(0).isReg())
2290       MethodRegNum = CallInst.getOperand(0).getReg();
2291     else
2292       return false;
2293 
2294     if (MethodRegNum == X86::RIP || MethodRegNum == X86::RBP) {
2295       VtableRegNum = X86::NoRegister;
2296       MethodRegNum = X86::NoRegister;
2297       return false;
2298     }
2299 
2300     // find load from vtable, this may or may not include the method offset
2301     while (Itr != End) {
2302       MCInst &CurInst = *Itr++;
2303       const MCInstrDesc &Desc = Info->get(CurInst.getOpcode());
2304       if (Desc.hasDefOfPhysReg(CurInst, MethodRegNum, *RegInfo)) {
2305         if (isLoad(CurInst) &&
2306             evaluateX86MemoryOperand(CurInst, &BaseReg, &Scale, &IndexReg,
2307                                      &Disp, &SegmentReg, &DispExpr)) {
2308           if (!DispExpr && Scale == 1 && BaseReg != X86::RIP &&
2309               BaseReg != X86::RBP && BaseReg != X86::NoRegister &&
2310               IndexReg == X86::NoRegister &&
2311               SegmentReg == X86::NoRegister) {
2312             VtableRegNum = BaseReg;
2313             MethodOffset = Disp;
2314             MethodFetchInsns.push_back(&CurInst);
2315             if (MethodOffset != 0)
2316               return true;
2317             break;
2318           }
2319         }
2320         return false;
2321       }
2322     }
2323 
2324     if (!VtableRegNum)
2325       return false;
2326 
2327     // Look for any add that applies the method offset to the vtable register.
2328     while (Itr != End) {
2329       MCInst &CurInst = *Itr++;
2330       const MCInstrDesc &Desc = Info->get(CurInst.getOpcode());
2331       if (Desc.hasDefOfPhysReg(CurInst, VtableRegNum, *RegInfo)) {
2332         if (isADDri(CurInst)) {
2333           assert(!MethodOffset);
2334           MethodOffset = CurInst.getOperand(2).getImm();
2335           MethodFetchInsns.insert(MethodFetchInsns.begin(), &CurInst);
2336           break;
2337         }
2338       }
2339     }
2340 
2341     return true;
2342   }
2343 
2344   bool createStackPointerIncrement(MCInst &Inst, int Size,
2345                                    bool NoFlagsClobber) const override {
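    // Emits either "lea -Size(%rsp), %rsp" (preserves EFLAGS) or
    // "sub $Size, %rsp" (shorter, clobbers EFLAGS); illustrative AT&T
    // rendering of the two paths below.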
2346     if (NoFlagsClobber) {
2347       Inst.setOpcode(X86::LEA64r);
2348       Inst.clear();
2349       Inst.addOperand(MCOperand::createReg(X86::RSP));
2350       Inst.addOperand(MCOperand::createReg(X86::RSP));        // BaseReg
2351       Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
2352       Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
2353       Inst.addOperand(MCOperand::createImm(-Size));           // Displacement
2354       Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
2355       return true;
2356     }
2357     Inst.setOpcode(X86::SUB64ri8);
2358     Inst.clear();
2359     Inst.addOperand(MCOperand::createReg(X86::RSP));
2360     Inst.addOperand(MCOperand::createReg(X86::RSP));
2361     Inst.addOperand(MCOperand::createImm(Size));
2362     return true;
2363   }
2364 
2365   bool createStackPointerDecrement(MCInst &Inst, int Size,
2366                                    bool NoFlagsClobber) const override {
2367     if (NoFlagsClobber) {
2368       Inst.setOpcode(X86::LEA64r);
2369       Inst.clear();
2370       Inst.addOperand(MCOperand::createReg(X86::RSP));
2371       Inst.addOperand(MCOperand::createReg(X86::RSP));        // BaseReg
2372       Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
2373       Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
2374       Inst.addOperand(MCOperand::createImm(Size));            // Displacement
2375       Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
2376       return true;
2377     }
2378     Inst.setOpcode(X86::ADD64ri8);
2379     Inst.clear();
2380     Inst.addOperand(MCOperand::createReg(X86::RSP));
2381     Inst.addOperand(MCOperand::createReg(X86::RSP));
2382     Inst.addOperand(MCOperand::createImm(Size));
2383     return true;
2384   }
2385 
2386   bool createSaveToStack(MCInst &Inst, const MCPhysReg &StackReg, int Offset,
2387                          const MCPhysReg &SrcReg, int Size) const override {
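    // Emits, e.g., "movq %SrcReg, Offset(%StackReg)" when Size == 8
    // (illustrative operand names taken from the parameters).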
2388     unsigned NewOpcode;
2389     switch (Size) {
2390     default:
2391       return false;
2392     case 2:      NewOpcode = X86::MOV16mr; break;
2393     case 4:      NewOpcode = X86::MOV32mr; break;
2394     case 8:      NewOpcode = X86::MOV64mr; break;
2395     }
2396     Inst.setOpcode(NewOpcode);
2397     Inst.clear();
2398     Inst.addOperand(MCOperand::createReg(StackReg));        // BaseReg
2399     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
2400     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
2401     Inst.addOperand(MCOperand::createImm(Offset));          // Displacement
2402     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
2403     Inst.addOperand(MCOperand::createReg(SrcReg));
2404     return true;
2405   }
2406 
2407   bool createRestoreFromStack(MCInst &Inst, const MCPhysReg &StackReg,
2408                               int Offset, const MCPhysReg &DstReg,
2409                               int Size) const override {
2410     return createLoad(Inst, StackReg, /*Scale=*/1, /*IndexReg=*/X86::NoRegister,
2411                       Offset, nullptr, /*AddrSegmentReg=*/X86::NoRegister,
2412                       DstReg, Size);
2413   }
2414 
2415   bool createLoad(MCInst &Inst, const MCPhysReg &BaseReg, int64_t Scale,
2416                   const MCPhysReg &IndexReg, int64_t Offset,
2417                   const MCExpr *OffsetExpr, const MCPhysReg &AddrSegmentReg,
2418                   const MCPhysReg &DstReg, int Size) const override {
2419     unsigned NewOpcode;
2420     switch (Size) {
2421     default:
2422       return false;
2423     case 2:      NewOpcode = X86::MOV16rm; break;
2424     case 4:      NewOpcode = X86::MOV32rm; break;
2425     case 8:      NewOpcode = X86::MOV64rm; break;
2426     }
2427     Inst.setOpcode(NewOpcode);
2428     Inst.clear();
2429     Inst.addOperand(MCOperand::createReg(DstReg));
2430     Inst.addOperand(MCOperand::createReg(BaseReg));
2431     Inst.addOperand(MCOperand::createImm(Scale));
2432     Inst.addOperand(MCOperand::createReg(IndexReg));
2433     if (OffsetExpr)
2434       Inst.addOperand(MCOperand::createExpr(OffsetExpr)); // Displacement
2435     else
2436       Inst.addOperand(MCOperand::createImm(Offset)); // Displacement
2437     Inst.addOperand(MCOperand::createReg(AddrSegmentReg)); // AddrSegmentReg
2438     return true;
2439   }
2440 
2441   void createLoadImmediate(MCInst &Inst, const MCPhysReg Dest,
2442                            uint32_t Imm) const override {
2443     Inst.setOpcode(X86::MOV64ri32);
2444     Inst.clear();
2445     Inst.addOperand(MCOperand::createReg(Dest));
2446     Inst.addOperand(MCOperand::createImm(Imm));
2447   }
2448 
2449   bool createIncMemory(MCInst &Inst, const MCSymbol *Target,
2450                        MCContext *Ctx) const override {
2451 
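    // Emits, illustratively, "lock incq Target(%rip)", with the displacement
    // expressed as a symbol reference.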
2452     Inst.setOpcode(X86::LOCK_INC64m);
2453     Inst.clear();
2454     Inst.addOperand(MCOperand::createReg(X86::RIP));        // BaseReg
2455     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
2456     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
2457 
2458     Inst.addOperand(MCOperand::createExpr(
2459         MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None,
2460                                 *Ctx)));                    // Displacement
2461     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
2462     return true;
2463   }
2464 
2465   bool createIJmp32Frag(SmallVectorImpl<MCInst> &Insts,
2466                         const MCOperand &BaseReg, const MCOperand &Scale,
2467                         const MCOperand &IndexReg, const MCOperand &Offset,
2468                         const MCOperand &TmpReg) const override {
2469     // The code fragment we emit here is:
2470     //
2471     //  mov32 (%base, %index, scale), %tmpreg
2472     //  jmp *%tmpreg
2473     //
2474     MCInst IJmp;
2475     IJmp.setOpcode(X86::JMP64r);
2476     IJmp.addOperand(TmpReg);
2477 
2478     MCInst Load;
2479     Load.setOpcode(X86::MOV32rm);
2480     Load.addOperand(TmpReg);
2481     Load.addOperand(BaseReg);
2482     Load.addOperand(Scale);
2483     Load.addOperand(IndexReg);
2484     Load.addOperand(Offset);
2485     Load.addOperand(MCOperand::createReg(X86::NoRegister));
2486 
2487     Insts.push_back(Load);
2488     Insts.push_back(IJmp);
2489     return true;
2490   }
2491 
2492   bool createNoop(MCInst &Inst) const override {
2493     Inst.setOpcode(X86::NOOP);
2494     return true;
2495   }
2496 
2497   bool createReturn(MCInst &Inst) const override {
2498     Inst.setOpcode(X86::RET64);
2499     return true;
2500   }
2501 
2502   InstructionListType createInlineMemcpy(bool ReturnEnd) const override {
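    // Illustrative expansion, assuming the usual memcpy argument registers
    // %rdi (dest), %rsi (src), and %rdx (size):
    //   lea (%rdi,%rdx), %rax    ; when ReturnEnd: return dest + size
    //   mov %rdi, %rax           ; otherwise: return dest
    //   mov %edx, %ecx
    //   rep movsb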
2503     InstructionListType Code;
2504     if (ReturnEnd)
2505       Code.emplace_back(MCInstBuilder(X86::LEA64r)
2506                             .addReg(X86::RAX)
2507                             .addReg(X86::RDI)
2508                             .addImm(1)
2509                             .addReg(X86::RDX)
2510                             .addImm(0)
2511                             .addReg(X86::NoRegister));
2512     else
2513       Code.emplace_back(MCInstBuilder(X86::MOV64rr)
2514                             .addReg(X86::RAX)
2515                             .addReg(X86::RDI));
2516 
2517     Code.emplace_back(MCInstBuilder(X86::MOV32rr)
2518                           .addReg(X86::ECX)
2519                           .addReg(X86::EDX));
2520     Code.emplace_back(MCInstBuilder(X86::REP_MOVSB_64));
2521 
2522     return Code;
2523   }
2524 
2525   InstructionListType createOneByteMemcpy() const override {
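    // Roughly "movb (%rsi), %cl; movb %cl, (%rdi); movq %rdi, %rax":
    // an illustrative AT&T rendering of the three instructions built below.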
2526     InstructionListType Code;
2527     Code.emplace_back(MCInstBuilder(X86::MOV8rm)
2528                           .addReg(X86::CL)
2529                           .addReg(X86::RSI)
2530                           .addImm(0)
2531                           .addReg(X86::NoRegister)
2532                           .addImm(0)
2533                           .addReg(X86::NoRegister));
2534     Code.emplace_back(MCInstBuilder(X86::MOV8mr)
2535                           .addReg(X86::RDI)
2536                           .addImm(0)
2537                           .addReg(X86::NoRegister)
2538                           .addImm(0)
2539                           .addReg(X86::NoRegister)
2540                           .addReg(X86::CL));
2541     Code.emplace_back(MCInstBuilder(X86::MOV64rr)
2542                           .addReg(X86::RAX)
2543                           .addReg(X86::RDI));
2544     return Code;
2545   }
2546 
2547   InstructionListType createCmpJE(MCPhysReg RegNo, int64_t Imm,
2548                                   const MCSymbol *Target,
2549                                   MCContext *Ctx) const override {
2550     InstructionListType Code;
2551     Code.emplace_back(MCInstBuilder(X86::CMP64ri8)
2552                           .addReg(RegNo)
2553                           .addImm(Imm));
2554     Code.emplace_back(MCInstBuilder(X86::JCC_1)
2555                           .addExpr(MCSymbolRefExpr::create(
2556                               Target, MCSymbolRefExpr::VK_None, *Ctx))
2557                           .addImm(X86::COND_E));
2558     return Code;
2559   }
2560 
2561   Optional<Relocation>
2562   createRelocation(const MCFixup &Fixup,
2563                    const MCAsmBackend &MAB) const override {
2564     const MCFixupKindInfo &FKI = MAB.getFixupKindInfo(Fixup.getKind());
2565 
2566     assert(FKI.TargetOffset == 0 && "0-bit relocation offset expected");
2567     const uint64_t RelOffset = Fixup.getOffset();
2568 
2569     uint64_t RelType;
2570     if (FKI.Flags & MCFixupKindInfo::FKF_IsPCRel) {
2571       switch (FKI.TargetSize) {
2572       default:
2573         return NoneType();
2574       case  8: RelType = ELF::R_X86_64_PC8; break;
2575       case 16: RelType = ELF::R_X86_64_PC16; break;
2576       case 32: RelType = ELF::R_X86_64_PC32; break;
2577       case 64: RelType = ELF::R_X86_64_PC64; break;
2578       }
2579     } else {
2580       switch (FKI.TargetSize) {
2581       default:
2582         return NoneType();
2583       case  8: RelType = ELF::R_X86_64_8; break;
2584       case 16: RelType = ELF::R_X86_64_16; break;
2585       case 32: RelType = ELF::R_X86_64_32; break;
2586       case 64: RelType = ELF::R_X86_64_64; break;
2587       }
2588     }
2589 
2590     // Extract a symbol and an addend out of the fixup value expression.
2591     //
2592     // Only the following limited expression types are supported:
2593     //   Symbol + Addend
2594     //   Symbol
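    // For example (illustrative), a fixup value of "foo + 16" yields
    // Symbol = foo and Addend = 16.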
2595     uint64_t Addend = 0;
2596     MCSymbol *Symbol = nullptr;
2597     const MCExpr *ValueExpr = Fixup.getValue();
2598     if (ValueExpr->getKind() == MCExpr::Binary) {
2599       const auto *BinaryExpr = cast<MCBinaryExpr>(ValueExpr);
2600       assert(BinaryExpr->getOpcode() == MCBinaryExpr::Add &&
2601              "unexpected binary expression");
2602       const MCExpr *LHS = BinaryExpr->getLHS();
2603       assert(LHS->getKind() == MCExpr::SymbolRef && "unexpected LHS");
2604       Symbol = const_cast<MCSymbol *>(this->getTargetSymbol(LHS));
2605       const MCExpr *RHS = BinaryExpr->getRHS();
2606       assert(RHS->getKind() == MCExpr::Constant && "unexpected RHS");
2607       Addend = cast<MCConstantExpr>(RHS)->getValue();
2608     } else {
2609       assert(ValueExpr->getKind() == MCExpr::SymbolRef && "unexpected value");
2610       Symbol = const_cast<MCSymbol *>(this->getTargetSymbol(ValueExpr));
2611     }
2612 
2613     return Relocation({RelOffset, Symbol, RelType, Addend, 0});
2614   }
2615 
2616   bool replaceImmWithSymbolRef(MCInst &Inst, const MCSymbol *Symbol,
2617                                int64_t Addend, MCContext *Ctx, int64_t &Value,
2618                                uint64_t RelType) const override {
2619     unsigned ImmOpNo = -1U;
2620 
2621     for (unsigned Index = 0; Index < MCPlus::getNumPrimeOperands(Inst);
2622          ++Index) {
2623       if (Inst.getOperand(Index).isImm()) {
2624         ImmOpNo = Index;
2625         // TODO: this is a bit hacky.  It finds the correct operand by
2626         // searching for a specific immediate value.  If no value is
2627         // provided it defaults to the last immediate operand found.
2628         // This could lead to unexpected results if the instruction
2629         // has more than one immediate with the same value.
2630         if (Inst.getOperand(ImmOpNo).getImm() == Value)
2631           break;
2632       }
2633     }
2634 
2635     if (ImmOpNo == -1U)
2636       return false;
2637 
2638     Value = Inst.getOperand(ImmOpNo).getImm();
2639 
2640     setOperandToSymbolRef(Inst, ImmOpNo, Symbol, Addend, Ctx, RelType);
2641 
2642     return true;
2643   }
2644 
2645   bool replaceRegWithImm(MCInst &Inst, unsigned Register,
2646                          int64_t Imm) const override {
2647 
2648     enum CheckSignExt : uint8_t {
2649       NOCHECK = 0,
2650       CHECK8,
2651       CHECK32,
2652     };
2653 
2654     using CheckList = std::vector<std::pair<CheckSignExt, unsigned>>;
2655     struct InstInfo {
2656       // Size in bytes that Inst loads from memory.
2657       uint8_t DataSize;
2658 
2659       // True when the target operand has to be duplicated because the opcode
2660       // expects a LHS operand.
2661       bool HasLHS;
2662 
2663       // List of checks and corresponding opcodes to be used. We try to use the
2664       // smallest possible immediate value when various sizes are available,
2665       // hence we may need to check whether a larger constant fits in a smaller
2666       // immediate.
2667       CheckList Checks;
2668     };
2669 
2670     InstInfo I;
2671 
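    // Illustrative example (assumed operands): replacing %rbx with Imm == 5
    // in "addq %rbx, %rax" passes the CHECK8 test, selects ADD64ri8, and
    // yields "addq $5, %rax".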
2672     switch (Inst.getOpcode()) {
2673     default: {
2674       switch (getPushSize(Inst)) {
2675 
2676       case 2: I = {2, false, {{CHECK8, X86::PUSH16i8}, {NOCHECK, X86::PUSHi16}}}; break;
2677       case 4: I = {4, false, {{CHECK8, X86::PUSH32i8}, {NOCHECK, X86::PUSHi32}}}; break;
2678       case 8: I = {8, false, {{CHECK8, X86::PUSH64i8},
2679                               {CHECK32, X86::PUSH64i32},
2680                               {NOCHECK, Inst.getOpcode()}}}; break;
2681       default: return false;
2682       }
2683       break;
2684     }
2685 
2686     // MOV
2687     case X86::MOV8rr:       I = {1, false, {{NOCHECK, X86::MOV8ri}}}; break;
2688     case X86::MOV16rr:      I = {2, false, {{NOCHECK, X86::MOV16ri}}}; break;
2689     case X86::MOV32rr:      I = {4, false, {{NOCHECK, X86::MOV32ri}}}; break;
2690     case X86::MOV64rr:      I = {8, false, {{CHECK32, X86::MOV64ri32},
2691                                             {NOCHECK, X86::MOV64ri}}}; break;
2692 
2693     case X86::MOV8mr:       I = {1, false, {{NOCHECK, X86::MOV8mi}}}; break;
2694     case X86::MOV16mr:      I = {2, false, {{NOCHECK, X86::MOV16mi}}}; break;
2695     case X86::MOV32mr:      I = {4, false, {{NOCHECK, X86::MOV32mi}}}; break;
2696     case X86::MOV64mr:      I = {8, false, {{CHECK32, X86::MOV64mi32},
2697                                             {NOCHECK, X86::MOV64mr}}}; break;
2698 
2699     // MOVZX
2700     case X86::MOVZX16rr8:   I = {1, false, {{NOCHECK, X86::MOV16ri}}}; break;
2701     case X86::MOVZX32rr8:   I = {1, false, {{NOCHECK, X86::MOV32ri}}}; break;
2702     case X86::MOVZX32rr16:  I = {2, false, {{NOCHECK, X86::MOV32ri}}}; break;
2703 
2704     // CMP
2705     case X86::CMP8rr:       I = {1, false, {{NOCHECK, X86::CMP8ri}}}; break;
2706     case X86::CMP16rr:      I = {2, false, {{CHECK8, X86::CMP16ri8},
2707                                             {NOCHECK, X86::CMP16ri}}}; break;
2708     case X86::CMP32rr:      I = {4, false, {{CHECK8, X86::CMP32ri8},
2709                                             {NOCHECK, X86::CMP32ri}}}; break;
2710     case X86::CMP64rr:      I = {8, false, {{CHECK8, X86::CMP64ri8},
2711                                             {CHECK32, X86::CMP64ri32},
2712                                             {NOCHECK, X86::CMP64rr}}}; break;
2713 
2714     // TEST
2715     case X86::TEST8rr:      I = {1, false, {{NOCHECK, X86::TEST8ri}}}; break;
2716     case X86::TEST16rr:     I = {2, false, {{NOCHECK, X86::TEST16ri}}}; break;
2717     case X86::TEST32rr:     I = {4, false, {{NOCHECK, X86::TEST32ri}}}; break;
2718     case X86::TEST64rr:     I = {8, false, {{CHECK32, X86::TEST64ri32},
2719                                             {NOCHECK, X86::TEST64rr}}}; break;
2720 
2721     // ADD
2722     case X86::ADD8rr:       I = {1, true, {{NOCHECK, X86::ADD8ri}}}; break;
2723     case X86::ADD16rr:      I = {2, true, {{CHECK8, X86::ADD16ri8},
2724                                            {NOCHECK, X86::ADD16ri}}}; break;
2725     case X86::ADD32rr:      I = {4, true, {{CHECK8, X86::ADD32ri8},
2726                                            {NOCHECK, X86::ADD32ri}}}; break;
2727     case X86::ADD64rr:      I = {8, true, {{CHECK8, X86::ADD64ri8},
2728                                            {CHECK32, X86::ADD64ri32},
2729                                            {NOCHECK, X86::ADD64rr}}}; break;
2730 
2731     // SUB
2732     case X86::SUB8rr:       I = {1, true, {{NOCHECK, X86::SUB8ri}}}; break;
2733     case X86::SUB16rr:      I = {2, true, {{CHECK8, X86::SUB16ri8},
2734                                            {NOCHECK, X86::SUB16ri}}}; break;
2735     case X86::SUB32rr:      I = {4, true, {{CHECK8, X86::SUB32ri8},
2736                                            {NOCHECK, X86::SUB32ri}}}; break;
2737     case X86::SUB64rr:      I = {8, true, {{CHECK8, X86::SUB64ri8},
2738                                            {CHECK32, X86::SUB64ri32},
2739                                            {NOCHECK, X86::SUB64rr}}}; break;
2740 
2741     // AND
2742     case X86::AND8rr:       I = {1, true, {{NOCHECK, X86::AND8ri}}}; break;
2743     case X86::AND16rr:      I = {2, true, {{CHECK8, X86::AND16ri8},
2744                                            {NOCHECK, X86::AND16ri}}}; break;
2745     case X86::AND32rr:      I = {4, true, {{CHECK8, X86::AND32ri8},
2746                                            {NOCHECK, X86::AND32ri}}}; break;
2747     case X86::AND64rr:      I = {8, true, {{CHECK8, X86::AND64ri8},
2748                                            {CHECK32, X86::AND64ri32},
2749                                            {NOCHECK, X86::AND64rr}}}; break;
2750 
2751     // OR
2752     case X86::OR8rr:        I = {1, true, {{NOCHECK, X86::OR8ri}}}; break;
2753     case X86::OR16rr:       I = {2, true, {{CHECK8, X86::OR16ri8},
2754                                            {NOCHECK, X86::OR16ri}}}; break;
2755     case X86::OR32rr:       I = {4, true, {{CHECK8, X86::OR32ri8},
2756                                            {NOCHECK, X86::OR32ri}}}; break;
2757     case X86::OR64rr:       I = {8, true, {{CHECK8, X86::OR64ri8},
2758                                            {CHECK32, X86::OR64ri32},
2759                                            {NOCHECK, X86::OR64rr}}}; break;
2760 
2761     // XOR
2762     case X86::XOR8rr:       I = {1, true, {{NOCHECK, X86::XOR8ri}}}; break;
2763     case X86::XOR16rr:      I = {2, true, {{CHECK8, X86::XOR16ri8},
2764                                            {NOCHECK, X86::XOR16ri}}}; break;
2765     case X86::XOR32rr:      I = {4, true, {{CHECK8, X86::XOR32ri8},
2766                                            {NOCHECK, X86::XOR32ri}}}; break;
2767     case X86::XOR64rr:      I = {8, true, {{CHECK8, X86::XOR64ri8},
2768                                            {CHECK32, X86::XOR64ri32},
2769                                            {NOCHECK, X86::XOR64rr}}}; break;
2770     }
2771 
2772     // Compute the new opcode.
2773     unsigned NewOpcode = 0;
2774     for (const std::pair<CheckSignExt, unsigned> &Check : I.Checks) {
2775       NewOpcode = Check.second;
2776       if (Check.first == NOCHECK)
2777         break;
2778       if (Check.first == CHECK8 && isInt<8>(Imm))
2779         break;
2780       if (Check.first == CHECK32 && isInt<32>(Imm))
2781         break;
2782     }
2783     if (NewOpcode == Inst.getOpcode())
2784       return false;
2785 
2786     const MCInstrDesc &InstDesc = Info->get(Inst.getOpcode());
2787 
2788     unsigned NumFound = 0;
2789     for (unsigned Index = InstDesc.getNumDefs() + (I.HasLHS ? 1 : 0),
2790                   E = InstDesc.getNumOperands();
2791          Index != E; ++Index)
2792       if (Inst.getOperand(Index).isReg() &&
2793           Inst.getOperand(Index).getReg() == Register)
2794         NumFound++;
2795 
2796     if (NumFound != 1)
2797       return false;
2798 
2799     MCOperand TargetOp = Inst.getOperand(0);
2800     Inst.clear();
2801     Inst.setOpcode(NewOpcode);
2802     Inst.addOperand(TargetOp);
2803     if (I.HasLHS)
2804       Inst.addOperand(TargetOp);
2805     Inst.addOperand(MCOperand::createImm(Imm));
2806 
2807     return true;
2808   }
2809 
2810   bool replaceRegWithReg(MCInst &Inst, unsigned ToReplace,
2811                          unsigned ReplaceWith) const override {
2812 
2813     // Determine HasLHS so we know how many leading operands to skip below.
2814     bool HasLHS;
2815     if (X86::isAND(Inst.getOpcode()) || X86::isADD(Inst.getOpcode()) ||
2816         X86::isSUB(Inst.getOpcode())) {
2817       HasLHS = true;
2818     } else if (isPop(Inst) || isPush(Inst) || X86::isCMP(Inst.getOpcode()) ||
2819                X86::isTEST(Inst.getOpcode())) {
2820       HasLHS = false;
2821     } else {
2822       switch (Inst.getOpcode()) {
2823       case X86::MOV8rr:
2824       case X86::MOV8rm:
2825       case X86::MOV8mr:
2826       case X86::MOV8ri:
2827       case X86::MOV16rr:
2828       case X86::MOV16rm:
2829       case X86::MOV16mr:
2830       case X86::MOV16ri:
2831       case X86::MOV32rr:
2832       case X86::MOV32rm:
2833       case X86::MOV32mr:
2834       case X86::MOV32ri:
2835       case X86::MOV64rr:
2836       case X86::MOV64rm:
2837       case X86::MOV64mr:
2838       case X86::MOV64ri:
2839       case X86::MOVZX16rr8:
2840       case X86::MOVZX32rr8:
2841       case X86::MOVZX32rr16:
2842       case X86::MOVSX32rm8:
2843       case X86::MOVSX32rr8:
2844       case X86::MOVSX64rm32:
2845       case X86::LEA64r:
2846         HasLHS = false;
2847         break;
2848       default:
2849         return false;
2850       }
2851     }
2852 
2853     const MCInstrDesc &InstDesc = Info->get(Inst.getOpcode());
2854 
2855     bool FoundOne = false;
2856 
2857     // Iterate only through source operands that aren't also destination operands.
2858     for (unsigned Index = InstDesc.getNumDefs() + (HasLHS ? 1 : 0),
2859                   E = InstDesc.getNumOperands();
2860          Index != E; ++Index) {
2861       BitVector RegAliases = getAliases(ToReplace, true);
2862       if (!Inst.getOperand(Index).isReg() ||
2863           !RegAliases.test(Inst.getOperand(Index).getReg()))
2864         continue;
2865       // Resize register if needed
2866       unsigned SizedReplaceWith = getAliasSized(
2867           ReplaceWith, getRegSize(Inst.getOperand(Index).getReg()));
2868       MCOperand NewOperand = MCOperand::createReg(SizedReplaceWith);
2869       Inst.getOperand(Index) = NewOperand;
2870       FoundOne = true;
2871     }
2872 
2873     // Return true if at least one operand was replaced
2874     return FoundOne;
2875   }
2876 
2877   bool createUncondBranch(MCInst &Inst, const MCSymbol *TBB,
2878                           MCContext *Ctx) const override {
2879     Inst.setOpcode(X86::JMP_1);
2880     Inst.addOperand(MCOperand::createExpr(
2881         MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx)));
2882     return true;
2883   }
2884 
2885   bool createCall(MCInst &Inst, const MCSymbol *Target,
2886                   MCContext *Ctx) override {
2887     Inst.setOpcode(X86::CALL64pcrel32);
2888     Inst.addOperand(MCOperand::createExpr(
2889         MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx)));
2890     return true;
2891   }
2892 
2893   bool createTailCall(MCInst &Inst, const MCSymbol *Target,
2894                       MCContext *Ctx) override {
2895     return createDirectCall(Inst, Target, Ctx, /*IsTailCall*/ true);
2896   }
2897 
2898   void createLongTailCall(InstructionListType &Seq, const MCSymbol *Target,
2899                           MCContext *Ctx) override {
2900     Seq.clear();
2901     Seq.emplace_back();
2902     createDirectCall(Seq.back(), Target, Ctx, /*IsTailCall*/ true);
2903   }
2904 
2905   bool createTrap(MCInst &Inst) const override {
2906     Inst.clear();
2907     Inst.setOpcode(X86::TRAP);
2908     return true;
2909   }
2910 
2911   bool reverseBranchCondition(MCInst &Inst, const MCSymbol *TBB,
2912                               MCContext *Ctx) const override {
2913     unsigned InvCC = getInvertedCondCode(getCondCode(Inst));
2914     assert(InvCC != X86::COND_INVALID && "invalid branch instruction");
2915     Inst.getOperand(Info->get(Inst.getOpcode()).NumOperands - 1).setImm(InvCC);
2916     Inst.getOperand(0) = MCOperand::createExpr(
2917         MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx));
2918     return true;
2919   }
2920 
2921   bool replaceBranchCondition(MCInst &Inst, const MCSymbol *TBB, MCContext *Ctx,
2922                               unsigned CC) const override {
2923     if (CC == X86::COND_INVALID)
2924       return false;
2925     Inst.getOperand(Info->get(Inst.getOpcode()).NumOperands - 1).setImm(CC);
2926     Inst.getOperand(0) = MCOperand::createExpr(
2927         MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx));
2928     return true;
2929   }
2930 
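       // Map a condition code and its inverse to a single canonical value, so
       // that, e.g., both COND_E and COND_NE yield COND_E and both COND_L and
       // COND_GE yield COND_L.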
2931   unsigned getCanonicalBranchCondCode(unsigned CC) const override {
2932     switch (CC) {
2933     default:           return X86::COND_INVALID;
2934 
2935     case X86::COND_E:  return X86::COND_E;
2936     case X86::COND_NE: return X86::COND_E;
2937 
2938     case X86::COND_L:  return X86::COND_L;
2939     case X86::COND_GE: return X86::COND_L;
2940 
2941     case X86::COND_LE: return X86::COND_G;
2942     case X86::COND_G:  return X86::COND_G;
2943 
2944     case X86::COND_B:  return X86::COND_B;
2945     case X86::COND_AE: return X86::COND_B;
2946 
2947     case X86::COND_BE: return X86::COND_A;
2948     case X86::COND_A:  return X86::COND_A;
2949 
2950     case X86::COND_S:  return X86::COND_S;
2951     case X86::COND_NS: return X86::COND_S;
2952 
2953     case X86::COND_P:  return X86::COND_P;
2954     case X86::COND_NP: return X86::COND_P;
2955 
2956     case X86::COND_O:  return X86::COND_O;
2957     case X86::COND_NO: return X86::COND_O;
2958     }
2959   }
2960 
2961   bool replaceBranchTarget(MCInst &Inst, const MCSymbol *TBB,
2962                            MCContext *Ctx) const override {
2963     assert((isCall(Inst) || isBranch(Inst)) && !isIndirectBranch(Inst) &&
2964            "Invalid instruction");
2965     Inst.getOperand(0) = MCOperand::createExpr(
2966         MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx));
2967     return true;
2968   }
2969 
2970   MCPhysReg getX86R11() const override { return X86::R11; }
2971 
2972   MCPhysReg getIntArgRegister(unsigned ArgNo) const override {
2973     // FIXME: this should depend on the calling convention.
2974     switch (ArgNo) {
2975     case 0:   return X86::RDI;
2976     case 1:   return X86::RSI;
2977     case 2:   return X86::RDX;
2978     case 3:   return X86::RCX;
2979     case 4:   return X86::R8;
2980     case 5:   return X86::R9;
2981     default:  return getNoRegister();
2982     }
2983   }
2984 
2985   void createPause(MCInst &Inst) const override {
2986     Inst.clear();
2987     Inst.setOpcode(X86::PAUSE);
2988   }
2989 
2990   void createLfence(MCInst &Inst) const override {
2991     Inst.clear();
2992     Inst.setOpcode(X86::LFENCE);
2993   }
2994 
2995   bool createDirectCall(MCInst &Inst, const MCSymbol *Target, MCContext *Ctx,
2996                         bool IsTailCall) override {
2997     Inst.clear();
2998     Inst.setOpcode(IsTailCall ? X86::JMP_4 : X86::CALL64pcrel32);
2999     Inst.addOperand(MCOperand::createExpr(
3000         MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx)));
3001     if (IsTailCall)
3002       setTailCall(Inst);
3003     return true;
3004   }
3005 
3006   void createShortJmp(InstructionListType &Seq, const MCSymbol *Target,
3007                       MCContext *Ctx, bool IsTailCall) override {
3008     Seq.clear();
3009     MCInst Inst;
3010     Inst.setOpcode(X86::JMP_1);
3011     Inst.addOperand(MCOperand::createExpr(
3012         MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx)));
3013     if (IsTailCall)
3014       setTailCall(Inst);
3015     Seq.emplace_back(Inst);
3016   }
3017 
3018   bool isConditionalMove(const MCInst &Inst) const override {
3019     unsigned OpCode = Inst.getOpcode();
3020     return (OpCode == X86::CMOV16rr || OpCode == X86::CMOV32rr ||
3021             OpCode == X86::CMOV64rr);
3022   }
3023 
3024   bool isBranchOnMem(const MCInst &Inst) const override {
3025     unsigned OpCode = Inst.getOpcode();
3026     if (OpCode == X86::CALL64m || (OpCode == X86::JMP32m && isTailCall(Inst)) ||
3027         OpCode == X86::JMP64m)
3028       return true;
3029 
3030     return false;
3031   }
3032 
3033   bool isBranchOnReg(const MCInst &Inst) const override {
3034     unsigned OpCode = Inst.getOpcode();
3035     if (OpCode == X86::CALL64r || (OpCode == X86::JMP32r && isTailCall(Inst)) ||
3036         OpCode == X86::JMP64r)
3037       return true;
3038 
3039     return false;
3040   }
3041 
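       // Emit a push of Reg with the requested operand Size; X86::EFLAGS selects
       // the matching PUSHF form instead (mirrored by createPopRegister below).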
3042   void createPushRegister(MCInst &Inst, MCPhysReg Reg,
3043                           unsigned Size) const override {
3044     Inst.clear();
3045     unsigned NewOpcode = 0;
3046     if (Reg == X86::EFLAGS) {
3047       switch (Size) {
3048       case 2: NewOpcode = X86::PUSHF16;  break;
3049       case 4: NewOpcode = X86::PUSHF32;  break;
3050       case 8: NewOpcode = X86::PUSHF64;  break;
3051       default:
3052         llvm_unreachable("Unexpected size");
3053       }
3054       Inst.setOpcode(NewOpcode);
3055       return;
3056     }
3057     switch (Size) {
3058     case 2: NewOpcode = X86::PUSH16r;  break;
3059     case 4: NewOpcode = X86::PUSH32r;  break;
3060     case 8: NewOpcode = X86::PUSH64r;  break;
3061     default:
3062       llvm_unreachable("Unexpected size");
3063     }
3064     Inst.setOpcode(NewOpcode);
3065     Inst.addOperand(MCOperand::createReg(Reg));
3066   }
3067 
3068   void createPopRegister(MCInst &Inst, MCPhysReg Reg,
3069                          unsigned Size) const override {
3070     Inst.clear();
3071     unsigned NewOpcode = 0;
3072     if (Reg == X86::EFLAGS) {
3073       switch (Size) {
3074       case 2: NewOpcode = X86::POPF16;  break;
3075       case 4: NewOpcode = X86::POPF32;  break;
3076       case 8: NewOpcode = X86::POPF64;  break;
3077       default:
3078         llvm_unreachable("Unexpected size");
3079       }
3080       Inst.setOpcode(NewOpcode);
3081       return;
3082     }
3083     switch (Size) {
3084     case 2: NewOpcode = X86::POP16r;  break;
3085     case 4: NewOpcode = X86::POP32r;  break;
3086     case 8: NewOpcode = X86::POP64r;  break;
3087     default:
3088       llvm_unreachable("Unexpected size");
3089     }
3090     Inst.setOpcode(NewOpcode);
3091     Inst.addOperand(MCOperand::createReg(Reg));
3092   }
3093 
3094   void createPushFlags(MCInst &Inst, unsigned Size) const override {
3095     return createPushRegister(Inst, X86::EFLAGS, Size);
3096   }
3097 
3098   void createPopFlags(MCInst &Inst, unsigned Size) const override {
3099     return createPopRegister(Inst, X86::EFLAGS, Size);
3100   }
3101 
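       // Emit add $Value, %Reg using the 8/16/32-bit form selected by Size. Used
       // in createInstrIncMemory below as add $127, %al to re-create the overflow
       // flag before SAHF.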
3102   void createAddRegImm(MCInst &Inst, MCPhysReg Reg, int64_t Value,
3103                        unsigned Size) const {
3104     unsigned int Opcode;
3105     switch (Size) {
3106     case 1: Opcode = X86::ADD8ri; break;
3107     case 2: Opcode = X86::ADD16ri; break;
3108     case 4: Opcode = X86::ADD32ri; break;
3109     default:
3110       llvm_unreachable("Unexpected size");
3111     }
3112     Inst.setOpcode(Opcode);
3113     Inst.clear();
3114     Inst.addOperand(MCOperand::createReg(Reg));
3115     Inst.addOperand(MCOperand::createReg(Reg));
3116     Inst.addOperand(MCOperand::createImm(Value));
3117   }
3118 
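       // Zero Reg with a MOV of immediate 0 rather than an XOR so that EFLAGS
       // are left untouched.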
3119   void createClearRegWithNoEFlagsUpdate(MCInst &Inst, MCPhysReg Reg,
3120                                         unsigned Size) const {
3121     unsigned int Opcode;
3122     switch (Size) {
3123     case 1: Opcode = X86::MOV8ri; break;
3124     case 2: Opcode = X86::MOV16ri; break;
3125     case 4: Opcode = X86::MOV32ri; break;
3126     case 8: Opcode = X86::MOV64ri; break;
3127     default:
3128       llvm_unreachable("Unexpected size");
3129     }
3130     Inst.setOpcode(Opcode);
3131     Inst.clear();
3132     Inst.addOperand(MCOperand::createReg(Reg));
3133     Inst.addOperand(MCOperand::createImm(0));
3134   }
3135 
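       // Emit SETO %Reg to capture the overflow flag, which LAHF does not copy
       // into AH.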
3136   void createX86SaveOVFlagToRegister(MCInst &Inst, MCPhysReg Reg) const {
3137     Inst.setOpcode(X86::SETCCr);
3138     Inst.clear();
3139     Inst.addOperand(MCOperand::createReg(Reg));
3140     Inst.addOperand(MCOperand::createImm(X86::COND_O));
3141   }
3142 
3143   void createX86Lahf(MCInst &Inst) const {
3144     Inst.setOpcode(X86::LAHF);
3145     Inst.clear();
3146   }
3147 
3148   void createX86Sahf(MCInst &Inst) const {
3149     Inst.setOpcode(X86::SAHF);
3150     Inst.clear();
3151   }
3152 
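       // Emit the counter-increment instrumentation sequence: optionally step
       // over the red zone for leaf functions, save RAX and the flags (LAHF plus
       // SETO), atomically increment the counter at Target, then restore the
       // flags and RAX.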
3153   void createInstrIncMemory(InstructionListType &Instrs, const MCSymbol *Target,
3154                             MCContext *Ctx, bool IsLeaf) const override {
3155     unsigned int I = 0;
3156 
3157     Instrs.resize(IsLeaf ? 13 : 11);
3158     // Don't clobber application red zone (ABI dependent)
3159     if (IsLeaf)
3160       createStackPointerIncrement(Instrs[I++], 128,
3161                                   /*NoFlagsClobber=*/true);
3162 
3163     // Performance improvement: per the optimization discussed at
3164     // https://reviews.llvm.org/D6629, flags are saved/restored with LAHF/SAHF
3165     // (plus SETO/ADD for the overflow flag) instead of PUSHF/POPF.
3166     // PUSHF
3167     createPushRegister(Instrs[I++], X86::RAX, 8);
3168     createClearRegWithNoEFlagsUpdate(Instrs[I++], X86::RAX, 8);
3169     createX86Lahf(Instrs[I++]);
3170     createPushRegister(Instrs[I++], X86::RAX, 8);
3171     createClearRegWithNoEFlagsUpdate(Instrs[I++], X86::RAX, 8);
3172     createX86SaveOVFlagToRegister(Instrs[I++], X86::AL);
3173     // LOCK INC
3174     createIncMemory(Instrs[I++], Target, Ctx);
3175     // POPF
3176     createAddRegImm(Instrs[I++], X86::AL, 127, 1);
3177     createPopRegister(Instrs[I++], X86::RAX, 8);
3178     createX86Sahf(Instrs[I++]);
3179     createPopRegister(Instrs[I++], X86::RAX, 8);
3180 
3181     if (IsLeaf)
3182       createStackPointerDecrement(Instrs[I], 128,
3183                                   /*NoFlagsClobber=*/true);
3184   }
3185 
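       // Emit xchg %Source, Disp(%MemBaseReg).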
3186   void createSwap(MCInst &Inst, MCPhysReg Source, MCPhysReg MemBaseReg,
3187                   int64_t Disp) const {
3188     Inst.setOpcode(X86::XCHG64rm);
3189     Inst.addOperand(MCOperand::createReg(Source));
3190     Inst.addOperand(MCOperand::createReg(Source));
3191     Inst.addOperand(MCOperand::createReg(MemBaseReg));      // BaseReg
3192     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
3193     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
3194     Inst.addOperand(MCOperand::createImm(Disp));            // Displacement
3195     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
3196   }
3197 
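       // Emit an indirect jump through memory: jmp *Disp(%MemBaseReg).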
3198   void createIndirectBranch(MCInst &Inst, MCPhysReg MemBaseReg,
3199                             int64_t Disp) const {
3200     Inst.setOpcode(X86::JMP64m);
3201     Inst.addOperand(MCOperand::createReg(MemBaseReg));      // BaseReg
3202     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
3203     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
3204     Inst.addOperand(MCOperand::createImm(Disp));            // Displacement
3205     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
3206   }
3207 
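       // Build the sequence that replaces an instrumented indirect call: the
       // original target and the call-site ID are pushed on the stack and control
       // is transferred to HandlerFuncAddr (see the sequence comment below).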
3208   InstructionListType createInstrumentedIndirectCall(const MCInst &CallInst,
3209                                                      bool TailCall,
3210                                                      MCSymbol *HandlerFuncAddr,
3211                                                      int CallSiteID,
3212                                                      MCContext *Ctx) override {
3213     // Check if the target address expression used in the original indirect call
3214     // uses the stack pointer, which we are going to clobber.
3215     static BitVector SPAliases(getAliases(X86::RSP));
3216     bool UsesSP = false;
3217     // Skip defs.
3218     for (unsigned I = Info->get(CallInst.getOpcode()).getNumDefs(),
3219                   E = MCPlus::getNumPrimeOperands(CallInst);
3220          I != E; ++I) {
3221       const MCOperand &Operand = CallInst.getOperand(I);
3222       if (Operand.isReg() && SPAliases[Operand.getReg()]) {
3223         UsesSP = true;
3224         break;
3225       }
3226     }
3227 
3228     InstructionListType Insts;
3229     MCPhysReg TempReg = getIntArgRegister(0);
3230     // Code sequence used to enter the indirect-call instrumentation helper:
3231     //   push %rdi
3232     //   add $8, %rsp       ;; $rsp may be used in target, so fix it to prev val
3233     //   movq target, %rdi  ;; via convertIndirectCallTargetToLoad
3234     //   movq target, %rdi  ;; via convertIndirectCallToLoad
3235     //   push %rdi
3236     //   movq $CallSiteID, %rdi
3237     //   push %rdi
3238     //   callq/jmp HandlerFuncAddr
3239     Insts.emplace_back();
3240     createPushRegister(Insts.back(), TempReg, 8);
3241     if (UsesSP) { // Only adjust SP if we really need to
3242       Insts.emplace_back();
3243       createStackPointerDecrement(Insts.back(), 8, /*NoFlagsClobber=*/false);
3244     }
3245     Insts.emplace_back(CallInst);
3246     // Insts.back() and CallInst now share the same annotation instruction.
3247     // Strip it from Insts.back(), only preserving tail call annotation.
3248     stripAnnotations(Insts.back(), /*KeepTC=*/true);
3249     convertIndirectCallToLoad(Insts.back(), TempReg);
3250     if (UsesSP) {
3251       Insts.emplace_back();
3252       createStackPointerIncrement(Insts.back(), 8, /*NoFlagsClobber=*/false);
3253     }
3254     Insts.emplace_back();
3255     createPushRegister(Insts.back(), TempReg, 8);
3256     Insts.emplace_back();
3257     createLoadImmediate(Insts.back(), TempReg, CallSiteID);
3258     Insts.emplace_back();
3259     createPushRegister(Insts.back(), TempReg, 8);
3260     Insts.emplace_back();
3261     createDirectCall(Insts.back(), HandlerFuncAddr, Ctx,
3262                      /*TailCall=*/TailCall);
3263     // Carry over metadata
3264     for (int I = MCPlus::getNumPrimeOperands(CallInst),
3265              E = CallInst.getNumOperands();
3266          I != E; ++I)
3267       Insts.back().addOperand(CallInst.getOperand(I));
3268 
3269     return Insts;
3270   }
3271 
3272   InstructionListType createInstrumentedIndCallHandlerExitBB() const override {
3273     const MCPhysReg TempReg = getIntArgRegister(0);
3274     // We just need to undo the sequence created for every ind call in
3275     // instrumentIndirectTarget(), which can be accomplished minimally with:
3276     //   popfq
3277     //   pop %rdi
3278     //   add $16, %rsp
3279     //   xchg (%rsp), %rdi
3280     //   jmp *-8(%rsp)
3281     InstructionListType Insts(5);
3282     createPopFlags(Insts[0], 8);
3283     createPopRegister(Insts[1], TempReg, 8);
3284     createStackPointerDecrement(Insts[2], 16, /*NoFlagsClobber=*/false);
3285     createSwap(Insts[3], TempReg, X86::RSP, 0);
3286     createIndirectBranch(Insts[4], X86::RSP, -8);
3287     return Insts;
3288   }
3289 
3290   InstructionListType
3291   createInstrumentedIndTailCallHandlerExitBB() const override {
3292     const MCPhysReg TempReg = getIntArgRegister(0);
3293     // Same thing as above, but for tail calls
3294     //   popfq
3295     //   add $16, %rsp
3296     //   pop %rdi
3297     //   jmp *-16(%rsp)
3298     InstructionListType Insts(4);
3299     createPopFlags(Insts[0], 8);
3300     createStackPointerDecrement(Insts[1], 16, /*NoFlagsClobber=*/false);
3301     createPopRegister(Insts[2], TempReg, 8);
3302     createIndirectBranch(Insts[3], X86::RSP, -16);
3303     return Insts;
3304   }
3305 
3306   InstructionListType
3307   createInstrumentedIndCallHandlerEntryBB(const MCSymbol *InstrTrampoline,
3308                                           const MCSymbol *IndCallHandler,
3309                                           MCContext *Ctx) override {
3310     const MCPhysReg TempReg = getIntArgRegister(0);
3311     // Code sequence used to check whether InstrTrampoline was initialized
3312     // and, if so, call it; control returns via IndCallHandler.
3313     //   pushfq
3314     //   mov    InstrTrampoline,%rdi
3315     //   cmp    $0x0,%rdi
3316     //   je     IndCallHandler
3317     //   callq  *%rdi
3318     //   jmpq   IndCallHandler
3319     InstructionListType Insts;
3320     Insts.emplace_back();
3321     createPushFlags(Insts.back(), 8);
3322     Insts.emplace_back();
3323     createMove(Insts.back(), InstrTrampoline, TempReg, Ctx);
3324     InstructionListType cmpJmp = createCmpJE(TempReg, 0, IndCallHandler, Ctx);
3325     Insts.insert(Insts.end(), cmpJmp.begin(), cmpJmp.end());
3326     Insts.emplace_back();
3327     Insts.back().setOpcode(X86::CALL64r);
3328     Insts.back().addOperand(MCOperand::createReg(TempReg));
3329     Insts.emplace_back();
3330     createDirectCall(Insts.back(), IndCallHandler, Ctx, /*IsTailCall*/ true);
3331     return Insts;
3332   }
3333 
3334   InstructionListType createNumCountersGetter(MCContext *Ctx) const override {
3335     InstructionListType Insts(2);
3336     MCSymbol *NumLocs = Ctx->getOrCreateSymbol("__bolt_num_counters");
3337     createMove(Insts[0], NumLocs, X86::EAX, Ctx);
3338     createReturn(Insts[1]);
3339     return Insts;
3340   }
3341 
3342   InstructionListType
3343   createInstrLocationsGetter(MCContext *Ctx) const override {
3344     InstructionListType Insts(2);
3345     MCSymbol *Locs = Ctx->getOrCreateSymbol("__bolt_instr_locations");
3346     createLea(Insts[0], Locs, X86::EAX, Ctx);
3347     createReturn(Insts[1]);
3348     return Insts;
3349   }
3350 
3351   InstructionListType createInstrTablesGetter(MCContext *Ctx) const override {
3352     InstructionListType Insts(2);
3353     MCSymbol *Locs = Ctx->getOrCreateSymbol("__bolt_instr_tables");
3354     createLea(Insts[0], Locs, X86::EAX, Ctx);
3355     createReturn(Insts[1]);
3356     return Insts;
3357   }
3358 
3359   InstructionListType createInstrNumFuncsGetter(MCContext *Ctx) const override {
3360     InstructionListType Insts(2);
3361     MCSymbol *NumFuncs = Ctx->getOrCreateSymbol("__bolt_instr_num_funcs");
3362     createMove(Insts[0], NumFuncs, X86::EAX, Ctx);
3363     createReturn(Insts[1]);
3364     return Insts;
3365   }
3366 
3367   InstructionListType createSymbolTrampoline(const MCSymbol *TgtSym,
3368                                              MCContext *Ctx) const override {
3369     InstructionListType Insts(1);
3370     createUncondBranch(Insts[0], TgtSym, Ctx);
3371     return Insts;
3372   }
3373 
3374   InstructionListType createDummyReturnFunction(MCContext *Ctx) const override {
3375     InstructionListType Insts(1);
3376     createReturn(Insts[0]);
3377     return Insts;
3378   }
3379 
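       // Promote an indirect call or jump-table branch: for each target, emit a
       // compare of the call/jump operand against that target followed by a
       // conditional branch, then a direct call/jump (or, for jump tables, a JE
       // straight to the case target). A final cold block keeps the original
       // indirect transfer for targets not covered by the promotion.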
3380   BlocksVectorTy indirectCallPromotion(
3381       const MCInst &CallInst,
3382       const std::vector<std::pair<MCSymbol *, uint64_t>> &Targets,
3383       const std::vector<std::pair<MCSymbol *, uint64_t>> &VtableSyms,
3384       const std::vector<MCInst *> &MethodFetchInsns,
3385       const bool MinimizeCodeSize, MCContext *Ctx) override {
3386     const bool IsTailCall = isTailCall(CallInst);
3387     const bool IsJumpTable = getJumpTable(CallInst) != 0;
3388     BlocksVectorTy Results;
3389 
3390     // Label for the current code block.
3391     MCSymbol *NextTarget = nullptr;
3392 
3393     // The join block which contains all the instructions following CallInst.
3394     // MergeBlock remains null if CallInst is a tail call.
3395     MCSymbol *MergeBlock = nullptr;
3396 
3397     unsigned FuncAddrReg = X86::R10;
3398 
3399     const bool LoadElim = !VtableSyms.empty();
3400     assert((!LoadElim || VtableSyms.size() == Targets.size()) &&
3401            "There must be a vtable entry for every method "
3402            "in the targets vector.");
3403 
3404     if (MinimizeCodeSize && !LoadElim) {
3405       std::set<unsigned> UsedRegs;
3406 
3407       for (unsigned int I = 0; I < MCPlus::getNumPrimeOperands(CallInst); ++I) {
3408         const MCOperand &Op = CallInst.getOperand(I);
3409         if (Op.isReg())
3410           UsedRegs.insert(Op.getReg());
3411       }
3412 
3413       if (UsedRegs.count(X86::R10) == 0)
3414         FuncAddrReg = X86::R10;
3415       else if (UsedRegs.count(X86::R11) == 0)
3416         FuncAddrReg = X86::R11;
3417       else
3418         return Results;
3419     }
3420 
3421     const auto jumpToMergeBlock = [&](InstructionListType &NewCall) {
3422       assert(MergeBlock);
3423       NewCall.push_back(CallInst);
3424       MCInst &Merge = NewCall.back();
3425       Merge.clear();
3426       createUncondBranch(Merge, MergeBlock, Ctx);
3427     };
3428 
3429     for (unsigned int i = 0; i < Targets.size(); ++i) {
3430       Results.emplace_back(NextTarget, InstructionListType());
3431       InstructionListType *NewCall = &Results.back().second;
3432 
3433       if (MinimizeCodeSize && !LoadElim) {
3434         // Load the call target into FuncAddrReg.
3435         NewCall->push_back(CallInst); // Copy CallInst in order to get SMLoc
3436         MCInst &Target = NewCall->back();
3437         Target.clear();
3438         Target.setOpcode(X86::MOV64ri32);
3439         Target.addOperand(MCOperand::createReg(FuncAddrReg));
3440         if (Targets[i].first) {
3441           // Is this OK?
3442           Target.addOperand(MCOperand::createExpr(MCSymbolRefExpr::create(
3443               Targets[i].first, MCSymbolRefExpr::VK_None, *Ctx)));
3444         } else {
3445           const uint64_t Addr = Targets[i].second;
3446           // Immediate address is out of the sign-extended 32-bit range.
3447           if (int64_t(Addr) != int64_t(int32_t(Addr)))
3448             return BlocksVectorTy();
3449 
3450           Target.addOperand(MCOperand::createImm(Addr));
3451         }
3452 
3453         // Compare current call target to a specific address.
3454         NewCall->push_back(CallInst);
3455         MCInst &Compare = NewCall->back();
3456         Compare.clear();
3457         if (isBranchOnReg(CallInst))
3458           Compare.setOpcode(X86::CMP64rr);
3459         else if (CallInst.getOpcode() == X86::CALL64pcrel32)
3460           Compare.setOpcode(X86::CMP64ri32);
3461         else
3462           Compare.setOpcode(X86::CMP64rm);
3463 
3464         Compare.addOperand(MCOperand::createReg(FuncAddrReg));
3465 
3466         // TODO: Would be preferable to only load this value once.
3467         for (unsigned i = 0;
3468              i < Info->get(CallInst.getOpcode()).getNumOperands(); ++i)
3469           if (!CallInst.getOperand(i).isInst())
3470             Compare.addOperand(CallInst.getOperand(i));
3471       } else {
3472         // Compare current call target to a specific address.
3473         NewCall->push_back(CallInst);
3474         MCInst &Compare = NewCall->back();
3475         Compare.clear();
3476         if (isBranchOnReg(CallInst))
3477           Compare.setOpcode(X86::CMP64ri32);
3478         else
3479           Compare.setOpcode(X86::CMP64mi32);
3480 
3481         // Original call address.
3482         for (unsigned i = 0;
3483              i < Info->get(CallInst.getOpcode()).getNumOperands(); ++i)
3484           if (!CallInst.getOperand(i).isInst())
3485             Compare.addOperand(CallInst.getOperand(i));
3486 
3487         // Target address.
3488         if (Targets[i].first || LoadElim) {
3489           const MCSymbol *Sym =
3490               LoadElim ? VtableSyms[i].first : Targets[i].first;
3491           const uint64_t Addend = LoadElim ? VtableSyms[i].second : 0;
3492           const MCExpr *Expr = MCSymbolRefExpr::create(Sym, *Ctx);
3493           if (Addend)
3494             Expr = MCBinaryExpr::createAdd(
3495                 Expr, MCConstantExpr::create(Addend, *Ctx), *Ctx);
3496           Compare.addOperand(MCOperand::createExpr(Expr));
3497         } else {
3498           const uint64_t Addr = Targets[i].second;
3499           // Immediate address is out of the sign-extended 32-bit range.
3500           if (int64_t(Addr) != int64_t(int32_t(Addr)))
3501             return BlocksVectorTy();
3502 
3503           Compare.addOperand(MCOperand::createImm(Addr));
3504         }
3505       }
3506 
3507       // Jump to the next target comparison.
3508       NextTarget =
3509           Ctx->createNamedTempSymbol(); // generate label for the next block
3510       NewCall->push_back(CallInst);
3511 
3512       if (IsJumpTable) {
3513         MCInst &Je = NewCall->back();
3514 
3515         // Jump to next compare if target addresses don't match.
3516         Je.clear();
3517         Je.setOpcode(X86::JCC_1);
3518         if (Targets[i].first)
3519           Je.addOperand(MCOperand::createExpr(MCSymbolRefExpr::create(
3520               Targets[i].first, MCSymbolRefExpr::VK_None, *Ctx)));
3521         else
3522           Je.addOperand(MCOperand::createImm(Targets[i].second));
3523 
3524         Je.addOperand(MCOperand::createImm(X86::COND_E));
3525         assert(!isInvoke(CallInst));
3526       } else {
3527         MCInst &Jne = NewCall->back();
3528 
3529         // Jump to next compare if target addresses don't match.
3530         Jne.clear();
3531         Jne.setOpcode(X86::JCC_1);
3532         Jne.addOperand(MCOperand::createExpr(MCSymbolRefExpr::create(
3533             NextTarget, MCSymbolRefExpr::VK_None, *Ctx)));
3534         Jne.addOperand(MCOperand::createImm(X86::COND_NE));
3535 
3536         // Call specific target directly.
3537         Results.emplace_back(Ctx->createNamedTempSymbol(),
3538                              InstructionListType());
3539         NewCall = &Results.back().second;
3540         NewCall->push_back(CallInst);
3541         MCInst &CallOrJmp = NewCall->back();
3542 
3543         CallOrJmp.clear();
3544 
3545         if (MinimizeCodeSize && !LoadElim) {
3546           CallOrJmp.setOpcode(IsTailCall ? X86::JMP32r : X86::CALL64r);
3547           CallOrJmp.addOperand(MCOperand::createReg(FuncAddrReg));
3548         } else {
3549           CallOrJmp.setOpcode(IsTailCall ? X86::JMP_4 : X86::CALL64pcrel32);
3550 
3551           if (Targets[i].first)
3552             CallOrJmp.addOperand(MCOperand::createExpr(MCSymbolRefExpr::create(
3553                 Targets[i].first, MCSymbolRefExpr::VK_None, *Ctx)));
3554           else
3555             CallOrJmp.addOperand(MCOperand::createImm(Targets[i].second));
3556         }
3557         if (IsTailCall)
3558           setTailCall(CallOrJmp);
3559 
3560         if (CallOrJmp.getOpcode() == X86::CALL64r ||
3561             CallOrJmp.getOpcode() == X86::CALL64pcrel32) {
3562           if (Optional<uint32_t> Offset = getOffset(CallInst))
3563             // Annotated as duplicated call
3564             setOffset(CallOrJmp, *Offset);
3565         }
3566 
3567         if (isInvoke(CallInst) && !isInvoke(CallOrJmp)) {
3568           // Copy over any EH or GNU args size information from the original
3569           // call.
3570           Optional<MCPlus::MCLandingPad> EHInfo = getEHInfo(CallInst);
3571           if (EHInfo)
3572             addEHInfo(CallOrJmp, *EHInfo);
3573           int64_t GnuArgsSize = getGnuArgsSize(CallInst);
3574           if (GnuArgsSize >= 0)
3575             addGnuArgsSize(CallOrJmp, GnuArgsSize);
3576         }
3577 
3578         if (!IsTailCall) {
3579           // The fallthrough block for the most common target should be
3580           // the merge block.
3581           if (i == 0) {
3582             // Fallthrough to merge block.
3583             MergeBlock = Ctx->createNamedTempSymbol();
3584           } else {
3585             // Insert jump to the merge block if we are not doing a fallthrough.
3586             jumpToMergeBlock(*NewCall);
3587           }
3588         }
3589       }
3590     }
3591 
3592     // Cold call block.
3593     Results.emplace_back(NextTarget, InstructionListType());
3594     InstructionListType &NewCall = Results.back().second;
3595     for (const MCInst *Inst : MethodFetchInsns)
3596       if (Inst != &CallInst)
3597         NewCall.push_back(*Inst);
3598     NewCall.push_back(CallInst);
3599 
3600     // Jump to merge block from cold call block
3601     if (!IsTailCall && !IsJumpTable) {
3602       jumpToMergeBlock(NewCall);
3603 
3604       // Record merge block
3605       Results.emplace_back(MergeBlock, InstructionListType());
3606     }
3607 
3608     return Results;
3609   }
3610 
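       // Promote the hottest jump-table cases: emit a CMP of the index register
       // against each case index with a JE to the case target, and end with a
       // cold block containing the original target-fetch instructions and the
       // original indirect jump.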
3611   BlocksVectorTy jumpTablePromotion(
3612       const MCInst &IJmpInst,
3613       const std::vector<std::pair<MCSymbol *, uint64_t>> &Targets,
3614       const std::vector<MCInst *> &TargetFetchInsns,
3615       MCContext *Ctx) const override {
3616     assert(getJumpTable(IJmpInst) != 0);
3617     uint16_t IndexReg = getAnnotationAs<uint16_t>(IJmpInst, "JTIndexReg");
3618     if (IndexReg == 0)
3619       return BlocksVectorTy();
3620 
3621     BlocksVectorTy Results;
3622 
3623     // Label for the current code block.
3624     MCSymbol *NextTarget = nullptr;
3625 
3626     for (unsigned int i = 0; i < Targets.size(); ++i) {
3627       Results.emplace_back(NextTarget, InstructionListType());
3628       InstructionListType *CurBB = &Results.back().second;
3629 
3630       // Compare current index to a specific index.
3631       CurBB->emplace_back(MCInst());
3632       MCInst &CompareInst = CurBB->back();
3633       CompareInst.setLoc(IJmpInst.getLoc());
3634       CompareInst.setOpcode(X86::CMP64ri32);
3635       CompareInst.addOperand(MCOperand::createReg(IndexReg));
3636 
3637       const uint64_t CaseIdx = Targets[i].second;
3638       // Case index is out of the sign-extended 32-bit range.
3639       if (int64_t(CaseIdx) != int64_t(int32_t(CaseIdx)))
3640         return BlocksVectorTy();
3641 
3642       CompareInst.addOperand(MCOperand::createImm(CaseIdx));
3643       shortenInstruction(CompareInst, *Ctx->getSubtargetInfo());
3644 
3645       // Jump to the next target comparison.
3646       NextTarget =
3647           Ctx->createNamedTempSymbol(); // generate label for the next block
3648       CurBB->push_back(MCInst());
3649 
3650       MCInst &JEInst = CurBB->back();
3651       JEInst.setLoc(IJmpInst.getLoc());
3652 
3653       // Jump to target if indices match
3654       JEInst.setOpcode(X86::JCC_1);
3655       JEInst.addOperand(MCOperand::createExpr(MCSymbolRefExpr::create(
3656           Targets[i].first, MCSymbolRefExpr::VK_None, *Ctx)));
3657       JEInst.addOperand(MCOperand::createImm(X86::COND_E));
3658     }
3659 
3660     // Cold call block.
3661     Results.emplace_back(NextTarget, InstructionListType());
3662     InstructionListType &CurBB = Results.back().second;
3663     for (const MCInst *Inst : TargetFetchInsns)
3664       if (Inst != &IJmpInst)
3665         CurBB.push_back(*Inst);
3666 
3667     CurBB.push_back(IJmpInst);
3668 
3669     return Results;
3670   }
3671 
3672 private:
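       // Load 8 bytes from the RIP-relative address of Src into Reg:
       //   movq Src(%rip), %Reg.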
3673   bool createMove(MCInst &Inst, const MCSymbol *Src, unsigned Reg,
3674                   MCContext *Ctx) const {
3675     Inst.setOpcode(X86::MOV64rm);
3676     Inst.addOperand(MCOperand::createReg(Reg));
3677     Inst.addOperand(MCOperand::createReg(X86::RIP));        // BaseReg
3678     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
3679     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
3680     Inst.addOperand(MCOperand::createExpr(
3681         MCSymbolRefExpr::create(Src, MCSymbolRefExpr::VK_None,
3682                                 *Ctx)));                    // Displacement
3683     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
3684 
3685     return true;
3686   }
3687 
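       // Materialize the address of Src into Reg: leaq Src(%rip), %Reg.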
3688   bool createLea(MCInst &Inst, const MCSymbol *Src, unsigned Reg,
3689                  MCContext *Ctx) const {
3690     Inst.setOpcode(X86::LEA64r);
3691     Inst.addOperand(MCOperand::createReg(Reg));
3692     Inst.addOperand(MCOperand::createReg(X86::RIP));        // BaseReg
3693     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
3694     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
3695     Inst.addOperand(MCOperand::createExpr(
3696         MCSymbolRefExpr::create(Src, MCSymbolRefExpr::VK_None,
3697                                 *Ctx)));                    // Displacement
3698     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
3699     return true;
3700   }
3701 };
3702 
3703 } // namespace
3704 
3705 namespace llvm {
3706 namespace bolt {
3707 
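     // Factory entry point used when BOLT targets x86-64. A minimal usage sketch,
     // assuming MCInstrAnalysis/MCInstrInfo/MCRegisterInfo objects named MIA, MII
     // and MRI have already been created for the x86 target:
     //   std::unique_ptr<MCPlusBuilder> MIB(
     //       createX86MCPlusBuilder(MIA.get(), MII.get(), MRI.get()));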
3708 MCPlusBuilder *createX86MCPlusBuilder(const MCInstrAnalysis *Analysis,
3709                                       const MCInstrInfo *Info,
3710                                       const MCRegisterInfo *RegInfo) {
3711   return new X86MCPlusBuilder(Analysis, Info, RegInfo);
3712 }
3713 
3714 } // namespace bolt
3715 } // namespace llvm
3716