//===- bolt/Target/X86/X86MCPlusBuilder.cpp -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides X86-specific MCPlus builder.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "bolt/Core/MCPlus.h"
#include "bolt/Core/MCPlusBuilder.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegister.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
#include <set>

#define DEBUG_TYPE "mcplus"

using namespace llvm;
using namespace bolt;

namespace {

unsigned getShortBranchOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    return Opcode;
  case X86::JMP_2: return X86::JMP_1;
  case X86::JMP_4: return X86::JMP_1;
  case X86::JCC_2: return X86::JCC_1;
  case X86::JCC_4: return X86::JCC_1;
  }
}
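
// Hedged usage sketch (not part of the original source): a caller shrinking
// a previously relaxed branch whose displacement turned out to fit in a
// signed 8-bit field might do:
//
//   MCInst &Jmp = ...;  // hypothetical instruction, e.g. X86::JMP_4
//   Jmp.setOpcode(getShortBranchOpcode(Jmp.getOpcode()));  // -> X86::JMP_1
//
// Opcodes without a short form are returned unchanged, so the rewrite is
// safe to apply unconditionally.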

unsigned getShortArithOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    return Opcode;

  // IMUL
  case X86::IMUL16rri:   return X86::IMUL16rri8;
  case X86::IMUL16rmi:   return X86::IMUL16rmi8;
  case X86::IMUL32rri:   return X86::IMUL32rri8;
  case X86::IMUL32rmi:   return X86::IMUL32rmi8;
  case X86::IMUL64rri32: return X86::IMUL64rri8;
  case X86::IMUL64rmi32: return X86::IMUL64rmi8;

  // OR
  case X86::OR16ri:    return X86::OR16ri8;
  case X86::OR16mi:    return X86::OR16mi8;
  case X86::OR32ri:    return X86::OR32ri8;
  case X86::OR32mi:    return X86::OR32mi8;
  case X86::OR64ri32:  return X86::OR64ri8;
  case X86::OR64mi32:  return X86::OR64mi8;

  // AND
  case X86::AND16ri:   return X86::AND16ri8;
  case X86::AND16mi:   return X86::AND16mi8;
  case X86::AND32ri:   return X86::AND32ri8;
  case X86::AND32mi:   return X86::AND32mi8;
  case X86::AND64ri32: return X86::AND64ri8;
  case X86::AND64mi32: return X86::AND64mi8;

  // XOR
  case X86::XOR16ri:   return X86::XOR16ri8;
  case X86::XOR16mi:   return X86::XOR16mi8;
  case X86::XOR32ri:   return X86::XOR32ri8;
  case X86::XOR32mi:   return X86::XOR32mi8;
  case X86::XOR64ri32: return X86::XOR64ri8;
  case X86::XOR64mi32: return X86::XOR64mi8;

  // ADD
  case X86::ADD16ri:   return X86::ADD16ri8;
  case X86::ADD16mi:   return X86::ADD16mi8;
  case X86::ADD32ri:   return X86::ADD32ri8;
  case X86::ADD32mi:   return X86::ADD32mi8;
  case X86::ADD64ri32: return X86::ADD64ri8;
  case X86::ADD64mi32: return X86::ADD64mi8;

  // SUB
  case X86::SUB16ri:   return X86::SUB16ri8;
  case X86::SUB16mi:   return X86::SUB16mi8;
  case X86::SUB32ri:   return X86::SUB32ri8;
  case X86::SUB32mi:   return X86::SUB32mi8;
  case X86::SUB64ri32: return X86::SUB64ri8;
  case X86::SUB64mi32: return X86::SUB64mi8;

  // CMP
  case X86::CMP16ri:   return X86::CMP16ri8;
  case X86::CMP16mi:   return X86::CMP16mi8;
  case X86::CMP32ri:   return X86::CMP32ri8;
  case X86::CMP32mi:   return X86::CMP32mi8;
  case X86::CMP64ri32: return X86::CMP64ri8;
  case X86::CMP64mi32: return X86::CMP64mi8;

  // PUSH
  case X86::PUSHi32:    return X86::PUSH32i8;
  case X86::PUSHi16:    return X86::PUSH16i8;
  case X86::PUSH64i32:  return X86::PUSH64i8;
  }
}
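
// Hedged usage sketch (assumption, not in the original source): the 8-bit
// forms are only valid when the immediate fits in a sign-extended byte, so
// a caller would typically guard the rewrite on the immediate's range:
//
//   // ImmOpIdx is a hypothetical index of the immediate operand.
//   int64_t Imm = Inst.getOperand(ImmOpIdx).getImm();
//   if (Imm >= INT8_MIN && Imm <= INT8_MAX)
//     Inst.setOpcode(getShortArithOpcode(Inst.getOpcode()));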

bool isADD(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
  case X86::ADD16i16:
  case X86::ADD16mi:
  case X86::ADD16mi8:
  case X86::ADD16mr:
  case X86::ADD16ri:
  case X86::ADD16ri8:
  case X86::ADD16ri8_DB:
  case X86::ADD16ri_DB:
  case X86::ADD16rm:
  case X86::ADD16rr:
  case X86::ADD16rr_DB:
  case X86::ADD16rr_REV:
  case X86::ADD32i32:
  case X86::ADD32mi:
  case X86::ADD32mi8:
  case X86::ADD32mr:
  case X86::ADD32ri:
  case X86::ADD32ri8:
  case X86::ADD32ri8_DB:
  case X86::ADD32ri_DB:
  case X86::ADD32rm:
  case X86::ADD32rr:
  case X86::ADD32rr_DB:
  case X86::ADD32rr_REV:
  case X86::ADD64i32:
  case X86::ADD64mi32:
  case X86::ADD64mi8:
  case X86::ADD64mr:
  case X86::ADD64ri32:
  case X86::ADD64ri32_DB:
  case X86::ADD64ri8:
  case X86::ADD64ri8_DB:
  case X86::ADD64rm:
  case X86::ADD64rr:
  case X86::ADD64rr_DB:
  case X86::ADD64rr_REV:
  case X86::ADD8i8:
  case X86::ADD8mi:
  case X86::ADD8mi8:
  case X86::ADD8mr:
  case X86::ADD8ri:
  case X86::ADD8ri8:
  case X86::ADD8rm:
  case X86::ADD8rr:
  case X86::ADD8rr_REV:
    return true;
  }
}

bool isAND(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
  case X86::AND16i16:
  case X86::AND16mi:
  case X86::AND16mi8:
  case X86::AND16mr:
  case X86::AND16ri:
  case X86::AND16ri8:
  case X86::AND16rm:
  case X86::AND16rr:
  case X86::AND16rr_REV:
  case X86::AND32i32:
  case X86::AND32mi:
  case X86::AND32mi8:
  case X86::AND32mr:
  case X86::AND32ri:
  case X86::AND32ri8:
  case X86::AND32rm:
  case X86::AND32rr:
  case X86::AND32rr_REV:
  case X86::AND64i32:
  case X86::AND64mi32:
  case X86::AND64mi8:
  case X86::AND64mr:
  case X86::AND64ri32:
  case X86::AND64ri8:
  case X86::AND64rm:
  case X86::AND64rr:
  case X86::AND64rr_REV:
  case X86::AND8i8:
  case X86::AND8mi:
  case X86::AND8mi8:
  case X86::AND8mr:
  case X86::AND8ri:
  case X86::AND8ri8:
  case X86::AND8rm:
  case X86::AND8rr:
  case X86::AND8rr_REV:
    return true;
  }
}

bool isCMP(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
  case X86::CMP16i16:
  case X86::CMP16mi:
  case X86::CMP16mi8:
  case X86::CMP16mr:
  case X86::CMP16ri:
  case X86::CMP16ri8:
  case X86::CMP16rm:
  case X86::CMP16rr:
  case X86::CMP16rr_REV:
  case X86::CMP32i32:
  case X86::CMP32mi:
  case X86::CMP32mi8:
  case X86::CMP32mr:
  case X86::CMP32ri:
  case X86::CMP32ri8:
  case X86::CMP32rm:
  case X86::CMP32rr:
  case X86::CMP32rr_REV:
  case X86::CMP64i32:
  case X86::CMP64mi32:
  case X86::CMP64mi8:
  case X86::CMP64mr:
  case X86::CMP64ri32:
  case X86::CMP64ri8:
  case X86::CMP64rm:
  case X86::CMP64rr:
  case X86::CMP64rr_REV:
  case X86::CMP8i8:
  case X86::CMP8mi:
  case X86::CMP8mi8:
  case X86::CMP8mr:
  case X86::CMP8ri:
  case X86::CMP8ri8:
  case X86::CMP8rm:
  case X86::CMP8rr:
  case X86::CMP8rr_REV:
    return true;
  }
}

bool isSUB(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
  case X86::SUB16i16:
  case X86::SUB16mi:
  case X86::SUB16mi8:
  case X86::SUB16mr:
  case X86::SUB16ri:
  case X86::SUB16ri8:
  case X86::SUB16rm:
  case X86::SUB16rr:
  case X86::SUB16rr_REV:
  case X86::SUB32i32:
  case X86::SUB32mi:
  case X86::SUB32mi8:
  case X86::SUB32mr:
  case X86::SUB32ri:
  case X86::SUB32ri8:
  case X86::SUB32rm:
  case X86::SUB32rr:
  case X86::SUB32rr_REV:
  case X86::SUB64i32:
  case X86::SUB64mi32:
  case X86::SUB64mi8:
  case X86::SUB64mr:
  case X86::SUB64ri32:
  case X86::SUB64ri8:
  case X86::SUB64rm:
  case X86::SUB64rr:
  case X86::SUB64rr_REV:
  case X86::SUB8i8:
  case X86::SUB8mi:
  case X86::SUB8mi8:
  case X86::SUB8mr:
  case X86::SUB8ri:
  case X86::SUB8ri8:
  case X86::SUB8rm:
  case X86::SUB8rr:
  case X86::SUB8rr_REV:
    return true;
  }
}

bool isTEST(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
  case X86::TEST16i16:
  case X86::TEST16mi:
  case X86::TEST16mr:
  case X86::TEST16ri:
  case X86::TEST16rr:
  case X86::TEST32i32:
  case X86::TEST32mi:
  case X86::TEST32mr:
  case X86::TEST32ri:
  case X86::TEST32rr:
  case X86::TEST64i32:
  case X86::TEST64mi32:
  case X86::TEST64mr:
  case X86::TEST64ri32:
  case X86::TEST64rr:
  case X86::TEST8i8:
  case X86::TEST8mi:
  case X86::TEST8mr:
  case X86::TEST8ri:
  case X86::TEST8rr:
    return true;
  }
}
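
// Note: SUB and CMP perform the same subtraction and set EFLAGS
// identically; CMP merely discards the arithmetic result. A minimal
// illustrative use of these opcode predicates (not from the original
// source):
//
//   if (isSUB(Inst.getOpcode()) || isCMP(Inst.getOpcode()))
//     ...  // EFLAGS reflect the difference of the two operands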

class X86MCPlusBuilder : public MCPlusBuilder {
public:
  X86MCPlusBuilder(const MCInstrAnalysis *Analysis, const MCInstrInfo *Info,
                   const MCRegisterInfo *RegInfo)
      : MCPlusBuilder(Analysis, Info, RegInfo) {}

  bool isBranch(const MCInst &Inst) const override {
    return Analysis->isBranch(Inst) && !isTailCall(Inst);
  }

  bool isUnconditionalBranch(const MCInst &Inst) const override {
    return Analysis->isUnconditionalBranch(Inst) && !isTailCall(Inst);
  }

  bool isNoop(const MCInst &Inst) const override {
    switch (Inst.getOpcode()) {
    case X86::NOOP:
    case X86::NOOPL:
    case X86::NOOPLr:
    case X86::NOOPQ:
    case X86::NOOPQr:
    case X86::NOOPW:
    case X86::NOOPWr:
      return true;
    }
    return false;
  }

  unsigned getCondCode(const MCInst &Inst) const override {
    switch (Inst.getOpcode()) {
    default:
      return X86::COND_INVALID;
    case X86::JCC_1:
    case X86::JCC_2:
    case X86::JCC_4:
      return Inst.getOperand(Info->get(Inst.getOpcode()).NumOperands - 1)
          .getImm();
    }
  }
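
  // Illustrative example (not in the original source): a decoded
  // "jne target" becomes a JCC_1 whose last explicit operand carries the
  // condition code:
  //
  //   // Jcc is a hypothetical MCInst with opcode X86::JCC_1 whose last
  //   // operand is MCOperand::createImm(X86::COND_NE).
  //   unsigned CC = getCondCode(Jcc);  // == X86::COND_NE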

  unsigned getInvertedCondCode(unsigned CC) const override {
    switch (CC) {
    default: return X86::COND_INVALID;
    case X86::COND_E:  return X86::COND_NE;
    case X86::COND_NE: return X86::COND_E;
    case X86::COND_L:  return X86::COND_GE;
    case X86::COND_LE: return X86::COND_G;
    case X86::COND_G:  return X86::COND_LE;
    case X86::COND_GE: return X86::COND_L;
    case X86::COND_B:  return X86::COND_AE;
    case X86::COND_BE: return X86::COND_A;
    case X86::COND_A:  return X86::COND_BE;
    case X86::COND_AE: return X86::COND_B;
    case X86::COND_S:  return X86::COND_NS;
    case X86::COND_NS: return X86::COND_S;
    case X86::COND_P:  return X86::COND_NP;
    case X86::COND_NP: return X86::COND_P;
    case X86::COND_O:  return X86::COND_NO;
    case X86::COND_NO: return X86::COND_O;
    }
  }

  unsigned getCondCodesLogicalOr(unsigned CC1, unsigned CC2) const override {
    enum DecodedCondCode : uint8_t {
      DCC_EQUAL = 0x1,
      DCC_GREATER = 0x2,
      DCC_LESSER = 0x4,
      DCC_GREATER_OR_LESSER = 0x6,
      DCC_UNSIGNED = 0x8,
      DCC_SIGNED = 0x10,
      DCC_INVALID = 0x20,
    };

    auto decodeCondCode = [&](unsigned CC) -> uint8_t {
      switch (CC) {
      default: return DCC_INVALID;
      case X86::COND_E: return DCC_EQUAL;
      case X86::COND_NE: return DCC_GREATER | DCC_LESSER;
      case X86::COND_L: return DCC_LESSER | DCC_SIGNED;
      case X86::COND_LE: return DCC_EQUAL | DCC_LESSER | DCC_SIGNED;
      case X86::COND_G: return DCC_GREATER | DCC_SIGNED;
      case X86::COND_GE: return DCC_GREATER | DCC_EQUAL | DCC_SIGNED;
      case X86::COND_B: return DCC_LESSER | DCC_UNSIGNED;
      case X86::COND_BE: return DCC_EQUAL | DCC_LESSER | DCC_UNSIGNED;
      case X86::COND_A: return DCC_GREATER | DCC_UNSIGNED;
      case X86::COND_AE: return DCC_GREATER | DCC_EQUAL | DCC_UNSIGNED;
      }
    };

    uint8_t DCC = decodeCondCode(CC1) | decodeCondCode(CC2);

    if (DCC & DCC_INVALID)
      return X86::COND_INVALID;

    if (DCC & DCC_SIGNED && DCC & DCC_UNSIGNED)
      return X86::COND_INVALID;

    switch (DCC) {
    default: return X86::COND_INVALID;
    case DCC_EQUAL | DCC_LESSER | DCC_SIGNED: return X86::COND_LE;
    case DCC_EQUAL | DCC_LESSER | DCC_UNSIGNED: return X86::COND_BE;
    case DCC_EQUAL | DCC_GREATER | DCC_SIGNED: return X86::COND_GE;
    case DCC_EQUAL | DCC_GREATER | DCC_UNSIGNED: return X86::COND_AE;
    case DCC_GREATER | DCC_LESSER | DCC_SIGNED: return X86::COND_NE;
    case DCC_GREATER | DCC_LESSER | DCC_UNSIGNED: return X86::COND_NE;
    case DCC_GREATER | DCC_LESSER: return X86::COND_NE;
    case DCC_EQUAL | DCC_SIGNED: return X86::COND_E;
    case DCC_EQUAL | DCC_UNSIGNED: return X86::COND_E;
    case DCC_EQUAL: return X86::COND_E;
    case DCC_LESSER | DCC_SIGNED: return X86::COND_L;
    case DCC_LESSER | DCC_UNSIGNED: return X86::COND_B;
    case DCC_GREATER | DCC_SIGNED: return X86::COND_G;
    case DCC_GREATER | DCC_UNSIGNED: return X86::COND_A;
    }
  }
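
  // Illustrative examples (not in the original source): OR-ing "less" with
  // "equal" yields "less or equal",
  //
  //   getCondCodesLogicalOr(X86::COND_L, X86::COND_E);  // == X86::COND_LE
  //
  // while mixing signed and unsigned comparisons, e.g. COND_L with COND_A,
  // returns X86::COND_INVALID because the two interpret the flags
  // incompatibly.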

  bool isValidCondCode(unsigned CC) const override {
    return (CC != X86::COND_INVALID);
  }

  bool isBreakpoint(const MCInst &Inst) const override {
    return Inst.getOpcode() == X86::INT3;
  }

  bool isPrefix(const MCInst &Inst) const override {
    switch (Inst.getOpcode()) {
    case X86::LOCK_PREFIX:
    case X86::REPNE_PREFIX:
    case X86::REP_PREFIX:
      return true;
    }
    return false;
  }

  bool isRep(const MCInst &Inst) const override {
    return Inst.getFlags() == X86::IP_HAS_REPEAT;
  }

  bool deleteREPPrefix(MCInst &Inst) const override {
    if (Inst.getFlags() == X86::IP_HAS_REPEAT) {
      Inst.setFlags(0);
      return true;
    }
    return false;
  }

  // FIXME: For compatibility with old LLVM only!
  bool isTerminator(const MCInst &Inst) const override {
    if (Info->get(Inst.getOpcode()).isTerminator())
      return true;
    switch (Inst.getOpcode()) {
    default:
      return false;
    case X86::TRAP:
    // Opcodes previously known as X86::UD2B
    case X86::UD1Wm:
    case X86::UD1Lm:
    case X86::UD1Qm:
    case X86::UD1Wr:
    case X86::UD1Lr:
    case X86::UD1Qr:
      return true;
    }
  }

  bool isIndirectCall(const MCInst &Inst) const override {
    return isCall(Inst) &&
           ((getMemoryOperandNo(Inst) != -1) || Inst.getOperand(0).isReg());
  }

  bool isPop(const MCInst &Inst) const override {
    return getPopSize(Inst) != 0;
  }

  bool isTerminateBranch(const MCInst &Inst) const override {
    return Inst.getOpcode() == X86::ENDBR32 || Inst.getOpcode() == X86::ENDBR64;
  }

  int getPopSize(const MCInst &Inst) const override {
    switch (Inst.getOpcode()) {
    case X86::POP16r:
    case X86::POP16rmm:
    case X86::POP16rmr:
    case X86::POPF16:
    case X86::POPA16:
    case X86::POPDS16:
    case X86::POPES16:
    case X86::POPFS16:
    case X86::POPGS16:
    case X86::POPSS16:
      return 2;
    case X86::POP32r:
    case X86::POP32rmm:
    case X86::POP32rmr:
    case X86::POPA32:
    case X86::POPDS32:
    case X86::POPES32:
    case X86::POPF32:
    case X86::POPFS32:
    case X86::POPGS32:
    case X86::POPSS32:
      return 4;
    case X86::POP64r:
    case X86::POP64rmm:
    case X86::POP64rmr:
    case X86::POPF64:
    case X86::POPFS64:
    case X86::POPGS64:
      return 8;
    }
    return 0;
  }

  bool isPush(const MCInst &Inst) const override {
    return getPushSize(Inst) != 0;
  }

  int getPushSize(const MCInst &Inst) const override {
    switch (Inst.getOpcode()) {
    case X86::PUSH16i8:
    case X86::PUSH16r:
    case X86::PUSH16rmm:
    case X86::PUSH16rmr:
    case X86::PUSHA16:
    case X86::PUSHCS16:
    case X86::PUSHDS16:
    case X86::PUSHES16:
    case X86::PUSHF16:
    case X86::PUSHFS16:
    case X86::PUSHGS16:
    case X86::PUSHSS16:
    case X86::PUSHi16:
      return 2;
    case X86::PUSH32i8:
    case X86::PUSH32r:
    case X86::PUSH32rmm:
    case X86::PUSH32rmr:
    case X86::PUSHA32:
    case X86::PUSHCS32:
    case X86::PUSHDS32:
    case X86::PUSHES32:
    case X86::PUSHF32:
    case X86::PUSHFS32:
    case X86::PUSHGS32:
    case X86::PUSHSS32:
    case X86::PUSHi32:
      return 4;
    case X86::PUSH64i32:
    case X86::PUSH64i8:
    case X86::PUSH64r:
    case X86::PUSH64rmm:
    case X86::PUSH64rmr:
    case X86::PUSHF64:
    case X86::PUSHFS64:
    case X86::PUSHGS64:
      return 8;
    }
    return 0;
  }

  bool isADD64rr(const MCInst &Inst) const override {
    return Inst.getOpcode() == X86::ADD64rr;
  }

  bool isSUB(const MCInst &Inst) const override {
    return ::isSUB(Inst.getOpcode());
  }

  bool isADDri(const MCInst &Inst) const {
    return Inst.getOpcode() == X86::ADD64ri32 ||
           Inst.getOpcode() == X86::ADD64ri8;
  }

  bool isLEA64r(const MCInst &Inst) const override {
    return Inst.getOpcode() == X86::LEA64r;
  }

  bool isMOVSX64rm32(const MCInst &Inst) const override {
    return Inst.getOpcode() == X86::MOVSX64rm32;
  }

  bool isLeave(const MCInst &Inst) const override {
    return Inst.getOpcode() == X86::LEAVE || Inst.getOpcode() == X86::LEAVE64;
  }

  bool isMoveMem2Reg(const MCInst &Inst) const override {
    switch (Inst.getOpcode()) {
    case X86::MOV16rm:
    case X86::MOV32rm:
    case X86::MOV64rm:
      return true;
    }
    return false;
  }

  bool isUnsupportedBranch(unsigned Opcode) const override {
    switch (Opcode) {
    default:
      return false;
    case X86::LOOP:
    case X86::LOOPE:
    case X86::LOOPNE:
    case X86::JECXZ:
    case X86::JRCXZ:
      return true;
    }
  }

  bool isLoad(const MCInst &Inst) const override {
    if (isPop(Inst))
      return true;

    int MemOpNo = getMemoryOperandNo(Inst);
    const MCInstrDesc &MCII = Info->get(Inst.getOpcode());

    if (MemOpNo == -1)
      return false;

    return MCII.mayLoad();
  }

  bool isStore(const MCInst &Inst) const override {
    if (isPush(Inst))
      return true;

    int MemOpNo = getMemoryOperandNo(Inst);
    const MCInstrDesc &MCII = Info->get(Inst.getOpcode());

    if (MemOpNo == -1)
      return false;

    return MCII.mayStore();
  }

  bool isCleanRegXOR(const MCInst &Inst) const override {
    switch (Inst.getOpcode()) {
    case X86::XOR16rr:
    case X86::XOR32rr:
    case X86::XOR64rr:
      break;
    default:
      return false;
    }
    return (Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg());
  }

  bool isPacked(const MCInst &Inst) const override {
    const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
    return (Desc.TSFlags & X86II::OpPrefixMask) == X86II::PD;
  }

  unsigned getTrapFillValue() const override { return 0xCC; }

  struct IndJmpMatcherFrag1 : MCInstMatcher {
    std::unique_ptr<MCInstMatcher> Base;
    std::unique_ptr<MCInstMatcher> Scale;
    std::unique_ptr<MCInstMatcher> Index;
    std::unique_ptr<MCInstMatcher> Offset;

    IndJmpMatcherFrag1(std::unique_ptr<MCInstMatcher> Base,
                       std::unique_ptr<MCInstMatcher> Scale,
                       std::unique_ptr<MCInstMatcher> Index,
                       std::unique_ptr<MCInstMatcher> Offset)
        : Base(std::move(Base)), Scale(std::move(Scale)),
          Index(std::move(Index)), Offset(std::move(Offset)) {}

    bool match(const MCRegisterInfo &MRI, MCPlusBuilder &MIB,
               MutableArrayRef<MCInst> InInstrWindow, int OpNum) override {
      if (!MCInstMatcher::match(MRI, MIB, InInstrWindow, OpNum))
        return false;

      if (CurInst->getOpcode() != X86::JMP64m)
        return false;

      int MemOpNo = MIB.getMemoryOperandNo(*CurInst);
      if (MemOpNo == -1)
        return false;

      if (!Base->match(MRI, MIB, this->InstrWindow, MemOpNo + X86::AddrBaseReg))
        return false;
      if (!Scale->match(MRI, MIB, this->InstrWindow,
                        MemOpNo + X86::AddrScaleAmt))
        return false;
      if (!Index->match(MRI, MIB, this->InstrWindow,
                        MemOpNo + X86::AddrIndexReg))
        return false;
      if (!Offset->match(MRI, MIB, this->InstrWindow, MemOpNo + X86::AddrDisp))
        return false;
      return true;
    }

    void annotate(MCPlusBuilder &MIB, StringRef Annotation) override {
      MIB.addAnnotation(*CurInst, Annotation, true);
      Base->annotate(MIB, Annotation);
      Scale->annotate(MIB, Annotation);
      Index->annotate(MIB, Annotation);
      Offset->annotate(MIB, Annotation);
    }
  };

  std::unique_ptr<MCInstMatcher>
  matchIndJmp(std::unique_ptr<MCInstMatcher> Base,
              std::unique_ptr<MCInstMatcher> Scale,
              std::unique_ptr<MCInstMatcher> Index,
              std::unique_ptr<MCInstMatcher> Offset) const override {
    return std::unique_ptr<MCInstMatcher>(
        new IndJmpMatcherFrag1(std::move(Base), std::move(Scale),
                               std::move(Index), std::move(Offset)));
  }

  struct IndJmpMatcherFrag2 : MCInstMatcher {
    std::unique_ptr<MCInstMatcher> Reg;

    IndJmpMatcherFrag2(std::unique_ptr<MCInstMatcher> Reg)
        : Reg(std::move(Reg)) {}

    bool match(const MCRegisterInfo &MRI, MCPlusBuilder &MIB,
               MutableArrayRef<MCInst> InInstrWindow, int OpNum) override {
      if (!MCInstMatcher::match(MRI, MIB, InInstrWindow, OpNum))
        return false;

      if (CurInst->getOpcode() != X86::JMP64r)
        return false;

      return Reg->match(MRI, MIB, this->InstrWindow, 0);
    }

    void annotate(MCPlusBuilder &MIB, StringRef Annotation) override {
      MIB.addAnnotation(*CurInst, Annotation, true);
      Reg->annotate(MIB, Annotation);
    }
  };

  std::unique_ptr<MCInstMatcher>
  matchIndJmp(std::unique_ptr<MCInstMatcher> Target) const override {
    return std::unique_ptr<MCInstMatcher>(
        new IndJmpMatcherFrag2(std::move(Target)));
  }

  struct LoadMatcherFrag1 : MCInstMatcher {
    std::unique_ptr<MCInstMatcher> Base;
    std::unique_ptr<MCInstMatcher> Scale;
    std::unique_ptr<MCInstMatcher> Index;
    std::unique_ptr<MCInstMatcher> Offset;

    LoadMatcherFrag1(std::unique_ptr<MCInstMatcher> Base,
                     std::unique_ptr<MCInstMatcher> Scale,
                     std::unique_ptr<MCInstMatcher> Index,
                     std::unique_ptr<MCInstMatcher> Offset)
        : Base(std::move(Base)), Scale(std::move(Scale)),
          Index(std::move(Index)), Offset(std::move(Offset)) {}

    bool match(const MCRegisterInfo &MRI, MCPlusBuilder &MIB,
               MutableArrayRef<MCInst> InInstrWindow, int OpNum) override {
      if (!MCInstMatcher::match(MRI, MIB, InInstrWindow, OpNum))
        return false;

      if (CurInst->getOpcode() != X86::MOV64rm &&
          CurInst->getOpcode() != X86::MOVSX64rm32)
        return false;

      int MemOpNo = MIB.getMemoryOperandNo(*CurInst);
      if (MemOpNo == -1)
        return false;

      if (!Base->match(MRI, MIB, this->InstrWindow, MemOpNo + X86::AddrBaseReg))
        return false;
      if (!Scale->match(MRI, MIB, this->InstrWindow,
                        MemOpNo + X86::AddrScaleAmt))
        return false;
      if (!Index->match(MRI, MIB, this->InstrWindow,
                        MemOpNo + X86::AddrIndexReg))
        return false;
      if (!Offset->match(MRI, MIB, this->InstrWindow, MemOpNo + X86::AddrDisp))
        return false;
      return true;
    }

    void annotate(MCPlusBuilder &MIB, StringRef Annotation) override {
      MIB.addAnnotation(*CurInst, Annotation, true);
      Base->annotate(MIB, Annotation);
      Scale->annotate(MIB, Annotation);
      Index->annotate(MIB, Annotation);
      Offset->annotate(MIB, Annotation);
    }
  };

  std::unique_ptr<MCInstMatcher>
  matchLoad(std::unique_ptr<MCInstMatcher> Base,
            std::unique_ptr<MCInstMatcher> Scale,
            std::unique_ptr<MCInstMatcher> Index,
            std::unique_ptr<MCInstMatcher> Offset) const override {
    return std::unique_ptr<MCInstMatcher>(
        new LoadMatcherFrag1(std::move(Base), std::move(Scale),
                             std::move(Index), std::move(Offset)));
  }

  struct AddMatcher : MCInstMatcher {
    std::unique_ptr<MCInstMatcher> A;
    std::unique_ptr<MCInstMatcher> B;

    AddMatcher(std::unique_ptr<MCInstMatcher> A,
               std::unique_ptr<MCInstMatcher> B)
        : A(std::move(A)), B(std::move(B)) {}

    bool match(const MCRegisterInfo &MRI, MCPlusBuilder &MIB,
               MutableArrayRef<MCInst> InInstrWindow, int OpNum) override {
      if (!MCInstMatcher::match(MRI, MIB, InInstrWindow, OpNum))
        return false;

      if (CurInst->getOpcode() == X86::ADD64rr ||
          CurInst->getOpcode() == X86::ADD64rr_DB ||
          CurInst->getOpcode() == X86::ADD64rr_REV) {
        if (!A->match(MRI, MIB, this->InstrWindow, 1)) {
          if (!B->match(MRI, MIB, this->InstrWindow, 1))
            return false;
          return A->match(MRI, MIB, this->InstrWindow, 2);
        }

        if (B->match(MRI, MIB, this->InstrWindow, 2))
          return true;

        if (!B->match(MRI, MIB, this->InstrWindow, 1))
          return false;
        return A->match(MRI, MIB, this->InstrWindow, 2);
      }

      return false;
    }

    void annotate(MCPlusBuilder &MIB, StringRef Annotation) override {
      MIB.addAnnotation(*CurInst, Annotation, true);
      A->annotate(MIB, Annotation);
      B->annotate(MIB, Annotation);
    }
  };

  virtual std::unique_ptr<MCInstMatcher>
  matchAdd(std::unique_ptr<MCInstMatcher> A,
           std::unique_ptr<MCInstMatcher> B) const override {
    return std::unique_ptr<MCInstMatcher>(
        new AddMatcher(std::move(A), std::move(B)));
  }

  struct LEAMatcher : MCInstMatcher {
    std::unique_ptr<MCInstMatcher> Target;

    LEAMatcher(std::unique_ptr<MCInstMatcher> Target)
        : Target(std::move(Target)) {}

    bool match(const MCRegisterInfo &MRI, MCPlusBuilder &MIB,
               MutableArrayRef<MCInst> InInstrWindow, int OpNum) override {
      if (!MCInstMatcher::match(MRI, MIB, InInstrWindow, OpNum))
        return false;

      if (CurInst->getOpcode() != X86::LEA64r)
        return false;

      if (CurInst->getOperand(1 + X86::AddrScaleAmt).getImm() != 1 ||
          CurInst->getOperand(1 + X86::AddrIndexReg).getReg() !=
              X86::NoRegister ||
          (CurInst->getOperand(1 + X86::AddrBaseReg).getReg() !=
               X86::NoRegister &&
           CurInst->getOperand(1 + X86::AddrBaseReg).getReg() != X86::RIP))
        return false;

      return Target->match(MRI, MIB, this->InstrWindow, 1 + X86::AddrDisp);
    }

    void annotate(MCPlusBuilder &MIB, StringRef Annotation) override {
      MIB.addAnnotation(*CurInst, Annotation, true);
      Target->annotate(MIB, Annotation);
    }
  };

  virtual std::unique_ptr<MCInstMatcher>
  matchLoadAddr(std::unique_ptr<MCInstMatcher> Target) const override {
    return std::unique_ptr<MCInstMatcher>(new LEAMatcher(std::move(Target)));
  }

  bool hasPCRelOperand(const MCInst &Inst) const override {
    for (const MCOperand &Operand : Inst)
      if (Operand.isReg() && Operand.getReg() == X86::RIP)
        return true;
    return false;
  }

  int getMemoryOperandNo(const MCInst &Inst) const override {
    unsigned Opcode = Inst.getOpcode();
    const MCInstrDesc &Desc = Info->get(Opcode);
    int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);
    if (MemOpNo >= 0)
      MemOpNo += X86II::getOperandBias(Desc);
    return MemOpNo;
  }
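
  // Illustrative example (not in the original source): for X86::ADD64rm
  // ("add reg, [mem]") the destination and source registers precede the
  // memory operand, so getMemoryOperandNo returns 2 and the address parts
  // are indexed relative to it:
  //
  //   int MemOpNo = getMemoryOperandNo(Inst);  // 2 for ADD64rm
  //   const MCOperand &Base = Inst.getOperand(MemOpNo + X86::AddrBaseReg);
  //   const MCOperand &Disp = Inst.getOperand(MemOpNo + X86::AddrDisp);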

  bool hasEVEXEncoding(const MCInst &Inst) const override {
    const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
    return (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
  }

  bool isMacroOpFusionPair(ArrayRef<MCInst> Insts) const override {
    const auto *I = Insts.begin();
    while (I != Insts.end() && isPrefix(*I))
      ++I;
    if (I == Insts.end())
      return false;

    const MCInst &FirstInst = *I;
    ++I;
    while (I != Insts.end() && isPrefix(*I))
      ++I;
    if (I == Insts.end())
      return false;
    const MCInst &SecondInst = *I;

    if (!isConditionalBranch(SecondInst))
      return false;
    // Cannot fuse if the first instruction uses RIP-relative memory.
    if (hasPCRelOperand(FirstInst))
      return false;

    const X86::FirstMacroFusionInstKind CmpKind =
        X86::classifyFirstOpcodeInMacroFusion(FirstInst.getOpcode());
    if (CmpKind == X86::FirstMacroFusionInstKind::Invalid)
      return false;

    X86::CondCode CC = static_cast<X86::CondCode>(getCondCode(SecondInst));
    X86::SecondMacroFusionInstKind BranchKind =
        X86::classifySecondCondCodeInMacroFusion(CC);
    if (BranchKind == X86::SecondMacroFusionInstKind::Invalid)
      return false;
    return X86::isMacroFused(CmpKind, BranchKind);
  }
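
  // Illustrative fusible pair (a sketch, not from the original source):
  //
  //   cmp rax, rbx     ; X86::CMP64rr -> FirstMacroFusionInstKind::Cmp
  //   jne .Ltarget     ; X86::JCC_1 with X86::COND_NE
  //
  // By contrast, "cmp rax, [rip+sym]; jne ..." is rejected above because a
  // RIP-relative memory operand in the first instruction inhibits fusion.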

  bool
  evaluateX86MemoryOperand(const MCInst &Inst, unsigned *BaseRegNum,
                           int64_t *ScaleImm, unsigned *IndexRegNum,
                           int64_t *DispImm, unsigned *SegmentRegNum,
                           const MCExpr **DispExpr = nullptr) const override {
    assert(BaseRegNum && ScaleImm && IndexRegNum && SegmentRegNum &&
           "one of the input pointers is null");
    int MemOpNo = getMemoryOperandNo(Inst);
    if (MemOpNo < 0)
      return false;
    unsigned MemOpOffset = static_cast<unsigned>(MemOpNo);

    if (MemOpOffset + X86::AddrSegmentReg >= MCPlus::getNumPrimeOperands(Inst))
      return false;

    const MCOperand &Base = Inst.getOperand(MemOpOffset + X86::AddrBaseReg);
    const MCOperand &Scale = Inst.getOperand(MemOpOffset + X86::AddrScaleAmt);
    const MCOperand &Index = Inst.getOperand(MemOpOffset + X86::AddrIndexReg);
    const MCOperand &Disp = Inst.getOperand(MemOpOffset + X86::AddrDisp);
    const MCOperand &Segment =
        Inst.getOperand(MemOpOffset + X86::AddrSegmentReg);

    // Make sure it is a well-formed memory operand.
    if (!Base.isReg() || !Scale.isImm() || !Index.isReg() ||
        (!Disp.isImm() && !Disp.isExpr()) || !Segment.isReg())
      return false;

    *BaseRegNum = Base.getReg();
    *ScaleImm = Scale.getImm();
    *IndexRegNum = Index.getReg();
    if (Disp.isImm()) {
      assert(DispImm && "DispImm needs to be set");
      *DispImm = Disp.getImm();
      if (DispExpr)
        *DispExpr = nullptr;
    } else {
      assert(DispExpr && "DispExpr needs to be set");
      *DispExpr = Disp.getExpr();
      if (DispImm)
        *DispImm = 0;
    }
    *SegmentRegNum = Segment.getReg();
    return true;
  }
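
  // Minimal caller sketch (mirrors the pattern used later in this file):
  //
  //   unsigned Base, Index, Seg;
  //   int64_t Scale, Disp;
  //   const MCExpr *DispExpr = nullptr;
  //   if (evaluateX86MemoryOperand(Inst, &Base, &Scale, &Index, &Disp,
  //                                &Seg, &DispExpr))
  //     ...  // effective address is Base + Index * Scale + Disp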

  bool evaluateMemOperandTarget(const MCInst &Inst, uint64_t &Target,
                                uint64_t Address,
                                uint64_t Size) const override {
    unsigned      BaseRegNum;
    int64_t       ScaleValue;
    unsigned      IndexRegNum;
    int64_t       DispValue;
    unsigned      SegRegNum;
    const MCExpr *DispExpr = nullptr;
    if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, &ScaleValue, &IndexRegNum,
                                  &DispValue, &SegRegNum, &DispExpr))
      return false;

    // Make sure it's a well-formed addressing we can statically evaluate.
    if ((BaseRegNum != X86::RIP && BaseRegNum != X86::NoRegister) ||
        IndexRegNum != X86::NoRegister || SegRegNum != X86::NoRegister ||
        DispExpr)
      return false;

    Target = DispValue;
    if (BaseRegNum == X86::RIP) {
      assert(Size != 0 && "instruction size required in order to statically "
                          "evaluate RIP-relative address");
      Target += Address + Size;
    }
    return true;
  }
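
  // Worked example (illustrative): for "mov rax, [rip + 0x20]" encoded at
  // address 0x1000 with instruction size 7, the displacement is relative
  // to the next instruction, so the evaluated target is
  //
  //   Target = 0x20 + (0x1000 + 7) = 0x1027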

  MCInst::iterator getMemOperandDisp(MCInst &Inst) const override {
    int MemOpNo = getMemoryOperandNo(Inst);
    if (MemOpNo < 0)
      return Inst.end();
    return Inst.begin() + (MemOpNo + X86::AddrDisp);
  }

  bool replaceMemOperandDisp(MCInst &Inst, MCOperand Operand) const override {
    MCOperand *OI = getMemOperandDisp(Inst);
    if (OI == Inst.end())
      return false;
    *OI = Operand;
    return true;
  }

  /// Get the registers used as function parameters.
  /// This function is specific to the x86_64 ABI on Linux.
  BitVector getRegsUsedAsParams() const override {
    BitVector Regs = BitVector(RegInfo->getNumRegs(), false);
    Regs |= getAliases(X86::RSI);
    Regs |= getAliases(X86::RDI);
    Regs |= getAliases(X86::RDX);
    Regs |= getAliases(X86::RCX);
    Regs |= getAliases(X86::R8);
    Regs |= getAliases(X86::R9);
    return Regs;
  }

  void getCalleeSavedRegs(BitVector &Regs) const override {
    Regs |= getAliases(X86::RBX);
    Regs |= getAliases(X86::RBP);
    Regs |= getAliases(X86::R12);
    Regs |= getAliases(X86::R13);
    Regs |= getAliases(X86::R14);
    Regs |= getAliases(X86::R15);
  }

  void getDefaultDefIn(BitVector &Regs) const override {
    assert(Regs.size() >= RegInfo->getNumRegs() &&
           "The size of BitVector is less than RegInfo->getNumRegs().");
    Regs.set(X86::RAX);
    Regs.set(X86::RCX);
    Regs.set(X86::RDX);
    Regs.set(X86::RSI);
    Regs.set(X86::RDI);
    Regs.set(X86::R8);
    Regs.set(X86::R9);
    Regs.set(X86::XMM0);
    Regs.set(X86::XMM1);
    Regs.set(X86::XMM2);
    Regs.set(X86::XMM3);
    Regs.set(X86::XMM4);
    Regs.set(X86::XMM5);
    Regs.set(X86::XMM6);
    Regs.set(X86::XMM7);
  }

  void getDefaultLiveOut(BitVector &Regs) const override {
    assert(Regs.size() >= RegInfo->getNumRegs() &&
           "The size of BitVector is less than RegInfo->getNumRegs().");
    Regs |= getAliases(X86::RAX);
    Regs |= getAliases(X86::RDX);
    Regs |= getAliases(X86::RCX);
    Regs |= getAliases(X86::XMM0);
    Regs |= getAliases(X86::XMM1);
  }

  void getGPRegs(BitVector &Regs, bool IncludeAlias) const override {
    if (IncludeAlias) {
      Regs |= getAliases(X86::RAX);
      Regs |= getAliases(X86::RBX);
      Regs |= getAliases(X86::RBP);
      Regs |= getAliases(X86::RSI);
      Regs |= getAliases(X86::RDI);
      Regs |= getAliases(X86::RDX);
      Regs |= getAliases(X86::RCX);
      Regs |= getAliases(X86::R8);
      Regs |= getAliases(X86::R9);
      Regs |= getAliases(X86::R10);
      Regs |= getAliases(X86::R11);
      Regs |= getAliases(X86::R12);
      Regs |= getAliases(X86::R13);
      Regs |= getAliases(X86::R14);
      Regs |= getAliases(X86::R15);
      return;
    }
    Regs.set(X86::RAX);
    Regs.set(X86::RBX);
    Regs.set(X86::RBP);
    Regs.set(X86::RSI);
    Regs.set(X86::RDI);
    Regs.set(X86::RDX);
    Regs.set(X86::RCX);
    Regs.set(X86::R8);
    Regs.set(X86::R9);
    Regs.set(X86::R10);
    Regs.set(X86::R11);
    Regs.set(X86::R12);
    Regs.set(X86::R13);
    Regs.set(X86::R14);
    Regs.set(X86::R15);
  }

  void getClassicGPRegs(BitVector &Regs) const override {
    Regs |= getAliases(X86::RAX);
    Regs |= getAliases(X86::RBX);
    Regs |= getAliases(X86::RBP);
    Regs |= getAliases(X86::RSI);
    Regs |= getAliases(X86::RDI);
    Regs |= getAliases(X86::RDX);
    Regs |= getAliases(X86::RCX);
  }

  void getRepRegs(BitVector &Regs) const override {
    Regs |= getAliases(X86::RCX);
  }

  MCPhysReg getAliasSized(MCPhysReg Reg, uint8_t Size) const override {
    switch (Reg) {
    case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: case X86::AH:
      switch (Size) {
      case 8: return X86::RAX;       case 4: return X86::EAX;
      case 2: return X86::AX;        case 1: return X86::AL;
      default: llvm_unreachable("Unexpected size");
      }
    case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: case X86::BH:
      switch (Size) {
      case 8: return X86::RBX;       case 4: return X86::EBX;
      case 2: return X86::BX;        case 1: return X86::BL;
      default: llvm_unreachable("Unexpected size");
      }
    case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: case X86::DH:
      switch (Size) {
      case 8: return X86::RDX;       case 4: return X86::EDX;
      case 2: return X86::DX;        case 1: return X86::DL;
      default: llvm_unreachable("Unexpected size");
      }
    case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL:
      switch (Size) {
      case 8: return X86::RDI;       case 4: return X86::EDI;
      case 2: return X86::DI;        case 1: return X86::DIL;
      default: llvm_unreachable("Unexpected size");
      }
    case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL:
      switch (Size) {
      case 8: return X86::RSI;       case 4: return X86::ESI;
      case 2: return X86::SI;        case 1: return X86::SIL;
      default: llvm_unreachable("Unexpected size");
      }
    case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: case X86::CH:
      switch (Size) {
      case 8: return X86::RCX;       case 4: return X86::ECX;
      case 2: return X86::CX;        case 1: return X86::CL;
      default: llvm_unreachable("Unexpected size");
      }
    case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL:
      switch (Size) {
      case 8: return X86::RSP;       case 4: return X86::ESP;
      case 2: return X86::SP;        case 1: return X86::SPL;
      default: llvm_unreachable("Unexpected size");
      }
    case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL:
      switch (Size) {
      case 8: return X86::RBP;       case 4: return X86::EBP;
      case 2: return X86::BP;        case 1: return X86::BPL;
      default: llvm_unreachable("Unexpected size");
      }
    case X86::R8: case X86::R8D: case X86::R8W: case X86::R8B:
      switch (Size) {
      case 8: return X86::R8;        case 4: return X86::R8D;
      case 2: return X86::R8W;       case 1: return X86::R8B;
      default: llvm_unreachable("Unexpected size");
      }
    case X86::R9: case X86::R9D: case X86::R9W: case X86::R9B:
      switch (Size) {
      case 8: return X86::R9;        case 4: return X86::R9D;
      case 2: return X86::R9W;       case 1: return X86::R9B;
      default: llvm_unreachable("Unexpected size");
      }
    case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
      switch (Size) {
      case 8: return X86::R10;        case 4: return X86::R10D;
      case 2: return X86::R10W;       case 1: return X86::R10B;
      default: llvm_unreachable("Unexpected size");
      }
    case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
      switch (Size) {
      case 8: return X86::R11;        case 4: return X86::R11D;
      case 2: return X86::R11W;       case 1: return X86::R11B;
      default: llvm_unreachable("Unexpected size");
      }
    case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
      switch (Size) {
      case 8: return X86::R12;        case 4: return X86::R12D;
      case 2: return X86::R12W;       case 1: return X86::R12B;
      default: llvm_unreachable("Unexpected size");
      }
    case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
      switch (Size) {
      case 8: return X86::R13;        case 4: return X86::R13D;
      case 2: return X86::R13W;       case 1: return X86::R13B;
      default: llvm_unreachable("Unexpected size");
      }
    case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
      switch (Size) {
      case 8: return X86::R14;        case 4: return X86::R14D;
      case 2: return X86::R14W;       case 1: return X86::R14B;
      default: llvm_unreachable("Unexpected size");
      }
    case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
      switch (Size) {
      case 8: return X86::R15;        case 4: return X86::R15D;
      case 2: return X86::R15W;       case 1: return X86::R15B;
      default: llvm_unreachable("Unexpected size");
      }
    default:
      dbgs() << Reg << " (get alias sized)\n";
      llvm_unreachable("Unexpected reg number");
      break;
    }
  }
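
  // Illustrative examples (not in the original source):
  //
  //   getAliasSized(X86::AL, 8);   // == X86::RAX
  //   getAliasSized(X86::RBX, 2);  // == X86::BX
  //
  // High-byte registers (AH/BH/CH/DH) map to the same family as their
  // low-byte siblings, and Size == 1 always yields the low byte.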

  bool isUpper8BitReg(MCPhysReg Reg) const override {
    switch (Reg) {
    case X86::AH:
    case X86::BH:
    case X86::CH:
    case X86::DH:
      return true;
    default:
      return false;
    }
  }

  bool cannotUseREX(const MCInst &Inst) const override {
    switch (Inst.getOpcode()) {
    case X86::MOV8mr_NOREX:
    case X86::MOV8rm_NOREX:
    case X86::MOV8rr_NOREX:
    case X86::MOVSX32rm8_NOREX:
    case X86::MOVSX32rr8_NOREX:
    case X86::MOVZX32rm8_NOREX:
    case X86::MOVZX32rr8_NOREX:
    case X86::MOV8mr:
    case X86::MOV8rm:
    case X86::MOV8rr:
    case X86::MOVSX32rm8:
    case X86::MOVSX32rr8:
    case X86::MOVZX32rm8:
    case X86::MOVZX32rr8:
    case X86::TEST8ri:
      for (int I = 0, E = MCPlus::getNumPrimeOperands(Inst); I != E; ++I) {
        const MCOperand &Operand = Inst.getOperand(I);
        if (!Operand.isReg())
          continue;
        if (isUpper8BitReg(Operand.getReg()))
          return true;
      }
      LLVM_FALLTHROUGH;
    default:
      return false;
    }
  }

  bool isStackAccess(const MCInst &Inst, bool &IsLoad, bool &IsStore,
                     bool &IsStoreFromReg, MCPhysReg &Reg, int32_t &SrcImm,
                     uint16_t &StackPtrReg, int64_t &StackOffset, uint8_t &Size,
                     bool &IsSimple, bool &IsIndexed) const override {
    // Detect simple push/pop cases first
    if (int Sz = getPushSize(Inst)) {
      IsLoad = false;
      IsStore = true;
      IsStoreFromReg = true;
      StackPtrReg = X86::RSP;
      StackOffset = -Sz;
      Size = Sz;
      IsSimple = true;
      if (Inst.getOperand(0).isImm())
        SrcImm = Inst.getOperand(0).getImm();
      else if (Inst.getOperand(0).isReg())
        Reg = Inst.getOperand(0).getReg();
      else
        IsSimple = false;

      return true;
    }
    if (int Sz = getPopSize(Inst)) {
      IsLoad = true;
      IsStore = false;
      if (Inst.getNumOperands() == 0 || !Inst.getOperand(0).isReg()) {
        IsSimple = false;
      } else {
        Reg = Inst.getOperand(0).getReg();
        IsSimple = true;
      }
      StackPtrReg = X86::RSP;
      StackOffset = 0;
      Size = Sz;
      return true;
    }

    struct InstInfo {
      // Size in bytes that Inst loads from memory.
      uint8_t DataSize;
      bool IsLoad;
      bool IsStore;
      bool StoreFromReg;
      bool Simple;
    };

    InstInfo I;
    int MemOpNo = getMemoryOperandNo(Inst);
    const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
    // If it is not dealing with a memory operand, we discard it.
    if (MemOpNo == -1 || MCII.isCall())
      return false;

    switch (Inst.getOpcode()) {
    default: {
      uint8_t Sz = 0;
      bool IsLoad = MCII.mayLoad();
      bool IsStore = MCII.mayStore();
      // Is it LEA? (deals with memory but is not loading nor storing)
      if (!IsLoad && !IsStore)
        return false;

      // Try to guess data size involved in the load/store by looking at the
      // register size. If there's no reg involved, return 0 as size, meaning
      // we don't know.
      for (unsigned I = 0, E = MCII.getNumOperands(); I != E; ++I) {
        if (MCII.OpInfo[I].OperandType != MCOI::OPERAND_REGISTER)
          continue;
        if (static_cast<int>(I) >= MemOpNo && I < X86::AddrNumOperands)
          continue;
        Sz = RegInfo->getRegClass(MCII.OpInfo[I].RegClass).getSizeInBits() / 8;
        break;
      }
      I = {Sz, IsLoad, IsStore, false, false};
      break;
    }
    case X86::MOV16rm: I = {2, true, false, false, true}; break;
    case X86::MOV32rm: I = {4, true, false, false, true}; break;
    case X86::MOV64rm: I = {8, true, false, false, true}; break;
    case X86::MOV16mr: I = {2, false, true, true, true};  break;
    case X86::MOV32mr: I = {4, false, true, true, true};  break;
    case X86::MOV64mr: I = {8, false, true, true, true};  break;
    case X86::MOV16mi: I = {2, false, true, false, true}; break;
    case X86::MOV32mi: I = {4, false, true, false, true}; break;
    } // end switch (Inst.getOpcode())

    unsigned BaseRegNum;
    int64_t ScaleValue;
    unsigned IndexRegNum;
    int64_t DispValue;
    unsigned SegRegNum;
    const MCExpr *DispExpr;
    if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, &ScaleValue, &IndexRegNum,
                                  &DispValue, &SegRegNum, &DispExpr)) {
      LLVM_DEBUG(dbgs() << "Evaluate failed on ");
      LLVM_DEBUG(Inst.dump());
      return false;
    }

    // Make sure it's a stack access
    if (BaseRegNum != X86::RBP && BaseRegNum != X86::RSP)
      return false;

    IsLoad = I.IsLoad;
    IsStore = I.IsStore;
    IsStoreFromReg = I.StoreFromReg;
    Size = I.DataSize;
    IsSimple = I.Simple;
    StackPtrReg = BaseRegNum;
    StackOffset = DispValue;
    IsIndexed = IndexRegNum != X86::NoRegister || SegRegNum != X86::NoRegister;

    if (!I.Simple)
      return true;

    // Retrieve related register in simple MOV from/to stack operations.
    unsigned MemOpOffset = static_cast<unsigned>(MemOpNo);
    if (I.IsLoad) {
      MCOperand RegOpnd = Inst.getOperand(0);
      assert(RegOpnd.isReg() && "unexpected destination operand");
      Reg = RegOpnd.getReg();
    } else if (I.IsStore) {
      MCOperand SrcOpnd =
          Inst.getOperand(MemOpOffset + X86::AddrSegmentReg + 1);
      if (I.StoreFromReg) {
        assert(SrcOpnd.isReg() && "unexpected source operand");
        Reg = SrcOpnd.getReg();
      } else {
        assert(SrcOpnd.isImm() && "unexpected source operand");
        SrcImm = SrcOpnd.getImm();
      }
    }

    return true;
  }
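
  // Usage sketch (hypothetical values): classifying "mov [rsp+8], rdi":
  //
  //   bool IsLoad, IsStore, FromReg, Simple, Indexed;
  //   MCPhysReg Reg; int32_t Imm; uint16_t SPReg; int64_t Off; uint8_t Sz;
  //   if (isStackAccess(Inst, IsLoad, IsStore, FromReg, Reg, Imm, SPReg,
  //                     Off, Sz, Simple, Indexed))
  //     ...  // IsStore/FromReg true, Reg=RDI, SPReg=RSP, Off=8, Sz=8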

  void changeToPushOrPop(MCInst &Inst) const override {
    assert(!isPush(Inst) && !isPop(Inst));

    struct InstInfo {
      // Size in bytes that Inst loads from memory.
      uint8_t DataSize;
      bool IsLoad;
      bool StoreFromReg;
    };

    InstInfo I;
    switch (Inst.getOpcode()) {
    default: {
      llvm_unreachable("Unhandled opcode");
      return;
    }
    case X86::MOV16rm: I = {2, true, false}; break;
    case X86::MOV32rm: I = {4, true, false}; break;
    case X86::MOV64rm: I = {8, true, false}; break;
    case X86::MOV16mr: I = {2, false, true};  break;
    case X86::MOV32mr: I = {4, false, true};  break;
    case X86::MOV64mr: I = {8, false, true};  break;
    case X86::MOV16mi: I = {2, false, false}; break;
    case X86::MOV32mi: I = {4, false, false}; break;
    } // end switch (Inst.getOpcode())

    unsigned BaseRegNum;
    int64_t ScaleValue;
    unsigned IndexRegNum;
    int64_t DispValue;
    unsigned SegRegNum;
    const MCExpr *DispExpr;
    if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, &ScaleValue, &IndexRegNum,
                                  &DispValue, &SegRegNum, &DispExpr)) {
      llvm_unreachable("Evaluate failed");
      return;
    }
    // Make sure it's a stack access
    if (BaseRegNum != X86::RBP && BaseRegNum != X86::RSP) {
      llvm_unreachable("Not a stack access");
      return;
    }

    unsigned MemOpOffset = getMemoryOperandNo(Inst);
    unsigned NewOpcode = 0;
    if (I.IsLoad) {
      switch (I.DataSize) {
      case 2: NewOpcode = X86::POP16r; break;
      case 4: NewOpcode = X86::POP32r; break;
      case 8: NewOpcode = X86::POP64r; break;
      default:
        llvm_unreachable("Unexpected size");
      }
      unsigned RegOpndNum = Inst.getOperand(0).getReg();
      Inst.clear();
      Inst.setOpcode(NewOpcode);
      Inst.addOperand(MCOperand::createReg(RegOpndNum));
    } else {
      MCOperand SrcOpnd =
          Inst.getOperand(MemOpOffset + X86::AddrSegmentReg + 1);
      if (I.StoreFromReg) {
        switch (I.DataSize) {
        case 2: NewOpcode = X86::PUSH16r; break;
        case 4: NewOpcode = X86::PUSH32r; break;
        case 8: NewOpcode = X86::PUSH64r; break;
        default:
          llvm_unreachable("Unexpected size");
        }
        assert(SrcOpnd.isReg() && "Unexpected source operand");
        unsigned RegOpndNum = SrcOpnd.getReg();
        Inst.clear();
        Inst.setOpcode(NewOpcode);
        Inst.addOperand(MCOperand::createReg(RegOpndNum));
      } else {
        switch (I.DataSize) {
        case 2: NewOpcode = X86::PUSH16i8; break;
        case 4: NewOpcode = X86::PUSH32i8; break;
        case 8: NewOpcode = X86::PUSH64i32; break;
        default:
          llvm_unreachable("Unexpected size");
        }
        assert(SrcOpnd.isImm() && "Unexpected source operand");
        int64_t SrcImm = SrcOpnd.getImm();
        Inst.clear();
        Inst.setOpcode(NewOpcode);
        Inst.addOperand(MCOperand::createImm(SrcImm));
      }
    }
  }
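
  // Illustrative rewrites performed above (a sketch, assuming the access
  // is at the top of the stack):
  //
  //   mov qword ptr [rsp], rbx   ; X86::MOV64mr  ==>  push rbx (PUSH64r)
  //   mov rbx, qword ptr [rsp]   ; X86::MOV64rm  ==>  pop rbx  (POP64r)
  //
  // Only the opcode and the register/immediate operand survive; the memory
  // operands are dropped.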
1528 
1529   bool isStackAdjustment(const MCInst &Inst) const override {
1530     switch (Inst.getOpcode()) {
1531     default:
1532       return false;
1533     case X86::SUB64ri32:
1534     case X86::SUB64ri8:
1535     case X86::ADD64ri32:
1536     case X86::ADD64ri8:
1537     case X86::LEA64r:
1538       break;
1539     }
1540 
1541     const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
1542     for (int I = 0, E = MCII.getNumDefs(); I != E; ++I) {
1543       const MCOperand &Operand = Inst.getOperand(I);
1544       if (Operand.isReg() && Operand.getReg() == X86::RSP)
1545         return true;
1546     }
1547     return false;
1548   }
1549 
1550   bool evaluateSimple(const MCInst &Inst, int64_t &Output,
1551                       std::pair<MCPhysReg, int64_t> Input1,
1552                       std::pair<MCPhysReg, int64_t> Input2) const override {
1553 
1554     auto getOperandVal = [&](MCPhysReg Reg) -> ErrorOr<int64_t> {
1555       if (Reg == Input1.first)
1556         return Input1.second;
1557       if (Reg == Input2.first)
1558         return Input2.second;
1559       return make_error_code(errc::result_out_of_range);
1560     };
1561 
1562     switch (Inst.getOpcode()) {
1563     default:
1564       return false;
1565 
1566     case X86::AND64ri32:
1567     case X86::AND64ri8:
1568       if (!Inst.getOperand(2).isImm())
1569         return false;
1570       if (ErrorOr<int64_t> InputVal =
1571               getOperandVal(Inst.getOperand(1).getReg()))
1572         Output = *InputVal & Inst.getOperand(2).getImm();
1573       else
1574         return false;
1575       break;
1576     case X86::SUB64ri32:
1577     case X86::SUB64ri8:
1578       if (!Inst.getOperand(2).isImm())
1579         return false;
1580       if (ErrorOr<int64_t> InputVal =
1581               getOperandVal(Inst.getOperand(1).getReg()))
1582         Output = *InputVal - Inst.getOperand(2).getImm();
1583       else
1584         return false;
1585       break;
1586     case X86::ADD64ri32:
1587     case X86::ADD64ri8:
1588       if (!Inst.getOperand(2).isImm())
1589         return false;
1590       if (ErrorOr<int64_t> InputVal =
1591               getOperandVal(Inst.getOperand(1).getReg()))
1592         Output = *InputVal + Inst.getOperand(2).getImm();
1593       else
1594         return false;
1595       break;
1596     case X86::ADD64i32:
1597       if (!Inst.getOperand(0).isImm())
1598         return false;
1599       if (ErrorOr<int64_t> InputVal = getOperandVal(X86::RAX))
1600         Output = *InputVal + Inst.getOperand(0).getImm();
1601       else
1602         return false;
1603       break;
1604 
1605     case X86::LEA64r: {
1606       unsigned BaseRegNum;
1607       int64_t ScaleValue;
1608       unsigned IndexRegNum;
1609       int64_t DispValue;
1610       unsigned SegRegNum;
1611       const MCExpr *DispExpr = nullptr;
1612       if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, &ScaleValue,
1613                                     &IndexRegNum, &DispValue, &SegRegNum,
1614                                     &DispExpr))
1615         return false;
1616 
1617       if (BaseRegNum == X86::NoRegister || IndexRegNum != X86::NoRegister ||
1618           SegRegNum != X86::NoRegister || DispExpr)
1619         return false;
1620 
1621       if (ErrorOr<int64_t> InputVal = getOperandVal(BaseRegNum))
1622         Output = *InputVal + DispValue;
1623       else
1624         return false;
1625 
1626       break;
1627     }
1628     }
1629     return true;
1630   }
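
       // A hypothetical evaluation of evaluateSimple(), assuming
       // Input1 == {X86::RAX, 0x1000}:
       //
       //   lea 0x10(%rax), %rbx   (LEA64r)    =>  Output == 0x1010
       //   add $0x8, %rax         (ADD64ri8)  =>  Output == 0x1008
       //
       // Opcodes outside the switch, or source registers matching neither
       // Input1 nor Input2, make the function return false.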
1631 
1632   bool isRegToRegMove(const MCInst &Inst, MCPhysReg &From,
1633                       MCPhysReg &To) const override {
1634     switch (Inst.getOpcode()) {
1635     default:
1636       return false;
1637     case X86::LEAVE:
1638     case X86::LEAVE64:
1639       To = getStackPointer();
1640       From = getFramePointer();
1641       return true;
1642     case X86::MOV64rr:
1643       To = Inst.getOperand(0).getReg();
1644       From = Inst.getOperand(1).getReg();
1645       return true;
1646     }
1647   }
1648 
1649   MCPhysReg getStackPointer() const override { return X86::RSP; }
1650   MCPhysReg getFramePointer() const override { return X86::RBP; }
1651   MCPhysReg getFlagsReg() const override { return X86::EFLAGS; }
1652 
1653   bool escapesVariable(const MCInst &Inst,
1654                        bool HasFramePointer) const override {
1655     int MemOpNo = getMemoryOperandNo(Inst);
1656     const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
1657     const unsigned NumDefs = MCII.getNumDefs();
1658     static BitVector SPBPAliases(BitVector(getAliases(X86::RSP)) |=
1659                                  getAliases(X86::RBP));
1660     static BitVector SPAliases(getAliases(X86::RSP));
1661 
1662     // FIXME: PUSH can technically be a leak, but let's ignore this for now
1663     // because a lot of harmless prologue code will spill SP to the stack,
1664     // unless the push is clearly pushing an object address to the stack,
1665     // as demonstrated by it having a memory operand.
1666     bool IsPush = isPush(Inst);
1667     if (IsPush && MemOpNo == -1)
1668       return false;
1669 
1670     // We use this to detect LEA (has a memory operand but no memory access)
1671     bool AccessMem = MCII.mayLoad() || MCII.mayStore();
1672     bool DoesLeak = false;
1673     for (int I = 0, E = MCPlus::getNumPrimeOperands(Inst); I != E; ++I) {
1674       // Ignore if SP/BP is used to dereference memory -- that's fine
1675       if (MemOpNo != -1 && !IsPush && AccessMem && I >= MemOpNo &&
1676           I <= MemOpNo + 5)
1677         continue;
1678       // Ignore if someone is writing to SP/BP
1679       if (I < static_cast<int>(NumDefs))
1680         continue;
1681 
1682       const MCOperand &Operand = Inst.getOperand(I);
1683       if (HasFramePointer && Operand.isReg() && SPBPAliases[Operand.getReg()]) {
1684         DoesLeak = true;
1685         break;
1686       }
1687       if (!HasFramePointer && Operand.isReg() && SPAliases[Operand.getReg()]) {
1688         DoesLeak = true;
1689         break;
1690       }
1691     }
1692 
1693     // If a potential leak was found, check that it is not just writing to SP/BP itself
1694     if (DoesLeak) {
1695       for (int I = 0, E = NumDefs; I != E; ++I) {
1696         const MCOperand &Operand = Inst.getOperand(I);
1697         if (HasFramePointer && Operand.isReg() &&
1698             SPBPAliases[Operand.getReg()]) {
1699           DoesLeak = false;
1700           break;
1701         }
1702         if (!HasFramePointer && Operand.isReg() &&
1703             SPAliases[Operand.getReg()]) {
1704           DoesLeak = false;
1705           break;
1706         }
1707       }
1708     }
1709     return DoesLeak;
1710   }
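
       // Illustrative behavior of escapesVariable() when a frame pointer is
       // present (an assumed sketch):
       //
       //   mov %rbp, %rdi    // copies a stack address out -> returns true
       //   mov (%rbp), %rdi  // only dereferences RBP      -> returns false
       //   mov %rax, %rbp    // RBP is only the definition -> returns false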
1711 
1712   bool addToImm(MCInst &Inst, int64_t &Amt, MCContext *Ctx) const override {
1713     unsigned ImmOpNo = -1U;
1714     int MemOpNo = getMemoryOperandNo(Inst);
1715     if (MemOpNo != -1)
1716       ImmOpNo = MemOpNo + X86::AddrDisp;
1717     else
1718       for (unsigned Index = 0; Index < MCPlus::getNumPrimeOperands(Inst);
1719            ++Index)
1720         if (Inst.getOperand(Index).isImm())
1721           ImmOpNo = Index;
1722     if (ImmOpNo == -1U)
1723       return false;
1724 
1725     MCOperand &Operand = Inst.getOperand(ImmOpNo);
1726     Amt += Operand.getImm();
1727     Operand.setImm(Amt);
1728     // Check for the need for relaxation
1729     if (int64_t(Amt) == int64_t(int8_t(Amt)))
1730       return true;
1731 
1732     // Relax instruction
1733     switch (Inst.getOpcode()) {
1734     case X86::SUB64ri8:
1735       Inst.setOpcode(X86::SUB64ri32);
1736       break;
1737     case X86::ADD64ri8:
1738       Inst.setOpcode(X86::ADD64ri32);
1739       break;
1740     default:
1741       // No need for relaxation
1742       break;
1743     }
1744     return true;
1745   }
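
       // Worked example for addToImm() (a sketch): given `sub $0x10, %rsp`
       // (SUB64ri8) and an incoming Amt of 0x100, the immediate becomes 0x110.
       // That no longer fits in a signed 8-bit field, so the opcode is relaxed
       // to SUB64ri32.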
1746 
1747   /// TODO: this implementation currently works for the most common opcodes that
1748   /// load from memory. It can be extended to work with memory store opcodes as
1749   /// well as more memory load opcodes.
1750   bool replaceMemOperandWithImm(MCInst &Inst, StringRef ConstantData,
1751                                 uint64_t Offset) const override {
1752     enum CheckSignExt : uint8_t {
1753       NOCHECK = 0,
1754       CHECK8,
1755       CHECK32,
1756     };
1757 
1758     using CheckList = std::vector<std::pair<CheckSignExt, unsigned>>;
1759     struct InstInfo {
1760       // Size in bytes that Inst loads from memory.
1761       uint8_t DataSize;
1762 
1763       // True when the target operand has to be duplicated because the opcode
1764       // expects a LHS operand.
1765       bool HasLHS;
1766 
1767       // List of checks and corresponding opcodes to be used. We try to use the
1768       // smallest possible immediate value when various sizes are available,
1769       // hence we may need to check whether a larger constant fits in a smaller
1770       // immediate.
1771       CheckList Checks;
1772     };
1773 
1774     InstInfo I;
1775 
1776     switch (Inst.getOpcode()) {
1777     default: {
1778       switch (getPopSize(Inst)) {
1779       case 2:            I = {2, false, {{NOCHECK, X86::MOV16ri}}};  break;
1780       case 4:            I = {4, false, {{NOCHECK, X86::MOV32ri}}};  break;
1781       case 8:            I = {8, false, {{CHECK32, X86::MOV64ri32},
1782                                          {NOCHECK, X86::MOV64rm}}};  break;
1783       default:           return false;
1784       }
1785       break;
1786     }
1787 
1788     // MOV
1789     case X86::MOV8rm:      I = {1, false, {{NOCHECK, X86::MOV8ri}}};   break;
1790     case X86::MOV16rm:     I = {2, false, {{NOCHECK, X86::MOV16ri}}};  break;
1791     case X86::MOV32rm:     I = {4, false, {{NOCHECK, X86::MOV32ri}}};  break;
1792     case X86::MOV64rm:     I = {8, false, {{CHECK32, X86::MOV64ri32},
1793                                            {NOCHECK, X86::MOV64rm}}};  break;
1794 
1795     // MOVZX
1796     case X86::MOVZX16rm8:  I = {1, false, {{NOCHECK, X86::MOV16ri}}};  break;
1797     case X86::MOVZX32rm8:  I = {1, false, {{NOCHECK, X86::MOV32ri}}};  break;
1798     case X86::MOVZX32rm16: I = {2, false, {{NOCHECK, X86::MOV32ri}}};  break;
1799 
1800     // CMP
1801     case X86::CMP8rm:      I = {1, false, {{NOCHECK, X86::CMP8ri}}};   break;
1802     case X86::CMP16rm:     I = {2, false, {{CHECK8,  X86::CMP16ri8},
1803                                            {NOCHECK, X86::CMP16ri}}};  break;
1804     case X86::CMP32rm:     I = {4, false, {{CHECK8,  X86::CMP32ri8},
1805                                            {NOCHECK, X86::CMP32ri}}};  break;
1806     case X86::CMP64rm:     I = {8, false, {{CHECK8,  X86::CMP64ri8},
1807                                            {CHECK32, X86::CMP64ri32},
1808                                            {NOCHECK, X86::CMP64rm}}};  break;
1809 
1810     // TEST
1811     case X86::TEST8mr:     I = {1, false, {{NOCHECK, X86::TEST8ri}}};  break;
1812     case X86::TEST16mr:    I = {2, false, {{NOCHECK, X86::TEST16ri}}}; break;
1813     case X86::TEST32mr:    I = {4, false, {{NOCHECK, X86::TEST32ri}}}; break;
1814     case X86::TEST64mr:    I = {8, false, {{CHECK32, X86::TEST64ri32},
1815                                            {NOCHECK, X86::TEST64mr}}}; break;
1816 
1817     // ADD
1818     case X86::ADD8rm:      I = {1, true,  {{NOCHECK, X86::ADD8ri}}};   break;
1819     case X86::ADD16rm:     I = {2, true,  {{CHECK8,  X86::ADD16ri8},
1820                                            {NOCHECK, X86::ADD16ri}}};  break;
1821     case X86::ADD32rm:     I = {4, true,  {{CHECK8,  X86::ADD32ri8},
1822                                            {NOCHECK, X86::ADD32ri}}};  break;
1823     case X86::ADD64rm:     I = {8, true,  {{CHECK8,  X86::ADD64ri8},
1824                                            {CHECK32, X86::ADD64ri32},
1825                                            {NOCHECK, X86::ADD64rm}}};  break;
1826 
1827     // SUB
1828     case X86::SUB8rm:      I = {1, true,  {{NOCHECK, X86::SUB8ri}}};   break;
1829     case X86::SUB16rm:     I = {2, true,  {{CHECK8,  X86::SUB16ri8},
1830                                            {NOCHECK, X86::SUB16ri}}};  break;
1831     case X86::SUB32rm:     I = {4, true,  {{CHECK8,  X86::SUB32ri8},
1832                                            {NOCHECK, X86::SUB32ri}}};  break;
1833     case X86::SUB64rm:     I = {8, true,  {{CHECK8,  X86::SUB64ri8},
1834                                            {CHECK32, X86::SUB64ri32},
1835                                            {NOCHECK, X86::SUB64rm}}};  break;
1836 
1837     // AND
1838     case X86::AND8rm:      I = {1, true,  {{NOCHECK, X86::AND8ri}}};   break;
1839     case X86::AND16rm:     I = {2, true,  {{CHECK8,  X86::AND16ri8},
1840                                            {NOCHECK, X86::AND16ri}}};  break;
1841     case X86::AND32rm:     I = {4, true,  {{CHECK8,  X86::AND32ri8},
1842                                            {NOCHECK, X86::AND32ri}}};  break;
1843     case X86::AND64rm:     I = {8, true,  {{CHECK8,  X86::AND64ri8},
1844                                            {CHECK32, X86::AND64ri32},
1845                                            {NOCHECK, X86::AND64rm}}};  break;
1846 
1847     // OR
1848     case X86::OR8rm:       I = {1, true,  {{NOCHECK, X86::OR8ri}}};    break;
1849     case X86::OR16rm:      I = {2, true,  {{CHECK8,  X86::OR16ri8},
1850                                            {NOCHECK, X86::OR16ri}}};   break;
1851     case X86::OR32rm:      I = {4, true,  {{CHECK8,  X86::OR32ri8},
1852                                            {NOCHECK, X86::OR32ri}}};   break;
1853     case X86::OR64rm:      I = {8, true,  {{CHECK8,  X86::OR64ri8},
1854                                            {CHECK32, X86::OR64ri32},
1855                                            {NOCHECK, X86::OR64rm}}};   break;
1856 
1857     // XOR
1858     case X86::XOR8rm:      I = {1, true,  {{NOCHECK, X86::XOR8ri}}};   break;
1859     case X86::XOR16rm:     I = {2, true,  {{CHECK8,  X86::XOR16ri8},
1860                                            {NOCHECK, X86::XOR16ri}}};  break;
1861     case X86::XOR32rm:     I = {4, true,  {{CHECK8,  X86::XOR32ri8},
1862                                            {NOCHECK, X86::XOR32ri}}};  break;
1863     case X86::XOR64rm:     I = {8, true,  {{CHECK8,  X86::XOR64ri8},
1864                                            {CHECK32, X86::XOR64ri32},
1865                                            {NOCHECK, X86::XOR64rm}}};  break;
1866     }
1867 
1868     // Compute the immediate value.
1869     assert(Offset + I.DataSize <= ConstantData.size() &&
1870            "invalid offset for given constant data");
1871     int64_t ImmVal =
1872         DataExtractor(ConstantData, true, 8).getSigned(&Offset, I.DataSize);
1873 
1874     // Compute the new opcode.
1875     unsigned NewOpcode = 0;
1876     for (const std::pair<CheckSignExt, unsigned> &Check : I.Checks) {
1877       NewOpcode = Check.second;
1878       if (Check.first == NOCHECK)
1879         break;
1880       if (Check.first == CHECK8 && isInt<8>(ImmVal))
1881         break;
1882       if (Check.first == CHECK32 && isInt<32>(ImmVal))
1883         break;
1884     }
1885     if (NewOpcode == Inst.getOpcode())
1886       return false;
1887 
1888     // Modify the instruction.
1889     MCOperand ImmOp = MCOperand::createImm(ImmVal);
1890     uint32_t TargetOpNum = 0;
1891     // The TEST instruction does not follow the regular pattern of putting the
1892     // memory reference of a load (5 MCOperands) last in the list of operands.
1893     // Since TEST does not modify the register operand, that operand is not
1894     // treated as a destination and is not the first operand, as it is in the
1895     // other instructions we handle here.
1896     if (NewOpcode == X86::TEST8ri || NewOpcode == X86::TEST16ri ||
1897         NewOpcode == X86::TEST32ri || NewOpcode == X86::TEST64ri32)
1898       TargetOpNum = getMemoryOperandNo(Inst) + X86::AddrNumOperands;
1899 
1900     MCOperand TargetOp = Inst.getOperand(TargetOpNum);
1901     Inst.clear();
1902     Inst.setOpcode(NewOpcode);
1903     Inst.addOperand(TargetOp);
1904     if (I.HasLHS)
1905       Inst.addOperand(TargetOp);
1906     Inst.addOperand(ImmOp);
1907 
1908     return true;
1909   }
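
       // A hypothetical rewrite, assuming ConstantData holds the little-endian
       // value 0x2a at Offset:
       //
       //   mov 0x10(%rip), %eax   (MOV32rm)  =>  mov $0x2a, %eax  (MOV32ri)
       //   cmp 0x10(%rip), %rax   (CMP64rm)  =>  cmp $0x2a, %rax  (CMP64ri8)
       //
       // The CMP picks the ri8 form because 0x2a passes the CHECK8 test.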
1910 
1911   /// TODO: this implementation currently works for the most common opcodes that
1912   /// load from memory. It can be extended to work with memory store opcodes as
1913   /// well as more memory load opcodes.
1914   bool replaceMemOperandWithReg(MCInst &Inst, MCPhysReg RegNum) const override {
1915     unsigned NewOpcode;
1916 
1917     switch (Inst.getOpcode()) {
1918     default: {
1919       switch (getPopSize(Inst)) {
1920       case 2:            NewOpcode = X86::MOV16rr; break;
1921       case 4:            NewOpcode = X86::MOV32rr; break;
1922       case 8:            NewOpcode = X86::MOV64rr; break;
1923       default:           return false;
1924       }
1925       break;
1926     }
1927 
1928     // MOV
1929     case X86::MOV8rm:      NewOpcode = X86::MOV8rr;   break;
1930     case X86::MOV16rm:     NewOpcode = X86::MOV16rr;  break;
1931     case X86::MOV32rm:     NewOpcode = X86::MOV32rr;  break;
1932     case X86::MOV64rm:     NewOpcode = X86::MOV64rr;  break;
1933     }
1934 
1935     // Modify the instruction.
1936     MCOperand RegOp = MCOperand::createReg(RegNum);
1937     MCOperand TargetOp = Inst.getOperand(0);
1938     Inst.clear();
1939     Inst.setOpcode(NewOpcode);
1940     Inst.addOperand(TargetOp);
1941     Inst.addOperand(RegOp);
1942 
1943     return true;
1944   }
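
       // For instance (a sketch), when the loaded value is already live in
       // %rcx:
       //
       //   mov 0x10(%rip), %rax   (MOV64rm)  =>  mov %rcx, %rax  (MOV64rr)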
1945 
1946   bool isRedundantMove(const MCInst &Inst) const override {
1947     switch (Inst.getOpcode()) {
1948     default:
1949       return false;
1950 
1951     // MOV
1952     case X86::MOV8rr:
1953     case X86::MOV16rr:
1954     case X86::MOV32rr:
1955     case X86::MOV64rr:
1956       break;
1957     }
1958 
1959     assert(Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg());
1960     return Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg();
1961   }
1962 
1963   bool requiresAlignedAddress(const MCInst &Inst) const override {
1964     const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
1965     for (unsigned int I = 0; I < Desc.getNumOperands(); ++I) {
1966       const MCOperandInfo &Op = Desc.OpInfo[I];
1967       if (Op.OperandType != MCOI::OPERAND_REGISTER)
1968         continue;
1969       if (Op.RegClass == X86::VR128RegClassID)
1970         return true;
1971     }
1972     return false;
1973   }
1974 
1975   bool convertJmpToTailCall(MCInst &Inst) override {
1976     if (isTailCall(Inst))
1977       return false;
1978 
1979     int NewOpcode;
1980     switch (Inst.getOpcode()) {
1981     default:
1982       return false;
1983     case X86::JMP_1:
1984     case X86::JMP_2:
1985     case X86::JMP_4:
1986       NewOpcode = X86::JMP_4;
1987       break;
1988     case X86::JMP16m:
1989     case X86::JMP32m:
1990     case X86::JMP64m:
1991       NewOpcode = X86::JMP32m;
1992       break;
1993     case X86::JMP16r:
1994     case X86::JMP32r:
1995     case X86::JMP64r:
1996       NewOpcode = X86::JMP32r;
1997       break;
1998     }
1999 
2000     Inst.setOpcode(NewOpcode);
2001     setTailCall(Inst);
2002     return true;
2003   }
2004 
2005   bool convertTailCallToJmp(MCInst &Inst) override {
2006     int NewOpcode;
2007     switch (Inst.getOpcode()) {
2008     default:
2009       return false;
2010     case X86::JMP_4:
2011       NewOpcode = X86::JMP_1;
2012       break;
2013     case X86::JMP32m:
2014       NewOpcode = X86::JMP64m;
2015       break;
2016     case X86::JMP32r:
2017       NewOpcode = X86::JMP64r;
2018       break;
2019     }
2020 
2021     Inst.setOpcode(NewOpcode);
2022     removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
2023     clearOffset(Inst);
2024     return true;
2025   }
2026 
2027   bool convertTailCallToCall(MCInst &Inst) override {
2028     int NewOpcode;
2029     switch (Inst.getOpcode()) {
2030     default:
2031       return false;
2032     case X86::JMP_4:
2033       NewOpcode = X86::CALL64pcrel32;
2034       break;
2035     case X86::JMP32m:
2036       NewOpcode = X86::CALL64m;
2037       break;
2038     case X86::JMP32r:
2039       NewOpcode = X86::CALL64r;
2040       break;
2041     }
2042 
2043     Inst.setOpcode(NewOpcode);
2044     removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
2045     return true;
2046   }
2047 
2048   bool convertCallToIndirectCall(MCInst &Inst, const MCSymbol *TargetLocation,
2049                                  MCContext *Ctx) override {
2050     bool IsTailCall = isTailCall(Inst);
2051     assert((Inst.getOpcode() == X86::CALL64pcrel32 ||
2052             (Inst.getOpcode() == X86::JMP_4 && IsTailCall)) &&
2053            "64-bit direct (tail) call instruction expected");
2054     const auto NewOpcode =
2055         (Inst.getOpcode() == X86::CALL64pcrel32) ? X86::CALL64m : X86::JMP32m;
2056     Inst.setOpcode(NewOpcode);
2057 
2058     // Replace the first operand and preserve auxiliary operands of
2059     // the instruction.
2060     Inst.erase(Inst.begin());
2061     Inst.insert(Inst.begin(),
2062                 MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
2063     Inst.insert(Inst.begin(),
2064                 MCOperand::createExpr(                  // Displacement
2065                     MCSymbolRefExpr::create(TargetLocation,
2066                                             MCSymbolRefExpr::VK_None, *Ctx)));
2067     Inst.insert(Inst.begin(),
2068                 MCOperand::createReg(X86::NoRegister)); // IndexReg
2069     Inst.insert(Inst.begin(),
2070                 MCOperand::createImm(1));               // ScaleAmt
2071     Inst.insert(Inst.begin(),
2072                 MCOperand::createReg(X86::RIP));        // BaseReg
2073 
2074     return true;
2075   }
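
       // For example (illustrative), a direct call
       //
       //   callq foo
       //
       // becomes a RIP-relative call through the memory slot at TargetLocation
       // that holds foo's address:
       //
       //   callq *TargetLocation(%rip)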
2076 
2077   void convertIndirectCallToLoad(MCInst &Inst, MCPhysReg Reg) override {
2078     bool IsTailCall = isTailCall(Inst);
2079     if (IsTailCall)
2080       removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
2081     if (Inst.getOpcode() == X86::CALL64m ||
2082         (Inst.getOpcode() == X86::JMP32m && IsTailCall)) {
2083       Inst.setOpcode(X86::MOV64rm);
2084       Inst.insert(Inst.begin(), MCOperand::createReg(Reg));
2085       return;
2086     }
2087     if (Inst.getOpcode() == X86::CALL64r ||
2088         (Inst.getOpcode() == X86::JMP32r && IsTailCall)) {
2089       Inst.setOpcode(X86::MOV64rr);
2090       Inst.insert(Inst.begin(), MCOperand::createReg(Reg));
2091       return;
2092     }
2093     LLVM_DEBUG(Inst.dump());
2094     llvm_unreachable("not implemented");
2095   }
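
       // E.g. (illustrative): `call *(%rax)` (CALL64m) becomes
       // `mov (%rax), %Reg`, while `call *%rax` (CALL64r) becomes
       // `mov %rax, %Reg`.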
2096 
2097   bool shortenInstruction(MCInst &Inst) const override {
2098     unsigned OldOpcode = Inst.getOpcode();
2099     unsigned NewOpcode = OldOpcode;
2100 
2101     // Check for and remove EIZ/RIZ. These represent ambiguous cases where a SIB
2102     // byte is present, but no index is used and the ModRM byte alone should have
2103     // been enough. Converting to NoRegister effectively removes the SIB byte.
2104     int MemOpNo = getMemoryOperandNo(Inst);
2105     if (MemOpNo >= 0) {
2106       MCOperand &IndexOp =
2107           Inst.getOperand(static_cast<unsigned>(MemOpNo) + X86::AddrIndexReg);
2108       if (IndexOp.getReg() == X86::EIZ || IndexOp.getReg() == X86::RIZ)
2109         IndexOp = MCOperand::createReg(X86::NoRegister);
2110     }
2111 
2112     if (isBranch(Inst)) {
2113       NewOpcode = getShortBranchOpcode(OldOpcode);
2114     } else if (OldOpcode == X86::MOV64ri) {
2115       if (Inst.getOperand(MCPlus::getNumPrimeOperands(Inst) - 1).isImm()) {
2116         const int64_t Imm =
2117             Inst.getOperand(MCPlus::getNumPrimeOperands(Inst) - 1).getImm();
2118         if (int64_t(Imm) == int64_t(int32_t(Imm)))
2119           NewOpcode = X86::MOV64ri32;
2120       }
2121     } else {
2122       // For an arithmetic instruction, check if the signed operand fits in 1 byte.
2123       const unsigned ShortOpcode = getShortArithOpcode(OldOpcode);
2124       if (ShortOpcode != OldOpcode &&
2125           Inst.getOperand(MCPlus::getNumPrimeOperands(Inst) - 1).isImm()) {
2126         int64_t Imm =
2127             Inst.getOperand(MCPlus::getNumPrimeOperands(Inst) - 1).getImm();
2128         if (int64_t(Imm) == int64_t(int8_t(Imm)))
2129           NewOpcode = ShortOpcode;
2130       }
2131     }
2132 
2133     if (NewOpcode == OldOpcode)
2134       return false;
2135 
2136     Inst.setOpcode(NewOpcode);
2137     return true;
2138   }
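
       // Illustrative shortenings performed here (a sketch):
       //
       //   add $0x1, %rax     ADD64ri32 => ADD64ri8   (imm fits in 8 bits)
       //   movabs $0x1, %rax  MOV64ri   => MOV64ri32  (imm fits in 32 bits)
       //   jmp <label>        JMP_4     => JMP_1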
2139 
2140   bool
2141   convertMoveToConditionalMove(MCInst &Inst, unsigned CC, bool AllowStackMemOp,
2142                                bool AllowBasePtrStackMemOp) const override {
2143     // - Register-register moves are OK
2144     // - Stores are filtered out by opcode (no store CMOV)
2145     // - Non-stack loads are prohibited (generally unsafe)
2146     // - Stack loads are OK if AllowStackMemOp is true
2147     // - Stack loads with RBP are OK if AllowBasePtrStackMemOp is true
2148     if (isLoad(Inst)) {
2149       // If stack memory operands are not allowed, no loads are allowed
2150       if (!AllowStackMemOp)
2151         return false;
2152 
2153       // If stack memory operands are allowed, check if it's a load from the stack
2154       bool IsLoad, IsStore, IsStoreFromReg, IsSimple, IsIndexed;
2155       MCPhysReg Reg;
2156       int32_t SrcImm;
2157       uint16_t StackPtrReg;
2158       int64_t StackOffset;
2159       uint8_t Size;
2160       bool IsStackAccess =
2161           isStackAccess(Inst, IsLoad, IsStore, IsStoreFromReg, Reg, SrcImm,
2162                         StackPtrReg, StackOffset, Size, IsSimple, IsIndexed);
2163       // Prohibit non-stack-based loads
2164       if (!IsStackAccess)
2165         return false;
2166       // If stack memory operands are allowed, check if it's RBP-based
2167       if (!AllowBasePtrStackMemOp &&
2168           RegInfo->isSubRegisterEq(X86::RBP, StackPtrReg))
2169         return false;
2170     }
2171 
2172     unsigned NewOpcode = 0;
2173     switch (Inst.getOpcode()) {
2174     case X86::MOV16rr:
2175       NewOpcode = X86::CMOV16rr;
2176       break;
2177     case X86::MOV16rm:
2178       NewOpcode = X86::CMOV16rm;
2179       break;
2180     case X86::MOV32rr:
2181       NewOpcode = X86::CMOV32rr;
2182       break;
2183     case X86::MOV32rm:
2184       NewOpcode = X86::CMOV32rm;
2185       break;
2186     case X86::MOV64rr:
2187       NewOpcode = X86::CMOV64rr;
2188       break;
2189     case X86::MOV64rm:
2190       NewOpcode = X86::CMOV64rm;
2191       break;
2192     default:
2193       return false;
2194     }
2195     Inst.setOpcode(NewOpcode);
2196     // Insert CC at the end of prime operands, before annotations
2197     Inst.insert(Inst.begin() + MCPlus::getNumPrimeOperands(Inst),
2198                 MCOperand::createImm(CC));
2199     // CMOV is a 3-operand MCInst, so duplicate the destination as src1
2200     Inst.insert(Inst.begin(), Inst.getOperand(0));
2201     return true;
2202   }
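
       // For example (illustrative), with CC == X86::COND_E:
       //
       //   mov %rsi, %rax   (MOV64rr)  =>  cmove %rsi, %rax   (CMOV64rr)
       //
       // The destination is duplicated as the first source so the CMOV keeps
       // its old value when the condition is false.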
2203 
2204   bool lowerTailCall(MCInst &Inst) override {
2205     if (Inst.getOpcode() == X86::JMP_4 && isTailCall(Inst)) {
2206       Inst.setOpcode(X86::JMP_1);
2207       removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
2208       return true;
2209     }
2210     return false;
2211   }
2212 
2213   const MCSymbol *getTargetSymbol(const MCInst &Inst,
2214                                   unsigned OpNum = 0) const override {
2215     if (OpNum >= MCPlus::getNumPrimeOperands(Inst))
2216       return nullptr;
2217 
2218     const MCOperand &Op = Inst.getOperand(OpNum);
2219     if (!Op.isExpr())
2220       return nullptr;
2221 
2222     auto *SymExpr = dyn_cast<MCSymbolRefExpr>(Op.getExpr());
2223     if (!SymExpr || SymExpr->getKind() != MCSymbolRefExpr::VK_None)
2224       return nullptr;
2225 
2226     return &SymExpr->getSymbol();
2227   }
2228 
2229   // This is the same as the base class, but since we are overriding one of
2230   // getTargetSymbol's signatures above, we need to override all of them.
2231   const MCSymbol *getTargetSymbol(const MCExpr *Expr) const override {
2232     return &cast<const MCSymbolRefExpr>(Expr)->getSymbol();
2233   }
2234 
2235   bool analyzeBranch(InstructionIterator Begin, InstructionIterator End,
2236                      const MCSymbol *&TBB, const MCSymbol *&FBB,
2237                      MCInst *&CondBranch,
2238                      MCInst *&UncondBranch) const override {
2239     auto I = End;
2240 
2241     // Bottom-up analysis
2242     while (I != Begin) {
2243       --I;
2244 
2245       // Ignore nops and CFIs
2246       if (isPseudo(*I))
2247         continue;
2248 
2249       // Stop when we find the first non-terminator
2250       if (!isTerminator(*I))
2251         break;
2252 
2253       if (!isBranch(*I))
2254         break;
2255 
2256       // Handle unconditional branches.
2257       if ((I->getOpcode() == X86::JMP_1 || I->getOpcode() == X86::JMP_2 ||
2258            I->getOpcode() == X86::JMP_4) &&
2259           !isTailCall(*I)) {
2260         // If any code was seen after this unconditional branch, it is
2261         // unreachable code. Ignore it.
2262         CondBranch = nullptr;
2263         UncondBranch = &*I;
2264         const MCSymbol *Sym = getTargetSymbol(*I);
2265         assert(Sym != nullptr &&
2266                "Couldn't extract BB symbol from jump operand");
2267         TBB = Sym;
2268         continue;
2269       }
2270 
2271       // Handle conditional branches and ignore indirect branches
2272       if (!isUnsupportedBranch(I->getOpcode()) &&
2273           getCondCode(*I) == X86::COND_INVALID) {
2274         // Indirect branch
2275         return false;
2276       }
2277 
2278       if (CondBranch == nullptr) {
2279         const MCSymbol *TargetBB = getTargetSymbol(*I);
2280         if (TargetBB == nullptr) {
2281           // Unrecognized branch target
2282           return false;
2283         }
2284         FBB = TBB;
2285         TBB = TargetBB;
2286         CondBranch = &*I;
2287         continue;
2288       }
2289 
2290       llvm_unreachable("multiple conditional branches in one BB");
2291     }
2292     return true;
2293   }
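
       // For a basic block ending in (a sketch):
       //
       //   jne LBB1
       //   jmp LBB2
       //
       // the bottom-up scan first sets UncondBranch to the JMP with
       // TBB == LBB2, then the JCC moves LBB2 into FBB, sets TBB == LBB1, and
       // records CondBranch.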
2294 
2295   template <typename Itr>
2296   std::pair<IndirectBranchType, MCInst *>
2297   analyzePICJumpTable(Itr II, Itr IE, MCPhysReg R1, MCPhysReg R2) const {
2298     // Analyze PIC-style jump table code template:
2299     //
2300     //    lea PIC_JUMP_TABLE(%rip), {%r1|%r2}     <- MemLocInstr
2301     //    mov ({%r1|%r2}, %index, 4), {%r2|%r1}
2302     //    add %r2, %r1
2303     //    jmp *%r1
2304     //
2305     // (with any irrelevant instructions in-between)
2306     //
2307     // When we call this helper, we've already determined %r1 and %r2, and the
2308     // reverse instruction iterator \p II points to the ADD instruction.
2309     //
2310     // A PIC jump table looks like the following:
2311     //
2312     //   JT:  ----------
2313     //    E1:| L1 - JT  |
2314     //       |----------|
2315     //    E2:| L2 - JT  |
2316     //       |----------|
2317     //       |          |
2318     //          ......
2319     //    En:| Ln - JT  |
2320     //        ----------
2321     //
2322     // Where L1, L2, ..., Ln represent labels in the function.
2323     //
2324     // The actual relocations in the table will be of the form:
2325     //
2326     //   Ln - JT
2327     //    = (Ln - En) + (En - JT)
2328     //    = R_X86_64_PC32(Ln) + En - JT
2329     //    = R_X86_64_PC32(Ln + offsetof(En))
2330     //
2331     LLVM_DEBUG(dbgs() << "Checking for PIC jump table\n");
2332     MCInst *MemLocInstr = nullptr;
2333     const MCInst *MovInstr = nullptr;
2334     while (++II != IE) {
2335       MCInst &Instr = *II;
2336       const MCInstrDesc &InstrDesc = Info->get(Instr.getOpcode());
2337       if (!InstrDesc.hasDefOfPhysReg(Instr, R1, *RegInfo) &&
2338           !InstrDesc.hasDefOfPhysReg(Instr, R2, *RegInfo)) {
2339         // Ignore instructions that don't affect R1, R2 registers.
2340         continue;
2341       }
2342       if (!MovInstr) {
2343         // Expect to see MOV instruction.
2344         if (!isMOVSX64rm32(Instr)) {
2345           LLVM_DEBUG(dbgs() << "MOV instruction expected.\n");
2346           break;
2347         }
2348 
2349         // Check if it's setting %r1 or %r2. In canonical form it sets %r2.
2350         // If it sets %r1, rename the registers so that we only have to check
2351         // a single form.
2352         unsigned MovDestReg = Instr.getOperand(0).getReg();
2353         if (MovDestReg != R2)
2354           std::swap(R1, R2);
2355         if (MovDestReg != R2) {
2356           LLVM_DEBUG(dbgs() << "MOV instruction expected to set %r2\n");
2357           break;
2358         }
2359 
2360         // Verify operands for MOV.
2361         unsigned  BaseRegNum;
2362         int64_t   ScaleValue;
2363         unsigned  IndexRegNum;
2364         int64_t   DispValue;
2365         unsigned  SegRegNum;
2366         if (!evaluateX86MemoryOperand(Instr, &BaseRegNum, &ScaleValue,
2367                                       &IndexRegNum, &DispValue, &SegRegNum))
2368           break;
2369         if (BaseRegNum != R1 || ScaleValue != 4 ||
2370             IndexRegNum == X86::NoRegister || DispValue != 0 ||
2371             SegRegNum != X86::NoRegister)
2372           break;
2373         MovInstr = &Instr;
2374       } else {
2375         if (!InstrDesc.hasDefOfPhysReg(Instr, R1, *RegInfo))
2376           continue;
2377         if (!isLEA64r(Instr)) {
2378           LLVM_DEBUG(dbgs() << "LEA instruction expected\n");
2379           break;
2380         }
2381         if (Instr.getOperand(0).getReg() != R1) {
2382           LLVM_DEBUG(dbgs() << "LEA instruction expected to set %r1\n");
2383           break;
2384         }
2385 
2386         // Verify operands for LEA.
2387         unsigned      BaseRegNum;
2388         int64_t       ScaleValue;
2389         unsigned      IndexRegNum;
2390         const MCExpr *DispExpr = nullptr;
2391         int64_t       DispValue;
2392         unsigned      SegRegNum;
2393         if (!evaluateX86MemoryOperand(Instr, &BaseRegNum, &ScaleValue,
2394                                       &IndexRegNum, &DispValue, &SegRegNum,
2395                                       &DispExpr))
2396           break;
2397         if (BaseRegNum != RegInfo->getProgramCounter() ||
2398             IndexRegNum != X86::NoRegister || SegRegNum != X86::NoRegister ||
2399             DispExpr == nullptr)
2400           break;
2401         MemLocInstr = &Instr;
2402         break;
2403       }
2404     }
2405 
2406     if (!MemLocInstr)
2407       return std::make_pair(IndirectBranchType::UNKNOWN, nullptr);
2408 
2409     LLVM_DEBUG(dbgs() << "checking potential PIC jump table\n");
2410     return std::make_pair(IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE,
2411                           MemLocInstr);
2412   }
2413 
2414   IndirectBranchType analyzeIndirectBranch(
2415       MCInst &Instruction, InstructionIterator Begin, InstructionIterator End,
2416       const unsigned PtrSize, MCInst *&MemLocInstrOut, unsigned &BaseRegNumOut,
2417       unsigned &IndexRegNumOut, int64_t &DispValueOut,
2418       const MCExpr *&DispExprOut, MCInst *&PCRelBaseOut) const override {
2419     // Try to find a (base) memory location from which the address for
2420     // the indirect branch is loaded. For X86-64 the memory will be specified
2421     // in the following format:
2422     //
2423     //   {%rip}/{%basereg} + Imm + IndexReg * Scale
2424     //
2425     // We are interested in the cases where Scale == sizeof(uintptr_t) and
2426     // the contents of the memory are presumably an array of pointers to code.
2427     //
2428     // Normal jump table:
2429     //
2430     //    jmp *(JUMP_TABLE, %index, Scale)        <- MemLocInstr
2431     //
2432     //    or
2433     //
2434     //    mov (JUMP_TABLE, %index, Scale), %r1    <- MemLocInstr
2435     //    ...
2436     //    jmp %r1
2437     //
2438     // We handle PIC-style jump tables separately.
2439     //
2440     MemLocInstrOut = nullptr;
2441     BaseRegNumOut = X86::NoRegister;
2442     IndexRegNumOut = X86::NoRegister;
2443     DispValueOut = 0;
2444     DispExprOut = nullptr;
2445 
2446     std::reverse_iterator<InstructionIterator> II(End);
2447     std::reverse_iterator<InstructionIterator> IE(Begin);
2448 
2449     IndirectBranchType Type = IndirectBranchType::UNKNOWN;
2450 
2451     // An instruction referencing the memory used by the jump instruction
2452     // (directly or via a register). This location could be an array of
2453     // function pointers in the case of an indirect tail call, or a jump table.
2454     MCInst *MemLocInstr = nullptr;
2455 
2456     if (MCPlus::getNumPrimeOperands(Instruction) == 1) {
2457       // If the indirect jump is through a register, try to detect whether the
2458       // register value is loaded from a memory location.
2459       assert(Instruction.getOperand(0).isReg() && "register operand expected");
2460       const unsigned R1 = Instruction.getOperand(0).getReg();
2461       // Check if one of the previous instructions defines the jump-on register.
2462       for (auto PrevII = II; PrevII != IE; ++PrevII) {
2463         MCInst &PrevInstr = *PrevII;
2464         const MCInstrDesc &PrevInstrDesc = Info->get(PrevInstr.getOpcode());
2465 
2466         if (!PrevInstrDesc.hasDefOfPhysReg(PrevInstr, R1, *RegInfo))
2467           continue;
2468 
2469         if (isMoveMem2Reg(PrevInstr)) {
2470           MemLocInstr = &PrevInstr;
2471           break;
2472         }
2473         if (isADD64rr(PrevInstr)) {
2474           unsigned R2 = PrevInstr.getOperand(2).getReg();
2475           if (R1 == R2)
2476             return IndirectBranchType::UNKNOWN;
2477           std::tie(Type, MemLocInstr) = analyzePICJumpTable(PrevII, IE, R1, R2);
2478           break;
2479         }
2480         return IndirectBranchType::UNKNOWN;
2481       }
2482       if (!MemLocInstr) {
2483         // No definition was seen for the register in this function so far. It
2484         // could be an input parameter, which would make this an external code
2485         // reference. The definition could also be in code that we haven't
2486         // processed yet. Since we have to be conservative, return the
2487         // UNKNOWN case.
2488         return IndirectBranchType::UNKNOWN;
2489       }
2490     } else {
2491       MemLocInstr = &Instruction;
2492     }
2493 
2494     const MCRegister RIPRegister = RegInfo->getProgramCounter();
2495 
2496     // Analyze the memory location.
2497     unsigned BaseRegNum, IndexRegNum, SegRegNum;
2498     int64_t ScaleValue, DispValue;
2499     const MCExpr *DispExpr;
2500 
2501     if (!evaluateX86MemoryOperand(*MemLocInstr, &BaseRegNum, &ScaleValue,
2502                                   &IndexRegNum, &DispValue, &SegRegNum,
2503                                   &DispExpr))
2504       return IndirectBranchType::UNKNOWN;
2505 
2506     BaseRegNumOut = BaseRegNum;
2507     IndexRegNumOut = IndexRegNum;
2508     DispValueOut = DispValue;
2509     DispExprOut = DispExpr;
2510 
2511     if ((BaseRegNum != X86::NoRegister && BaseRegNum != RIPRegister) ||
2512         SegRegNum != X86::NoRegister)
2513       return IndirectBranchType::UNKNOWN;
2514 
2515     if (MemLocInstr == &Instruction &&
2516         (!ScaleValue || IndexRegNum == X86::NoRegister)) {
2517       MemLocInstrOut = MemLocInstr;
2518       return IndirectBranchType::POSSIBLE_FIXED_BRANCH;
2519     }
2520 
2521     if (Type == IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE &&
2522         (ScaleValue != 1 || BaseRegNum != RIPRegister))
2523       return IndirectBranchType::UNKNOWN;
2524 
2525     if (Type != IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE &&
2526         ScaleValue != PtrSize)
2527       return IndirectBranchType::UNKNOWN;
2528 
2529     MemLocInstrOut = MemLocInstr;
2530 
2531     return Type;
2532   }
2533 
2534   /// Analyze a callsite to see if it could be a virtual method call. This only
2535   /// checks whether the overall pattern is satisfied; it does not guarantee
2536   /// that the callsite is a true virtual method call.
2537   /// The format of virtual method calls that are recognized is one of the
2538   /// following:
2539   ///
2540   ///  Form 1: (found in debug code)
2541   ///    add METHOD_OFFSET, %VtableReg
2542   ///    mov (%VtableReg), %MethodReg
2543   ///    ...
2544   ///    call or jmp *%MethodReg
2545   ///
2546   ///  Form 2:
2547   ///    mov METHOD_OFFSET(%VtableReg), %MethodReg
2548   ///    ...
2549   ///    call or jmp *%MethodReg
2550   ///
2551   ///  Form 3:
2552   ///    ...
2553   ///    call or jmp *METHOD_OFFSET(%VtableReg)
2554   ///
2555   bool analyzeVirtualMethodCall(InstructionIterator ForwardBegin,
2556                                 InstructionIterator ForwardEnd,
2557                                 std::vector<MCInst *> &MethodFetchInsns,
2558                                 unsigned &VtableRegNum, unsigned &MethodRegNum,
2559                                 uint64_t &MethodOffset) const override {
2560     VtableRegNum = X86::NoRegister;
2561     MethodRegNum = X86::NoRegister;
2562     MethodOffset = 0;
2563 
2564     std::reverse_iterator<InstructionIterator> Itr(ForwardEnd);
2565     std::reverse_iterator<InstructionIterator> End(ForwardBegin);
2566 
2567     MCInst &CallInst = *Itr++;
2568     assert(isIndirectBranch(CallInst) || isCall(CallInst));
2569 
2570     unsigned BaseReg, IndexReg, SegmentReg;
2571     int64_t Scale, Disp;
2572     const MCExpr *DispExpr;
2573 
2574     // The call can just be jmp *offset(%reg) (Form 3 above)
2575     if (evaluateX86MemoryOperand(CallInst, &BaseReg, &Scale, &IndexReg, &Disp,
2576                                  &SegmentReg, &DispExpr)) {
2577       if (!DispExpr && BaseReg != X86::RIP && BaseReg != X86::RBP &&
2578           BaseReg != X86::NoRegister) {
2579         MethodRegNum = BaseReg;
2580         if (Scale == 1 && IndexReg == X86::NoRegister &&
2581             SegmentReg == X86::NoRegister) {
2582           VtableRegNum = MethodRegNum;
2583           MethodOffset = Disp;
2584           MethodFetchInsns.push_back(&CallInst);
2585           return true;
2586         }
2587       }
2588       return false;
2589     }
2590     if (CallInst.getOperand(0).isReg())
2591       MethodRegNum = CallInst.getOperand(0).getReg();
2592     else
2593       return false;
2594 
2595     if (MethodRegNum == X86::RIP || MethodRegNum == X86::RBP) {
2596       VtableRegNum = X86::NoRegister;
2597       MethodRegNum = X86::NoRegister;
2598       return false;
2599     }
2600 
2601     // Find the vtable load; it may or may not include the method offset
2602     while (Itr != End) {
2603       MCInst &CurInst = *Itr++;
2604       const MCInstrDesc &Desc = Info->get(CurInst.getOpcode());
2605       if (Desc.hasDefOfPhysReg(CurInst, MethodRegNum, *RegInfo)) {
2606         if (isLoad(CurInst) &&
2607             evaluateX86MemoryOperand(CurInst, &BaseReg, &Scale, &IndexReg,
2608                                      &Disp, &SegmentReg, &DispExpr)) {
2609           if (!DispExpr && Scale == 1 && BaseReg != X86::RIP &&
2610               BaseReg != X86::RBP && BaseReg != X86::NoRegister &&
2611               IndexReg == X86::NoRegister &&
2612               SegmentReg == X86::NoRegister) {
2613             VtableRegNum = BaseReg;
2614             MethodOffset = Disp;
2615             MethodFetchInsns.push_back(&CurInst);
2616             if (MethodOffset != 0)
2617               return true;
2618             break;
2619           }
2620         }
2621         return false;
2622       }
2623     }
2624 
2625     if (!VtableRegNum)
2626       return false;
2627 
2628     // Look for any ADD that affects the vtable register (Form 1 above).
2629     while (Itr != End) {
2630       MCInst &CurInst = *Itr++;
2631       const MCInstrDesc &Desc = Info->get(CurInst.getOpcode());
2632       if (Desc.hasDefOfPhysReg(CurInst, VtableRegNum, *RegInfo)) {
2633         if (isADDri(CurInst)) {
2634           assert(!MethodOffset);
2635           MethodOffset = CurInst.getOperand(2).getImm();
2636           MethodFetchInsns.insert(MethodFetchInsns.begin(), &CurInst);
2637           break;
2638         }
2639       }
2640     }
2641 
2642     return true;
2643   }
2644 
2645   bool createStackPointerIncrement(MCInst &Inst, int Size,
2646                                    bool NoFlagsClobber) const override {
2647     if (NoFlagsClobber) {
2648       Inst.setOpcode(X86::LEA64r);
2649       Inst.clear();
2650       Inst.addOperand(MCOperand::createReg(X86::RSP));
2651       Inst.addOperand(MCOperand::createReg(X86::RSP));        // BaseReg
2652       Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
2653       Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
2654       Inst.addOperand(MCOperand::createImm(-Size));           // Displacement
2655       Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
2656       return true;
2657     }
2658     Inst.setOpcode(X86::SUB64ri8);
2659     Inst.clear();
2660     Inst.addOperand(MCOperand::createReg(X86::RSP));
2661     Inst.addOperand(MCOperand::createReg(X86::RSP));
2662     Inst.addOperand(MCOperand::createImm(Size));
2663     return true;
2664   }
2665 
2666   bool createStackPointerDecrement(MCInst &Inst, int Size,
2667                                    bool NoFlagsClobber) const override {
2668     if (NoFlagsClobber) {
2669       Inst.setOpcode(X86::LEA64r);
2670       Inst.clear();
2671       Inst.addOperand(MCOperand::createReg(X86::RSP));
2672       Inst.addOperand(MCOperand::createReg(X86::RSP));        // BaseReg
2673       Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
2674       Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
2675       Inst.addOperand(MCOperand::createImm(Size));            // Displacement
2676       Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
2677       return true;
2678     }
2679     Inst.setOpcode(X86::ADD64ri8);
2680     Inst.clear();
2681     Inst.addOperand(MCOperand::createReg(X86::RSP));
2682     Inst.addOperand(MCOperand::createReg(X86::RSP));
2683     Inst.addOperand(MCOperand::createImm(Size));
2684     return true;
2685   }
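
       // Both helpers emit one of two equivalent forms (a sketch for
       // Size == 8; "increment" grows the downward-growing stack):
       //
       //   sub $0x8, %rsp   or, with NoFlagsClobber,   lea -0x8(%rsp), %rsp
       //   add $0x8, %rsp   or, with NoFlagsClobber,   lea 0x8(%rsp), %rsp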
2686 
2687   bool createSaveToStack(MCInst &Inst, const MCPhysReg &StackReg, int Offset,
2688                          const MCPhysReg &SrcReg, int Size) const override {
2689     unsigned NewOpcode;
2690     switch (Size) {
2691     default:
2692       return false;
2693     case 2:      NewOpcode = X86::MOV16mr; break;
2694     case 4:      NewOpcode = X86::MOV32mr; break;
2695     case 8:      NewOpcode = X86::MOV64mr; break;
2696     }
2697     Inst.setOpcode(NewOpcode);
2698     Inst.clear();
2699     Inst.addOperand(MCOperand::createReg(StackReg));        // BaseReg
2700     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
2701     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
2702     Inst.addOperand(MCOperand::createImm(Offset));          // Displacement
2703     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
2704     Inst.addOperand(MCOperand::createReg(SrcReg));
2705     return true;
2706   }
2707 
2708   bool createRestoreFromStack(MCInst &Inst, const MCPhysReg &StackReg,
2709                               int Offset, const MCPhysReg &DstReg,
2710                               int Size) const override {
2711     return createLoad(Inst, StackReg, /*Scale=*/1, /*IndexReg=*/X86::NoRegister,
2712                       Offset, nullptr, /*AddrSegmentReg=*/X86::NoRegister,
2713                       DstReg, Size);
2714   }
2715 
2716   bool createLoad(MCInst &Inst, const MCPhysReg &BaseReg, int64_t Scale,
2717                   const MCPhysReg &IndexReg, int64_t Offset,
2718                   const MCExpr *OffsetExpr, const MCPhysReg &AddrSegmentReg,
2719                   const MCPhysReg &DstReg, int Size) const override {
2720     unsigned NewOpcode;
2721     switch (Size) {
2722     default:
2723       return false;
2724     case 2:      NewOpcode = X86::MOV16rm; break;
2725     case 4:      NewOpcode = X86::MOV32rm; break;
2726     case 8:      NewOpcode = X86::MOV64rm; break;
2727     }
2728     Inst.setOpcode(NewOpcode);
2729     Inst.clear();
2730     Inst.addOperand(MCOperand::createReg(DstReg));
2731     Inst.addOperand(MCOperand::createReg(BaseReg));
2732     Inst.addOperand(MCOperand::createImm(Scale));
2733     Inst.addOperand(MCOperand::createReg(IndexReg));
2734     if (OffsetExpr)
2735       Inst.addOperand(MCOperand::createExpr(OffsetExpr)); // Displacement
2736     else
2737       Inst.addOperand(MCOperand::createImm(Offset)); // Displacement
2738     Inst.addOperand(MCOperand::createReg(AddrSegmentReg)); // AddrSegmentReg
2739     return true;
2740   }
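
       // E.g. (illustrative): createLoad(Inst, X86::RSP, /*Scale=*/1,
       // X86::NoRegister, /*Offset=*/8, nullptr, X86::NoRegister, X86::RBX,
       // /*Size=*/8) builds `mov 0x8(%rsp), %rbx` (MOV64rm).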
2741 
2742   void createLoadImmediate(MCInst &Inst, const MCPhysReg Dest,
2743                            uint32_t Imm) const override {
2744     Inst.setOpcode(X86::MOV64ri32);
2745     Inst.clear();
2746     Inst.addOperand(MCOperand::createReg(Dest));
2747     Inst.addOperand(MCOperand::createImm(Imm));
2748   }
2749 
2750   bool createIncMemory(MCInst &Inst, const MCSymbol *Target,
2751                        MCContext *Ctx) const override {
2752 
2753     Inst.setOpcode(X86::LOCK_INC64m);
2754     Inst.clear();
2755     Inst.addOperand(MCOperand::createReg(X86::RIP));        // BaseReg
2756     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
2757     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
2758 
2759     Inst.addOperand(MCOperand::createExpr(
2760         MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None,
2761                                 *Ctx)));                    // Displacement
2762     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
2763     return true;
2764   }
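
       // The generated instruction (illustrative) is an atomic RIP-relative
       // counter bump, e.g. for a counter living at symbol Target:
       //
       //   lock incq Target(%rip)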
2765 
2766   bool createIJmp32Frag(SmallVectorImpl<MCInst> &Insts,
2767                         const MCOperand &BaseReg, const MCOperand &Scale,
2768                         const MCOperand &IndexReg, const MCOperand &Offset,
2769                         const MCOperand &TmpReg) const override {
2770     // The code fragment we emit here is:
2771     //
2772     //  mov32 (%base, %index, scale), %tmpreg
2773     //  jmp *%tmpreg
2774     //
2775     MCInst IJmp;
2776     IJmp.setOpcode(X86::JMP64r);
2777     IJmp.addOperand(TmpReg);
2778 
2779     MCInst Load;
2780     Load.setOpcode(X86::MOV32rm);
2781     Load.addOperand(TmpReg);
2782     Load.addOperand(BaseReg);
2783     Load.addOperand(Scale);
2784     Load.addOperand(IndexReg);
2785     Load.addOperand(Offset);
2786     Load.addOperand(MCOperand::createReg(X86::NoRegister));
2787 
2788     Insts.push_back(Load);
2789     Insts.push_back(IJmp);
2790     return true;
2791   }
2792 
2793   bool createNoop(MCInst &Inst) const override {
2794     Inst.setOpcode(X86::NOOP);
2795     return true;
2796   }
2797 
2798   bool createReturn(MCInst &Inst) const override {
2799     Inst.setOpcode(X86::RET64);
2800     return true;
2801   }
2802 
2803   InstructionListType createInlineMemcpy(bool ReturnEnd) const override {
2804     InstructionListType Code;
2805     if (ReturnEnd)
2806       Code.emplace_back(MCInstBuilder(X86::LEA64r)
2807                             .addReg(X86::RAX)
2808                             .addReg(X86::RDI)
2809                             .addImm(1)
2810                             .addReg(X86::RDX)
2811                             .addImm(0)
2812                             .addReg(X86::NoRegister));
2813     else
2814       Code.emplace_back(MCInstBuilder(X86::MOV64rr)
2815                             .addReg(X86::RAX)
2816                             .addReg(X86::RDI));
2817 
2818     Code.emplace_back(MCInstBuilder(X86::MOV32rr)
2819                           .addReg(X86::ECX)
2820                           .addReg(X86::EDX));
2821     Code.emplace_back(MCInstBuilder(X86::REP_MOVSB_64));
2822 
2823     return Code;
2824   }
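
       // The emitted sequence assumes the usual memcpy argument registers
       // (%rdi, %rsi, %rdx) and looks like (a sketch):
       //
       //   mov %rdi, %rax    (or `lea (%rdi,%rdx), %rax` when ReturnEnd)
       //   mov %edx, %ecx
       //   rep movsb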
2825 
2826   InstructionListType createOneByteMemcpy() const override {
2827     InstructionListType Code;
2828     Code.emplace_back(MCInstBuilder(X86::MOV8rm)
2829                           .addReg(X86::CL)
2830                           .addReg(X86::RSI)
2831                           .addImm(0)
2832                           .addReg(X86::NoRegister)
2833                           .addImm(0)
2834                           .addReg(X86::NoRegister));
2835     Code.emplace_back(MCInstBuilder(X86::MOV8mr)
2836                           .addReg(X86::RDI)
2837                           .addImm(0)
2838                           .addReg(X86::NoRegister)
2839                           .addImm(0)
2840                           .addReg(X86::NoRegister)
2841                           .addReg(X86::CL));
2842     Code.emplace_back(MCInstBuilder(X86::MOV64rr)
2843                           .addReg(X86::RAX)
2844                           .addReg(X86::RDI));
2845     return Code;
2846   }
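
       // The emitted sequence (a sketch) copies a single byte and returns the
       // destination:
       //
       //   mov (%rsi), %cl
       //   mov %cl, (%rdi)
       //   mov %rdi, %rax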
2847 
2848   InstructionListType createCmpJE(MCPhysReg RegNo, int64_t Imm,
2849                                   const MCSymbol *Target,
2850                                   MCContext *Ctx) const override {
2851     InstructionListType Code;
2852     Code.emplace_back(MCInstBuilder(X86::CMP64ri8)
2853                           .addReg(RegNo)
2854                           .addImm(Imm));
2855     Code.emplace_back(MCInstBuilder(X86::JCC_1)
2856                           .addExpr(MCSymbolRefExpr::create(
2857                               Target, MCSymbolRefExpr::VK_None, *Ctx))
2858                           .addImm(X86::COND_E));
2859     return Code;
2860   }
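
       // The emitted pair (illustrative, for RegNo == %rdi and Imm == 1):
       //
       //   cmp $0x1, %rdi
       //   je  Target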
2861 
2862   Optional<Relocation>
2863   createRelocation(const MCFixup &Fixup,
2864                    const MCAsmBackend &MAB) const override {
2865     const MCFixupKindInfo &FKI = MAB.getFixupKindInfo(Fixup.getKind());
2866 
2867     assert(FKI.TargetOffset == 0 && "0-bit relocation offset expected");
2868     const uint64_t RelOffset = Fixup.getOffset();
2869 
2870     uint64_t RelType;
2871     if (FKI.Flags & MCFixupKindInfo::FKF_IsPCRel) {
2872       switch (FKI.TargetSize) {
2873       default:
2874         return NoneType();
2875       case  8: RelType = ELF::R_X86_64_PC8; break;
2876       case 16: RelType = ELF::R_X86_64_PC16; break;
2877       case 32: RelType = ELF::R_X86_64_PC32; break;
2878       case 64: RelType = ELF::R_X86_64_PC64; break;
2879       }
2880     } else {
2881       switch (FKI.TargetSize) {
2882       default:
2883         return NoneType();
2884       case  8: RelType = ELF::R_X86_64_8; break;
2885       case 16: RelType = ELF::R_X86_64_16; break;
2886       case 32: RelType = ELF::R_X86_64_32; break;
2887       case 64: RelType = ELF::R_X86_64_64; break;
2888       }
2889     }
2890 
2891     // Extract a symbol and an addend out of the fixup value expression.
2892     //
2893     // Only the following limited expression types are supported:
2894     //   Symbol + Addend
2895     //   Symbol
2896     uint64_t Addend = 0;
2897     MCSymbol *Symbol = nullptr;
2898     const MCExpr *ValueExpr = Fixup.getValue();
2899     if (ValueExpr->getKind() == MCExpr::Binary) {
2900       const auto *BinaryExpr = cast<MCBinaryExpr>(ValueExpr);
2901       assert(BinaryExpr->getOpcode() == MCBinaryExpr::Add &&
2902              "unexpected binary expression");
2903       const MCExpr *LHS = BinaryExpr->getLHS();
2904       assert(LHS->getKind() == MCExpr::SymbolRef && "unexpected LHS");
2905       Symbol = const_cast<MCSymbol *>(this->getTargetSymbol(LHS));
2906       const MCExpr *RHS = BinaryExpr->getRHS();
2907       assert(RHS->getKind() == MCExpr::Constant && "unexpected RHS");
2908       Addend = cast<MCConstantExpr>(RHS)->getValue();
2909     } else {
2910       assert(ValueExpr->getKind() == MCExpr::SymbolRef && "unexpected value");
2911       Symbol = const_cast<MCSymbol *>(this->getTargetSymbol(ValueExpr));
2912     }
2913 
2914     return Relocation({RelOffset, Symbol, RelType, Addend, 0});
2915   }
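
       // For instance (a sketch): a 32-bit PC-relative fixup at offset 0x10
       // whose value is `foo + 4` produces
       // Relocation{0x10, foo, R_X86_64_PC32, 4, 0}.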
2916 
2917   bool replaceImmWithSymbolRef(MCInst &Inst, const MCSymbol *Symbol,
2918                                int64_t Addend, MCContext *Ctx, int64_t &Value,
2919                                uint64_t RelType) const override {
2920     unsigned ImmOpNo = -1U;
2921 
2922     for (unsigned Index = 0; Index < MCPlus::getNumPrimeOperands(Inst);
2923          ++Index) {
2924       if (Inst.getOperand(Index).isImm()) {
2925         ImmOpNo = Index;
2926         // TODO: this is a bit hacky.  It finds the correct operand by
2927         // searching for a specific immediate value.  If no value is
2928         // provided it defaults to the last immediate operand found.
2929         // This could lead to unexpected results if the instruction
2930         // has more than one immediate with the same value.
2931         if (Inst.getOperand(ImmOpNo).getImm() == Value)
2932           break;
2933       }
2934     }
2935 
2936     if (ImmOpNo == -1U)
2937       return false;
2938 
2939     Value = Inst.getOperand(ImmOpNo).getImm();
2940 
2941     setOperandToSymbolRef(Inst, ImmOpNo, Symbol, Addend, Ctx, RelType);
2942 
2943     return true;
2944   }
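
       // E.g. (illustrative): for `mov $0x400000, %eax` with Value ==
       // 0x400000, the matching immediate operand is rewritten as a symbol
       // reference (Symbol + Addend) and the original immediate is returned
       // through Value.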
2945 
2946   bool replaceRegWithImm(MCInst &Inst, unsigned Register,
2947                          int64_t Imm) const override {
2948 
2949     enum CheckSignExt : uint8_t {
2950       NOCHECK = 0,
2951       CHECK8,
2952       CHECK32,
2953     };
2954 
2955     using CheckList = std::vector<std::pair<CheckSignExt, unsigned>>;
2956     struct InstInfo {
2957       // Size in bytes that Inst loads from memory.
2958       uint8_t DataSize;
2959 
2960       // True when the target operand has to be duplicated because the opcode
2961       // expects a LHS operand.
2962       bool HasLHS;
2963 
2964       // List of checks and corresponding opcodes to be used. We try to use the
2965       // smallest possible immediate value when various sizes are available,
2966       // hence we may need to check whether a larger constant fits in a smaller
2967       // immediate.
2968       CheckList Checks;
2969     };
2970 
2971     InstInfo I;
2972 
2973     switch (Inst.getOpcode()) {
2974     default: {
2975       switch (getPushSize(Inst)) {
2976 
2977       case 2: I = {2, false, {{CHECK8, X86::PUSH16i8}, {NOCHECK, X86::PUSHi16}}}; break;
2978       case 4: I = {4, false, {{CHECK8, X86::PUSH32i8}, {NOCHECK, X86::PUSHi32}}}; break;
2979       case 8: I = {8, false, {{CHECK8, X86::PUSH64i8},
2980                               {CHECK32, X86::PUSH64i32},
2981                               {NOCHECK, Inst.getOpcode()}}}; break;
2982       default: return false;
2983       }
2984       break;
2985     }
2986 
2987     // MOV
2988     case X86::MOV8rr:       I = {1, false, {{NOCHECK, X86::MOV8ri}}}; break;
2989     case X86::MOV16rr:      I = {2, false, {{NOCHECK, X86::MOV16ri}}}; break;
2990     case X86::MOV32rr:      I = {4, false, {{NOCHECK, X86::MOV32ri}}}; break;
2991     case X86::MOV64rr:      I = {8, false, {{CHECK32, X86::MOV64ri32},
2992                                             {NOCHECK, X86::MOV64ri}}}; break;
2993 
2994     case X86::MOV8mr:       I = {1, false, {{NOCHECK, X86::MOV8mi}}}; break;
2995     case X86::MOV16mr:      I = {2, false, {{NOCHECK, X86::MOV16mi}}}; break;
2996     case X86::MOV32mr:      I = {4, false, {{NOCHECK, X86::MOV32mi}}}; break;
2997     case X86::MOV64mr:      I = {8, false, {{CHECK32, X86::MOV64mi32},
2998                                             {NOCHECK, X86::MOV64mr}}}; break;
2999 
3000     // MOVZX
3001     case X86::MOVZX16rr8:   I = {1, false, {{NOCHECK, X86::MOV16ri}}}; break;
3002     case X86::MOVZX32rr8:   I = {1, false, {{NOCHECK, X86::MOV32ri}}}; break;
3003     case X86::MOVZX32rr16:  I = {2, false, {{NOCHECK, X86::MOV32ri}}}; break;
3004 
3005     // CMP
3006     case X86::CMP8rr:       I = {1, false, {{NOCHECK, X86::CMP8ri}}}; break;
3007     case X86::CMP16rr:      I = {2, false, {{CHECK8, X86::CMP16ri8},
3008                                             {NOCHECK, X86::CMP16ri}}}; break;
3009     case X86::CMP32rr:      I = {4, false, {{CHECK8, X86::CMP32ri8},
3010                                             {NOCHECK, X86::CMP32ri}}}; break;
3011     case X86::CMP64rr:      I = {8, false, {{CHECK8, X86::CMP64ri8},
3012                                             {CHECK32, X86::CMP64ri32},
3013                                             {NOCHECK, X86::CMP64rr}}}; break;
3014 
3015     // TEST
3016     case X86::TEST8rr:      I = {1, false, {{NOCHECK, X86::TEST8ri}}}; break;
3017     case X86::TEST16rr:     I = {2, false, {{NOCHECK, X86::TEST16ri}}}; break;
3018     case X86::TEST32rr:     I = {4, false, {{NOCHECK, X86::TEST32ri}}}; break;
3019     case X86::TEST64rr:     I = {8, false, {{CHECK32, X86::TEST64ri32},
3020                                             {NOCHECK, X86::TEST64rr}}}; break;
3021 
3022     // ADD
3023     case X86::ADD8rr:       I = {1, true, {{NOCHECK, X86::ADD8ri}}}; break;
3024     case X86::ADD16rr:      I = {2, true, {{CHECK8, X86::ADD16ri8},
3025                                            {NOCHECK, X86::ADD16ri}}}; break;
3026     case X86::ADD32rr:      I = {4, true, {{CHECK8, X86::ADD32ri8},
3027                                            {NOCHECK, X86::ADD32ri}}}; break;
3028     case X86::ADD64rr:      I = {8, true, {{CHECK8, X86::ADD64ri8},
3029                                            {CHECK32, X86::ADD64ri32},
3030                                            {NOCHECK, X86::ADD64rr}}}; break;
3031 
3032     // SUB
3033     case X86::SUB8rr:       I = {1, true, {{NOCHECK, X86::SUB8ri}}}; break;
3034     case X86::SUB16rr:      I = {2, true, {{CHECK8, X86::SUB16ri8},
3035                                            {NOCHECK, X86::SUB16ri}}}; break;
3036     case X86::SUB32rr:      I = {4, true, {{CHECK8, X86::SUB32ri8},
3037                                            {NOCHECK, X86::SUB32ri}}}; break;
3038     case X86::SUB64rr:      I = {8, true, {{CHECK8, X86::SUB64ri8},
3039                                            {CHECK32, X86::SUB64ri32},
3040                                            {NOCHECK, X86::SUB64rr}}}; break;
3041 
3042     // AND
3043     case X86::AND8rr:       I = {1, true, {{NOCHECK, X86::AND8ri}}}; break;
3044     case X86::AND16rr:      I = {2, true, {{CHECK8, X86::AND16ri8},
3045                                            {NOCHECK, X86::AND16ri}}}; break;
3046     case X86::AND32rr:      I = {4, true, {{CHECK8, X86::AND32ri8},
3047                                            {NOCHECK, X86::AND32ri}}}; break;
3048     case X86::AND64rr:      I = {8, true, {{CHECK8, X86::AND64ri8},
3049                                            {CHECK32, X86::AND64ri32},
3050                                            {NOCHECK, X86::AND64rr}}}; break;
3051 
3052     // OR
3053     case X86::OR8rr:        I = {1, true, {{NOCHECK, X86::OR8ri}}}; break;
3054     case X86::OR16rr:       I = {2, true, {{CHECK8, X86::OR16ri8},
3055                                            {NOCHECK, X86::OR16ri}}}; break;
3056     case X86::OR32rr:       I = {4, true, {{CHECK8, X86::OR32ri8},
3057                                            {NOCHECK, X86::OR32ri}}}; break;
3058     case X86::OR64rr:       I = {8, true, {{CHECK8, X86::OR64ri8},
3059                                            {CHECK32, X86::OR64ri32},
3060                                            {NOCHECK, X86::OR64rr}}}; break;
3061 
3062     // XOR
3063     case X86::XOR8rr:       I = {1, true, {{NOCHECK, X86::XOR8ri}}}; break;
3064     case X86::XOR16rr:      I = {2, true, {{CHECK8, X86::XOR16ri8},
3065                                            {NOCHECK, X86::XOR16ri}}}; break;
3066     case X86::XOR32rr:      I = {4, true, {{CHECK8, X86::XOR32ri8},
3067                                            {NOCHECK, X86::XOR32ri}}}; break;
3068     case X86::XOR64rr:      I = {8, true, {{CHECK8, X86::XOR64ri8},
3069                                            {CHECK32, X86::XOR64ri32},
3070                                            {NOCHECK, X86::XOR64rr}}}; break;
3071     }
3072 
3073     // Compute the new opcode.
3074     unsigned NewOpcode = 0;
3075     for (const std::pair<CheckSignExt, unsigned> &Check : I.Checks) {
3076       NewOpcode = Check.second;
3077       if (Check.first == NOCHECK)
3078         break;
3079       if (Check.first == CHECK8 && isInt<8>(Imm))
3080         break;
3081       if (Check.first == CHECK32 && isInt<32>(Imm))
3082         break;
3083     }
3084     if (NewOpcode == Inst.getOpcode())
3085       return false;
3086 
3087     const MCInstrDesc &InstDesc = Info->get(Inst.getOpcode());
3088 
3089     unsigned NumFound = 0;
3090     for (unsigned Index = InstDesc.getNumDefs() + (I.HasLHS ? 1 : 0),
3091                   E = InstDesc.getNumOperands();
3092          Index != E; ++Index)
3093       if (Inst.getOperand(Index).isReg() &&
3094           Inst.getOperand(Index).getReg() == Register)
3095         NumFound++;
3096 
3097     if (NumFound != 1)
3098       return false;
3099 
3100     MCOperand TargetOp = Inst.getOperand(0);
3101     Inst.clear();
3102     Inst.setOpcode(NewOpcode);
3103     Inst.addOperand(TargetOp);
3104     if (I.HasLHS)
3105       Inst.addOperand(TargetOp);
3106     Inst.addOperand(MCOperand::createImm(Imm));
3107 
3108     return true;
3109   }
3110 
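  // Replace every source-operand use of ToReplace (or an alias of it) in
  // Inst with ReplaceWith, resized to the width of the operand it replaces.
  // A rough sketch, for ToReplace == %rbx and ReplaceWith == %rcx:
  //   movq (%rbx), %rax  ->  movq (%rcx), %rax
  //   movl %ebx, %eax    ->  movl %ecx, %eax   (32-bit alias, resized)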
3111   bool replaceRegWithReg(MCInst &Inst, unsigned ToReplace,
3112                          unsigned ReplaceWith) const override {
3113 
3114     // Determine whether the opcode expects an LHS operand before iterating.
3115     bool HasLHS;
3116     if (isAND(Inst.getOpcode()) || isADD(Inst.getOpcode()) || isSUB(Inst)) {
3117       HasLHS = true;
3118     } else if (isPop(Inst) || isPush(Inst) || isCMP(Inst.getOpcode()) ||
3119                isTEST(Inst.getOpcode())) {
3120       HasLHS = false;
3121     } else {
3122       switch (Inst.getOpcode()) {
3123       case X86::MOV8rr:
3124       case X86::MOV8rm:
3125       case X86::MOV8mr:
3126       case X86::MOV8ri:
3127       case X86::MOV16rr:
3128       case X86::MOV16rm:
3129       case X86::MOV16mr:
3130       case X86::MOV16ri:
3131       case X86::MOV32rr:
3132       case X86::MOV32rm:
3133       case X86::MOV32mr:
3134       case X86::MOV32ri:
3135       case X86::MOV64rr:
3136       case X86::MOV64rm:
3137       case X86::MOV64mr:
3138       case X86::MOV64ri:
3139       case X86::MOVZX16rr8:
3140       case X86::MOVZX32rr8:
3141       case X86::MOVZX32rr16:
3142       case X86::MOVSX32rm8:
3143       case X86::MOVSX32rr8:
3144       case X86::MOVSX64rm32:
3145       case X86::LEA64r:
3146         HasLHS = false;
3147         break;
3148       default:
3149         return false;
3150       }
3151     }
3152 
3153     const MCInstrDesc &InstDesc = Info->get(Inst.getOpcode());
3154 
3155     bool FoundOne = false;
3156 
3157     // Iterate only through src operands that aren't also dest operands.
3158     // The alias set of ToReplace is loop-invariant, so compute it once.
3159     const BitVector RegAliases = getAliases(ToReplace, true);
3160     for (unsigned Index = InstDesc.getNumDefs() + (HasLHS ? 1 : 0),
3161                   E = InstDesc.getNumOperands();
3162          Index != E; ++Index) {
3163       if (!Inst.getOperand(Index).isReg() ||
3164           !RegAliases.test(Inst.getOperand(Index).getReg()))
3165         continue;
3166       // Resize the replacement register to match the operand's width.
3167       unsigned SizedReplaceWith = getAliasSized(
3168           ReplaceWith, getRegSize(Inst.getOperand(Index).getReg()));
3169       Inst.getOperand(Index) = MCOperand::createReg(SizedReplaceWith);
3170       FoundOne = true;
3171     }
3172 
3173     // Return true if at least one operand was replaced
3174     return FoundOne;
3175   }
3176 
3177   bool createUncondBranch(MCInst &Inst, const MCSymbol *TBB,
3178                           MCContext *Ctx) const override {
3179     Inst.setOpcode(X86::JMP_1);
3180     Inst.addOperand(MCOperand::createExpr(
3181         MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx)));
3182     return true;
3183   }
3184 
3185   bool createCall(MCInst &Inst, const MCSymbol *Target,
3186                   MCContext *Ctx) override {
3187     Inst.setOpcode(X86::CALL64pcrel32);
3188     Inst.addOperand(MCOperand::createExpr(
3189         MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx)));
3190     return true;
3191   }
3192 
3193   bool createTailCall(MCInst &Inst, const MCSymbol *Target,
3194                       MCContext *Ctx) override {
3195     return createDirectCall(Inst, Target, Ctx, /*IsTailCall*/ true);
3196   }
3197 
3198   void createLongTailCall(InstructionListType &Seq, const MCSymbol *Target,
3199                           MCContext *Ctx) override {
3200     Seq.clear();
3201     Seq.emplace_back();
3202     createDirectCall(Seq.back(), Target, Ctx, /*IsTailCall*/ true);
3203   }
3204 
3205   bool createTrap(MCInst &Inst) const override {
3206     Inst.clear();
3207     Inst.setOpcode(X86::TRAP);
3208     return true;
3209   }
3210 
3211   bool reverseBranchCondition(MCInst &Inst, const MCSymbol *TBB,
3212                               MCContext *Ctx) const override {
3213     unsigned InvCC = getInvertedCondCode(getCondCode(Inst));
3214     assert(InvCC != X86::COND_INVALID && "invalid branch instruction");
3215     Inst.getOperand(Info->get(Inst.getOpcode()).NumOperands - 1).setImm(InvCC);
3216     Inst.getOperand(0) = MCOperand::createExpr(
3217         MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx));
3218     return true;
3219   }
3220 
3221   bool replaceBranchCondition(MCInst &Inst, const MCSymbol *TBB, MCContext *Ctx,
3222                               unsigned CC) const override {
3223     if (CC == X86::COND_INVALID)
3224       return false;
3225     Inst.getOperand(Info->get(Inst.getOpcode()).NumOperands - 1).setImm(CC);
3226     Inst.getOperand(0) = MCOperand::createExpr(
3227         MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx));
3228     return true;
3229   }
3230 
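  // Map a condition code and its inverse to one canonical code, e.g. both
  // COND_E and COND_NE canonicalize to COND_E, so a conditional branch and
  // its inversion compare equal for the purpose of matching.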
3231   unsigned getCanonicalBranchCondCode(unsigned CC) const override {
3232     switch (CC) {
3233     default:           return X86::COND_INVALID;
3234 
3235     case X86::COND_E:  return X86::COND_E;
3236     case X86::COND_NE: return X86::COND_E;
3237 
3238     case X86::COND_L:  return X86::COND_L;
3239     case X86::COND_GE: return X86::COND_L;
3240 
3241     case X86::COND_LE: return X86::COND_G;
3242     case X86::COND_G:  return X86::COND_G;
3243 
3244     case X86::COND_B:  return X86::COND_B;
3245     case X86::COND_AE: return X86::COND_B;
3246 
3247     case X86::COND_BE: return X86::COND_A;
3248     case X86::COND_A:  return X86::COND_A;
3249 
3250     case X86::COND_S:  return X86::COND_S;
3251     case X86::COND_NS: return X86::COND_S;
3252 
3253     case X86::COND_P:  return X86::COND_P;
3254     case X86::COND_NP: return X86::COND_P;
3255 
3256     case X86::COND_O:  return X86::COND_O;
3257     case X86::COND_NO: return X86::COND_O;
3258     }
3259   }
3260 
3261   bool replaceBranchTarget(MCInst &Inst, const MCSymbol *TBB,
3262                            MCContext *Ctx) const override {
3263     assert((isCall(Inst) || isBranch(Inst)) && !isIndirectBranch(Inst) &&
3264            "Invalid instruction");
3265     Inst.getOperand(0) = MCOperand::createExpr(
3266         MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx));
3267     return true;
3268   }
3269 
3270   MCPhysReg getX86R11() const override { return X86::R11; }
3271 
3272   MCPhysReg getIntArgRegister(unsigned ArgNo) const override {
3273     // FIXME: this should depend on the calling convention.
3274     switch (ArgNo) {
3275     case 0:   return X86::RDI;
3276     case 1:   return X86::RSI;
3277     case 2:   return X86::RDX;
3278     case 3:   return X86::RCX;
3279     case 4:   return X86::R8;
3280     case 5:   return X86::R9;
3281     default:  return getNoRegister();
3282     }
3283   }
3284 
3285   void createPause(MCInst &Inst) const override {
3286     Inst.clear();
3287     Inst.setOpcode(X86::PAUSE);
3288   }
3289 
3290   void createLfence(MCInst &Inst) const override {
3291     Inst.clear();
3292     Inst.setOpcode(X86::LFENCE);
3293   }
3294 
3295   bool createDirectCall(MCInst &Inst, const MCSymbol *Target, MCContext *Ctx,
3296                         bool IsTailCall) override {
3297     Inst.clear();
3298     Inst.setOpcode(IsTailCall ? X86::JMP_4 : X86::CALL64pcrel32);
3299     Inst.addOperand(MCOperand::createExpr(
3300         MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx)));
3301     if (IsTailCall)
3302       setTailCall(Inst);
3303     return true;
3304   }
3305 
3306   void createShortJmp(InstructionListType &Seq, const MCSymbol *Target,
3307                       MCContext *Ctx, bool IsTailCall) override {
3308     Seq.clear();
3309     MCInst Inst;
3310     Inst.setOpcode(X86::JMP_1);
3311     Inst.addOperand(MCOperand::createExpr(
3312         MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx)));
3313     if (IsTailCall)
3314       setTailCall(Inst);
3315     Seq.emplace_back(Inst);
3316   }
3317 
3318   bool isConditionalMove(const MCInst &Inst) const override {
3319     unsigned OpCode = Inst.getOpcode();
3320     return (OpCode == X86::CMOV16rr || OpCode == X86::CMOV32rr ||
3321             OpCode == X86::CMOV64rr);
3322   }
3323 
3324   bool isBranchOnMem(const MCInst &Inst) const override {
3325     unsigned OpCode = Inst.getOpcode();
3326     if (OpCode == X86::CALL64m || (OpCode == X86::JMP32m && isTailCall(Inst)) ||
3327         OpCode == X86::JMP64m)
3328       return true;
3329 
3330     return false;
3331   }
3332 
3333   bool isBranchOnReg(const MCInst &Inst) const override {
3334     unsigned OpCode = Inst.getOpcode();
3335     if (OpCode == X86::CALL64r || (OpCode == X86::JMP32r && isTailCall(Inst)) ||
3336         OpCode == X86::JMP64r)
3337       return true;
3338 
3339     return false;
3340   }
3341 
3342   void createPushRegister(MCInst &Inst, MCPhysReg Reg,
3343                           unsigned Size) const override {
3344     Inst.clear();
3345     unsigned NewOpcode = 0;
3346     if (Reg == X86::EFLAGS) {
3347       switch (Size) {
3348       case 2: NewOpcode = X86::PUSHF16;  break;
3349       case 4: NewOpcode = X86::PUSHF32;  break;
3350       case 8: NewOpcode = X86::PUSHF64;  break;
3351       default:
3352         llvm_unreachable("Unexpected size");
3353       }
3354       Inst.setOpcode(NewOpcode);
3355       return;
3356     }
3357     switch (Size) {
3358     case 2: NewOpcode = X86::PUSH16r;  break;
3359     case 4: NewOpcode = X86::PUSH32r;  break;
3360     case 8: NewOpcode = X86::PUSH64r;  break;
3361     default:
3362       llvm_unreachable("Unexpected size");
3363     }
3364     Inst.setOpcode(NewOpcode);
3365     Inst.addOperand(MCOperand::createReg(Reg));
3366   }
3367 
3368   void createPopRegister(MCInst &Inst, MCPhysReg Reg,
3369                          unsigned Size) const override {
3370     Inst.clear();
3371     unsigned NewOpcode = 0;
3372     if (Reg == X86::EFLAGS) {
3373       switch (Size) {
3374       case 2: NewOpcode = X86::POPF16;  break;
3375       case 4: NewOpcode = X86::POPF32;  break;
3376       case 8: NewOpcode = X86::POPF64;  break;
3377       default:
3378         llvm_unreachable("Unexpected size");
3379       }
3380       Inst.setOpcode(NewOpcode);
3381       return;
3382     }
3383     switch (Size) {
3384     case 2: NewOpcode = X86::POP16r;  break;
3385     case 4: NewOpcode = X86::POP32r;  break;
3386     case 8: NewOpcode = X86::POP64r;  break;
3387     default:
3388       llvm_unreachable("Unexpected size");
3389     }
3390     Inst.setOpcode(NewOpcode);
3391     Inst.addOperand(MCOperand::createReg(Reg));
3392   }
3393 
3394   void createPushFlags(MCInst &Inst, unsigned Size) const override {
3395     return createPushRegister(Inst, X86::EFLAGS, Size);
3396   }
3397 
3398   void createPopFlags(MCInst &Inst, unsigned Size) const override {
3399     return createPopRegister(Inst, X86::EFLAGS, Size);
3400   }
3401 
3402   void createAddRegImm(MCInst &Inst, MCPhysReg Reg, int64_t Value,
3403                        unsigned Size) const {
3404     unsigned int Opcode;
3405     switch (Size) {
3406     case 1: Opcode = X86::ADD8ri; break;
3407     case 2: Opcode = X86::ADD16ri; break;
3408     case 4: Opcode = X86::ADD32ri; break;
3409     default:
3410       llvm_unreachable("Unexpected size");
3411     }
3412     Inst.clear();
3413     Inst.setOpcode(Opcode);
3414     Inst.addOperand(MCOperand::createReg(Reg));
3415     Inst.addOperand(MCOperand::createReg(Reg));
3416     Inst.addOperand(MCOperand::createImm(Value));
3417   }
3418 
3419   void createClearRegWithNoEFlagsUpdate(MCInst &Inst, MCPhysReg Reg,
3420                                         unsigned Size) const {
3421     unsigned int Opcode;
3422     switch (Size) {
3423     case 1: Opcode = X86::MOV8ri; break;
3424     case 2: Opcode = X86::MOV16ri; break;
3425     case 4: Opcode = X86::MOV32ri; break;
3426     case 8: Opcode = X86::MOV64ri; break;
3427     default:
3428       llvm_unreachable("Unexpected size");
3429     }
3430     Inst.clear();
3431     Inst.setOpcode(Opcode);
3432     Inst.addOperand(MCOperand::createReg(Reg));
3433     Inst.addOperand(MCOperand::createImm(0));
3434   }
3435 
3436   void createX86SaveOVFlagToRegister(MCInst &Inst, MCPhysReg Reg) const {
3437     Inst.clear();
3438     Inst.setOpcode(X86::SETCCr);
3439     Inst.addOperand(MCOperand::createReg(Reg));
3440     Inst.addOperand(MCOperand::createImm(X86::COND_O));
3441   }
3442 
3443   void createX86Lahf(MCInst &Inst) const {
3444     Inst.clear();
3445     Inst.setOpcode(X86::LAHF);
3446   }
3447 
3448   void createX86Sahf(MCInst &Inst) const {
3449     Inst.clear();
3450     Inst.setOpcode(X86::SAHF);
3451   }
3452 
3453   void createInstrIncMemory(InstructionListType &Instrs, const MCSymbol *Target,
3454                             MCContext *Ctx, bool IsLeaf) const override {
3455     unsigned int I = 0;
3456 
3457     Instrs.resize(IsLeaf ? 13 : 11);
3458     // Don't clobber the application's red zone (x86-64 SysV, leaf functions)
3459     if (IsLeaf)
3460       createStackPointerIncrement(Instrs[I++], 128,
3461                                   /*NoFlagsClobber=*/true);
3462 
3463     // For performance, LAHF/SAHF are used to save and restore EFLAGS
3464     // instead of PUSHF/POPF, based on the optimization discussed at
3465     // https://reviews.llvm.org/D6629
3466     // PUSHF
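    // The PUSHF emulation below roughly corresponds to:
    //   push %rax         ;; save the original %rax
    //   mov  $0, %rax     ;; clear without touching EFLAGS (unlike xor)
    //   lahf              ;; %ah <- SF:ZF:AF:PF:CF
    //   push %rax         ;; save the LAHF result
    //   mov  $0, %rax
    //   seto %al          ;; %al <- OF, the one flag LAHF does not capture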
3467     createPushRegister(Instrs[I++], X86::RAX, 8);
3468     createClearRegWithNoEFlagsUpdate(Instrs[I++], X86::RAX, 8);
3469     createX86Lahf(Instrs[I++]);
3470     createPushRegister(Instrs[I++], X86::RAX, 8);
3471     createClearRegWithNoEFlagsUpdate(Instrs[I++], X86::RAX, 8);
3472     createX86SaveOVFlagToRegister(Instrs[I++], X86::AL);
3473     // LOCK INC
3474     createIncMemory(Instrs[I++], Target, Ctx);
3475     // POPF
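    // The POPF emulation: %al still holds the OF bit captured by SETO, so
    //   add  $127, %al    ;; 127 + 1 overflows, 127 + 0 does not: OF restored
    //   pop  %rax         ;; reload the LAHF result into %ah
    //   sahf              ;; restore SF:ZF:AF:PF:CF from %ah
    //   pop  %rax         ;; restore the original %rax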
3476     createAddRegImm(Instrs[I++], X86::AL, 127, 1);
3477     createPopRegister(Instrs[I++], X86::RAX, 8);
3478     createX86Sahf(Instrs[I++]);
3479     createPopRegister(Instrs[I++], X86::RAX, 8);
3480 
3481     if (IsLeaf)
3482       createStackPointerDecrement(Instrs[I], 128,
3483                                   /*NoFlagsClobber=*/true);
3484   }
3485 
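  // The helpers below build X86 memory references, which in MC are five
  // consecutive operands: BaseReg, ScaleAmt, IndexReg, Displacement and
  // AddrSegmentReg. For example, XCHG64rm with MemBaseReg == %rsp and
  // Disp == 0 encodes xchg %reg, (%rsp).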
3486   void createSwap(MCInst &Inst, MCPhysReg Source, MCPhysReg MemBaseReg,
3487                   int64_t Disp) const {
3488     Inst.setOpcode(X86::XCHG64rm);
3489     Inst.addOperand(MCOperand::createReg(Source));
3490     Inst.addOperand(MCOperand::createReg(Source));
3491     Inst.addOperand(MCOperand::createReg(MemBaseReg));      // BaseReg
3492     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
3493     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
3494     Inst.addOperand(MCOperand::createImm(Disp));            // Displacement
3495     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
3496   }
3497 
3498   void createIndirectBranch(MCInst &Inst, MCPhysReg MemBaseReg,
3499                             int64_t Disp) const {
3500     Inst.setOpcode(X86::JMP64m);
3501     Inst.addOperand(MCOperand::createReg(MemBaseReg));      // BaseReg
3502     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
3503     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
3504     Inst.addOperand(MCOperand::createImm(Disp));            // Displacement
3505     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
3506   }
3507 
3508   InstructionListType createInstrumentedIndirectCall(const MCInst &CallInst,
3509                                                      bool TailCall,
3510                                                      MCSymbol *HandlerFuncAddr,
3511                                                      int CallSiteID,
3512                                                      MCContext *Ctx) override {
3513     // Check if the target address expression used in the original indirect call
3514     // uses the stack pointer, which we are going to clobber.
3515     static BitVector SPAliases(getAliases(X86::RSP));
3516     bool UsesSP = false;
3517     // Skip defs.
3518     for (unsigned I = Info->get(CallInst.getOpcode()).getNumDefs(),
3519                   E = MCPlus::getNumPrimeOperands(CallInst);
3520          I != E; ++I) {
3521       const MCOperand &Operand = CallInst.getOperand(I);
3522       if (Operand.isReg() && SPAliases[Operand.getReg()]) {
3523         UsesSP = true;
3524         break;
3525       }
3526     }
3527 
3528     InstructionListType Insts;
3529     MCPhysReg TempReg = getIntArgRegister(0);
3530     // Code sequence used to enter indirect call instrumentation helper:
3531     //   push %rdi
3532     //   add $8, %rsp       ;; $rsp may be used in target, so fix it to prev val
3533     //   movq target, %rdi  ;; via convertIndirectCallTargetToLoad
3534     //   sub $8, %rsp       ;; restore correct stack value
3535     //   push %rdi
3536     //   movq $CallSiteID, %rdi
3537     //   push %rdi
3538     //   callq/jmp HandlerFuncAddr
3539     Insts.emplace_back();
3540     createPushRegister(Insts.back(), TempReg, 8);
3541     if (UsesSP) { // Only adjust SP if we really need to
3542       Insts.emplace_back();
3543       createStackPointerDecrement(Insts.back(), 8, /*NoFlagsClobber=*/false);
3544     }
3545     Insts.emplace_back(CallInst);
3546     // Insts.back() and CallInst now share the same annotation instruction.
3547     // Strip it from Insts.back(), only preserving tail call annotation.
3548     stripAnnotations(Insts.back(), /*KeepTC=*/true);
3549     convertIndirectCallToLoad(Insts.back(), TempReg);
3550     if (UsesSP) {
3551       Insts.emplace_back();
3552       createStackPointerIncrement(Insts.back(), 8, /*NoFlagsClobber=*/false);
3553     }
3554     Insts.emplace_back();
3555     createPushRegister(Insts.back(), TempReg, 8);
3556     Insts.emplace_back();
3557     createLoadImmediate(Insts.back(), TempReg, CallSiteID);
3558     Insts.emplace_back();
3559     createPushRegister(Insts.back(), TempReg, 8);
3560     Insts.emplace_back();
3561     createDirectCall(Insts.back(), HandlerFuncAddr, Ctx,
3562                      /*IsTailCall=*/TailCall);
3563     // Carry over metadata
3564     for (int I = MCPlus::getNumPrimeOperands(CallInst),
3565              E = CallInst.getNumOperands();
3566          I != E; ++I)
3567       Insts.back().addOperand(CallInst.getOperand(I));
3568 
3569     return Insts;
3570   }
3571 
3572   InstructionListType createInstrumentedIndCallHandlerExitBB() const override {
3573     const MCPhysReg TempReg = getIntArgRegister(0);
3574     // We just need to undo the sequence created for every ind call in
3575     // instrumentIndirectTarget(), which can be accomplished minimally with:
3576     //   popfq
3577     //   pop %rdi
3578     //   add $16, %rsp
3579     //   xchg (%rsp), %rdi
3580     //   jmp *-8(%rsp)
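    // A sketch of the unwinding (stack top first as this block is entered):
    //   [EFLAGS][ret-addr][CallSiteID][target][saved %rdi]
    // popfq drops EFLAGS; pop grabs ret-addr into %rdi as a scratch value;
    // add $16 skips CallSiteID and target; xchg restores %rdi and leaves
    // ret-addr on top of the stack, so jmp *-8(%rsp) reaches the target
    // slot and the callee returns as if it had been called directly.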
3581     InstructionListType Insts(5);
3582     createPopFlags(Insts[0], 8);
3583     createPopRegister(Insts[1], TempReg, 8);
3584     createStackPointerDecrement(Insts[2], 16, /*NoFlagsClobber=*/false);
3585     createSwap(Insts[3], TempReg, X86::RSP, 0);
3586     createIndirectBranch(Insts[4], X86::RSP, -8);
3587     return Insts;
3588   }
3589 
3590   InstructionListType
3591   createInstrumentedIndTailCallHandlerExitBB() const override {
3592     const MCPhysReg TempReg = getIntArgRegister(0);
3593     // Same thing as above, but for tail calls
3594     //   popfq
3595     //   add $16, %rsp
3596     //   pop %rdi
3597     //   jmp *-16(%rsp)
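    // Tail calls enter the handler without a return address on the stack:
    //   [EFLAGS][CallSiteID][target][saved %rdi]
    // After popfq, add $16 and pop %rdi the stack is fully restored and the
    // target value is left at -16(%rsp), hence the indirect jump below.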
3598     InstructionListType Insts(4);
3599     createPopFlags(Insts[0], 8);
3600     createStackPointerDecrement(Insts[1], 16, /*NoFlagsClobber=*/false);
3601     createPopRegister(Insts[2], TempReg, 8);
3602     createIndirectBranch(Insts[3], X86::RSP, -16);
3603     return Insts;
3604   }
3605 
3606   InstructionListType
3607   createInstrumentedIndCallHandlerEntryBB(const MCSymbol *InstrTrampoline,
3608                                           const MCSymbol *IndCallHandler,
3609                                           MCContext *Ctx) override {
3610     const MCPhysReg TempReg = getIntArgRegister(0);
3611     // Code sequence used to check whether InstrTrampoline was initialized,
3612     // and to call it if so; it returns via IndCallHandler.
3613     //   pushfq
3614     //   mov    InstrTrampoline,%rdi
3615     //   cmp    $0x0,%rdi
3616     //   je     IndCallHandler
3617     //   callq  *%rdi
3618     //   jmpq   IndCallHandler
3619     InstructionListType Insts;
3620     Insts.emplace_back();
3621     createPushFlags(Insts.back(), 8);
3622     Insts.emplace_back();
3623     createMove(Insts.back(), InstrTrampoline, TempReg, Ctx);
3624     InstructionListType cmpJmp = createCmpJE(TempReg, 0, IndCallHandler, Ctx);
3625     Insts.insert(Insts.end(), cmpJmp.begin(), cmpJmp.end());
3626     Insts.emplace_back();
3627     Insts.back().setOpcode(X86::CALL64r);
3628     Insts.back().addOperand(MCOperand::createReg(TempReg));
3629     Insts.emplace_back();
3630     createDirectCall(Insts.back(), IndCallHandler, Ctx, /*IsTailCall*/ true);
3631     return Insts;
3632   }
3633 
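  // The getters below materialize tiny runtime accessor functions; e.g.
  // createNumCountersGetter emits, roughly:
  //   mov __bolt_num_counters(%rip), %eax
  //   retq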
3634   InstructionListType createNumCountersGetter(MCContext *Ctx) const override {
3635     InstructionListType Insts(2);
3636     MCSymbol *NumLocs = Ctx->getOrCreateSymbol("__bolt_num_counters");
3637     createMove(Insts[0], NumLocs, X86::EAX, Ctx);
3638     createReturn(Insts[1]);
3639     return Insts;
3640   }
3641 
3642   InstructionListType
3643   createInstrLocationsGetter(MCContext *Ctx) const override {
3644     InstructionListType Insts(2);
3645     MCSymbol *Locs = Ctx->getOrCreateSymbol("__bolt_instr_locations");
3646     createLea(Insts[0], Locs, X86::EAX, Ctx);
3647     createReturn(Insts[1]);
3648     return Insts;
3649   }
3650 
3651   InstructionListType createInstrTablesGetter(MCContext *Ctx) const override {
3652     InstructionListType Insts(2);
3653     MCSymbol *Locs = Ctx->getOrCreateSymbol("__bolt_instr_tables");
3654     createLea(Insts[0], Locs, X86::EAX, Ctx);
3655     createReturn(Insts[1]);
3656     return Insts;
3657   }
3658 
3659   InstructionListType createInstrNumFuncsGetter(MCContext *Ctx) const override {
3660     InstructionListType Insts(2);
3661     MCSymbol *NumFuncs = Ctx->getOrCreateSymbol("__bolt_instr_num_funcs");
3662     createMove(Insts[0], NumFuncs, X86::EAX, Ctx);
3663     createReturn(Insts[1]);
3664     return Insts;
3665   }
3666 
3667   InstructionListType createSymbolTrampoline(const MCSymbol *TgtSym,
3668                                              MCContext *Ctx) const override {
3669     InstructionListType Insts(1);
3670     createUncondBranch(Insts[0], TgtSym, Ctx);
3671     return Insts;
3672   }
3673 
3674   InstructionListType createDummyReturnFunction(MCContext *Ctx) const override {
3675     InstructionListType Insts(1);
3676     createReturn(Insts[0]);
3677     return Insts;
3678   }
3679 
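  // Promote an indirect call into a chain of compare-and-branch blocks, one
  // per profiled target, with the original call kept in a cold block. A
  // rough sketch of the emitted layout:
  //   cmp $target0, <call target> ; jne next0 ; call target0 ; <to merge>
  //   next0: cmp $target1, ...    ; jne next1 ; call target1 ; jmp merge
  //   ...
  //   nextN: <original indirect call>    ;; cold block
  //   merge: <instructions following the original call>
  // The hottest target (index 0) falls through to the merge block; jump
  // tables emit a JE to each target instead of promoted calls.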
3680   BlocksVectorTy indirectCallPromotion(
3681       const MCInst &CallInst,
3682       const std::vector<std::pair<MCSymbol *, uint64_t>> &Targets,
3683       const std::vector<std::pair<MCSymbol *, uint64_t>> &VtableSyms,
3684       const std::vector<MCInst *> &MethodFetchInsns,
3685       const bool MinimizeCodeSize, MCContext *Ctx) override {
3686     const bool IsTailCall = isTailCall(CallInst);
3687     const bool IsJumpTable = getJumpTable(CallInst) != 0;
3688     BlocksVectorTy Results;
3689 
3690     // Label for the current code block.
3691     MCSymbol *NextTarget = nullptr;
3692 
3693     // The join block which contains all the instructions following CallInst.
3694     // MergeBlock remains null if CallInst is a tail call.
3695     MCSymbol *MergeBlock = nullptr;
3696 
3697     unsigned FuncAddrReg = X86::R10;
3698 
3699     const bool LoadElim = !VtableSyms.empty();
3700     assert((!LoadElim || VtableSyms.size() == Targets.size()) &&
3701            "There must be a vtable entry for every method "
3702            "in the targets vector.");
3703 
3704     if (MinimizeCodeSize && !LoadElim) {
3705       std::set<unsigned> UsedRegs;
3706 
3707       for (unsigned int I = 0; I < MCPlus::getNumPrimeOperands(CallInst); ++I) {
3708         const MCOperand &Op = CallInst.getOperand(I);
3709         if (Op.isReg())
3710           UsedRegs.insert(Op.getReg());
3711       }
3712 
3713       if (UsedRegs.count(X86::R10) == 0)
3714         FuncAddrReg = X86::R10;
3715       else if (UsedRegs.count(X86::R11) == 0)
3716         FuncAddrReg = X86::R11;
3717       else
3718         return Results;
3719     }
3720 
3721     const auto jumpToMergeBlock = [&](InstructionListType &NewCall) {
3722       assert(MergeBlock);
3723       NewCall.push_back(CallInst);
3724       MCInst &Merge = NewCall.back();
3725       Merge.clear();
3726       createUncondBranch(Merge, MergeBlock, Ctx);
3727     };
3728 
3729     for (unsigned int i = 0; i < Targets.size(); ++i) {
3730       Results.emplace_back(NextTarget, InstructionListType());
3731       InstructionListType *NewCall = &Results.back().second;
3732 
3733       if (MinimizeCodeSize && !LoadElim) {
3734         // Load the call target into FuncAddrReg.
3735         NewCall->push_back(CallInst); // Copy CallInst in order to get SMLoc
3736         MCInst &Target = NewCall->back();
3737         Target.clear();
3738         Target.setOpcode(X86::MOV64ri32);
3739         Target.addOperand(MCOperand::createReg(FuncAddrReg));
3740         if (Targets[i].first) {
3741           // FIXME: is an expression operand valid for MOV64ri32 here?
3742           Target.addOperand(MCOperand::createExpr(MCSymbolRefExpr::create(
3743               Targets[i].first, MCSymbolRefExpr::VK_None, *Ctx)));
3744         } else {
3745           const uint64_t Addr = Targets[i].second;
3746           // Immediate address is out of the sign-extended 32-bit range.
3747           if (int64_t(Addr) != int64_t(int32_t(Addr)))
3748             return BlocksVectorTy();
3749 
3750           Target.addOperand(MCOperand::createImm(Addr));
3751         }
3752 
3753         // Compare current call target to a specific address.
3754         NewCall->push_back(CallInst);
3755         MCInst &Compare = NewCall->back();
3756         Compare.clear();
3757         if (isBranchOnReg(CallInst))
3758           Compare.setOpcode(X86::CMP64rr);
3759         else if (CallInst.getOpcode() == X86::CALL64pcrel32)
3760           Compare.setOpcode(X86::CMP64ri32);
3761         else
3762           Compare.setOpcode(X86::CMP64rm);
3763 
3764         Compare.addOperand(MCOperand::createReg(FuncAddrReg));
3765 
3766         // TODO: Would be preferable to only load this value once.
3767         for (unsigned Idx = 0;
3768              Idx < Info->get(CallInst.getOpcode()).getNumOperands(); ++Idx)
3769           if (!CallInst.getOperand(Idx).isInst())
3770             Compare.addOperand(CallInst.getOperand(Idx));
3771       } else {
3772         // Compare current call target to a specific address.
3773         NewCall->push_back(CallInst);
3774         MCInst &Compare = NewCall->back();
3775         Compare.clear();
3776         if (isBranchOnReg(CallInst))
3777           Compare.setOpcode(X86::CMP64ri32);
3778         else
3779           Compare.setOpcode(X86::CMP64mi32);
3780 
3781         // Original call address.
3782         for (unsigned Idx = 0;
3783              Idx < Info->get(CallInst.getOpcode()).getNumOperands(); ++Idx)
3784           if (!CallInst.getOperand(Idx).isInst())
3785             Compare.addOperand(CallInst.getOperand(Idx));
3786 
3787         // Target address.
3788         if (Targets[i].first || LoadElim) {
3789           const MCSymbol *Sym =
3790               LoadElim ? VtableSyms[i].first : Targets[i].first;
3791           const uint64_t Addend = LoadElim ? VtableSyms[i].second : 0;
3792           const MCExpr *Expr = MCSymbolRefExpr::create(Sym, *Ctx);
3793           if (Addend)
3794             Expr = MCBinaryExpr::createAdd(
3795                 Expr, MCConstantExpr::create(Addend, *Ctx), *Ctx);
3796           Compare.addOperand(MCOperand::createExpr(Expr));
3797         } else {
3798           const uint64_t Addr = Targets[i].second;
3799           // Immediate address is out of the sign-extended 32-bit range.
3800           if (int64_t(Addr) != int64_t(int32_t(Addr)))
3801             return BlocksVectorTy();
3802 
3803           Compare.addOperand(MCOperand::createImm(Addr));
3804         }
3805       }
3806 
3807       // jump to next target compare.
3808       NextTarget =
3809           Ctx->createNamedTempSymbol(); // generate label for the next block
3810       NewCall->push_back(CallInst);
3811 
3812       if (IsJumpTable) {
3813         MCInst &Je = NewCall->back();
3814 
3815         // Jump directly to the case target if the addresses match.
3816         Je.clear();
3817         Je.setOpcode(X86::JCC_1);
3818         if (Targets[i].first)
3819           Je.addOperand(MCOperand::createExpr(MCSymbolRefExpr::create(
3820               Targets[i].first, MCSymbolRefExpr::VK_None, *Ctx)));
3821         else
3822           Je.addOperand(MCOperand::createImm(Targets[i].second));
3823 
3824         Je.addOperand(MCOperand::createImm(X86::COND_E));
3825         assert(!isInvoke(CallInst));
3826       } else {
3827         MCInst &Jne = NewCall->back();
3828 
3829         // Jump to next compare if target addresses don't match.
3830         Jne.clear();
3831         Jne.setOpcode(X86::JCC_1);
3832         Jne.addOperand(MCOperand::createExpr(MCSymbolRefExpr::create(
3833             NextTarget, MCSymbolRefExpr::VK_None, *Ctx)));
3834         Jne.addOperand(MCOperand::createImm(X86::COND_NE));
3835 
3836         // Call specific target directly.
3837         Results.emplace_back(Ctx->createNamedTempSymbol(),
3838                              InstructionListType());
3839         NewCall = &Results.back().second;
3840         NewCall->push_back(CallInst);
3841         MCInst &CallOrJmp = NewCall->back();
3842 
3843         CallOrJmp.clear();
3844 
3845         if (MinimizeCodeSize && !LoadElim) {
3846           CallOrJmp.setOpcode(IsTailCall ? X86::JMP32r : X86::CALL64r);
3847           CallOrJmp.addOperand(MCOperand::createReg(FuncAddrReg));
3848         } else {
3849           CallOrJmp.setOpcode(IsTailCall ? X86::JMP_4 : X86::CALL64pcrel32);
3850 
3851           if (Targets[i].first)
3852             CallOrJmp.addOperand(MCOperand::createExpr(MCSymbolRefExpr::create(
3853                 Targets[i].first, MCSymbolRefExpr::VK_None, *Ctx)));
3854           else
3855             CallOrJmp.addOperand(MCOperand::createImm(Targets[i].second));
3856         }
3857         if (IsTailCall)
3858           setTailCall(CallOrJmp);
3859 
3860         if (CallOrJmp.getOpcode() == X86::CALL64r ||
3861             CallOrJmp.getOpcode() == X86::CALL64pcrel32) {
3862           if (Optional<uint32_t> Offset = getOffset(CallInst))
3863             // Annotate as a duplicated call, carrying the original offset.
3864             setOffset(CallOrJmp, *Offset);
3865         }
3866 
3867         if (isInvoke(CallInst) && !isInvoke(CallOrJmp)) {
3868           // Copy over any EH or GNU args size information from the original
3869           // call.
3870           Optional<MCPlus::MCLandingPad> EHInfo = getEHInfo(CallInst);
3871           if (EHInfo)
3872             addEHInfo(CallOrJmp, *EHInfo);
3873           int64_t GnuArgsSize = getGnuArgsSize(CallInst);
3874           if (GnuArgsSize >= 0)
3875             addGnuArgsSize(CallOrJmp, GnuArgsSize);
3876         }
3877 
3878         if (!IsTailCall) {
3879           // The fallthrough block for the most common target should be
3880           // the merge block.
3881           if (i == 0) {
3882             // Fallthrough to merge block.
3883             MergeBlock = Ctx->createNamedTempSymbol();
3884           } else {
3885             // Insert jump to the merge block if we are not doing a fallthrough.
3886             jumpToMergeBlock(*NewCall);
3887           }
3888         }
3889       }
3890     }
3891 
3892     // Cold call block.
3893     Results.emplace_back(NextTarget, InstructionListType());
3894     InstructionListType &NewCall = Results.back().second;
3895     for (const MCInst *Inst : MethodFetchInsns)
3896       if (Inst != &CallInst)
3897         NewCall.push_back(*Inst);
3898     NewCall.push_back(CallInst);
3899 
3900     // Jump to merge block from cold call block
3901     if (!IsTailCall && !IsJumpTable) {
3902       jumpToMergeBlock(NewCall);
3903 
3904       // Record merge block
3905       Results.emplace_back(MergeBlock, InstructionListType());
3906     }
3907 
3908     return Results;
3909   }
3910 
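  // Promote an indirect jump through a jump table into explicit
  // compare-and-branch blocks on the jump-table index, roughly:
  //   cmp $case0, %IndexReg ; je target0
  //   cmp $case1, %IndexReg ; je target1
  //   ...
  //   <original target fetch and indirect jump>   ;; cold default block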
3911   BlocksVectorTy jumpTablePromotion(
3912       const MCInst &IJmpInst,
3913       const std::vector<std::pair<MCSymbol *, uint64_t>> &Targets,
3914       const std::vector<MCInst *> &TargetFetchInsns,
3915       MCContext *Ctx) const override {
3916     assert(getJumpTable(IJmpInst) != 0);
3917     uint16_t IndexReg = getAnnotationAs<uint16_t>(IJmpInst, "JTIndexReg");
3918     if (IndexReg == 0)
3919       return BlocksVectorTy();
3920 
3921     BlocksVectorTy Results;
3922 
3923     // Label for the current code block.
3924     MCSymbol *NextTarget = nullptr;
3925 
3926     for (unsigned int i = 0; i < Targets.size(); ++i) {
3927       Results.emplace_back(NextTarget, InstructionListType());
3928       InstructionListType *CurBB = &Results.back().second;
3929 
3930       // Compare current index to a specific index.
3931       CurBB->emplace_back(MCInst());
3932       MCInst &CompareInst = CurBB->back();
3933       CompareInst.setLoc(IJmpInst.getLoc());
3934       CompareInst.setOpcode(X86::CMP64ri32);
3935       CompareInst.addOperand(MCOperand::createReg(IndexReg));
3936 
3937       const uint64_t CaseIdx = Targets[i].second;
3938       // Case index is out of the sign-extended 32-bit range.
3939       if (int64_t(CaseIdx) != int64_t(int32_t(CaseIdx)))
3940         return BlocksVectorTy();
3941 
3942       CompareInst.addOperand(MCOperand::createImm(CaseIdx));
3943       shortenInstruction(CompareInst);
3944 
3945       // jump to next target compare.
3946       NextTarget =
3947           Ctx->createNamedTempSymbol(); // generate label for the next block
3948       CurBB->push_back(MCInst());
3949 
3950       MCInst &JEInst = CurBB->back();
3951       JEInst.setLoc(IJmpInst.getLoc());
3952 
3953       // Jump to target if indices match
3954       JEInst.setOpcode(X86::JCC_1);
3955       JEInst.addOperand(MCOperand::createExpr(MCSymbolRefExpr::create(
3956           Targets[i].first, MCSymbolRefExpr::VK_None, *Ctx)));
3957       JEInst.addOperand(MCOperand::createImm(X86::COND_E));
3958     }
3959 
3960     // Cold block with the original indirect jump (default case).
3961     Results.emplace_back(NextTarget, InstructionListType());
3962     InstructionListType &CurBB = Results.back().second;
3963     for (const MCInst *Inst : TargetFetchInsns)
3964       if (Inst != &IJmpInst)
3965         CurBB.push_back(*Inst);
3966 
3967     CurBB.push_back(IJmpInst);
3968 
3969     return Results;
3970   }
3971 
3972 private:
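  // Both helpers below build a RIP-relative memory reference to Src:
  // createMove loads through it (mov Src(%rip), %Reg), while createLea
  // materializes its address (lea Src(%rip), %Reg).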
3973   bool createMove(MCInst &Inst, const MCSymbol *Src, unsigned Reg,
3974                   MCContext *Ctx) const {
3975     Inst.setOpcode(X86::MOV64rm);
3976     Inst.addOperand(MCOperand::createReg(Reg));
3977     Inst.addOperand(MCOperand::createReg(X86::RIP));        // BaseReg
3978     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
3979     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
3980     Inst.addOperand(MCOperand::createExpr(
3981         MCSymbolRefExpr::create(Src, MCSymbolRefExpr::VK_None,
3982                                 *Ctx)));                    // Displacement
3983     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
3984 
3985     return true;
3986   }
3987 
3988   bool createLea(MCInst &Inst, const MCSymbol *Src, unsigned Reg,
3989                  MCContext *Ctx) const {
3990     Inst.setOpcode(X86::LEA64r);
3991     Inst.addOperand(MCOperand::createReg(Reg));
3992     Inst.addOperand(MCOperand::createReg(X86::RIP));        // BaseReg
3993     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
3994     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
3995     Inst.addOperand(MCOperand::createExpr(
3996         MCSymbolRefExpr::create(Src, MCSymbolRefExpr::VK_None,
3997                                 *Ctx)));                    // Displacement
3998     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
3999     return true;
4000   }
4001 };
4002 
4003 } // namespace
4004 
4005 namespace llvm {
4006 namespace bolt {
4007 
4008 MCPlusBuilder *createX86MCPlusBuilder(const MCInstrAnalysis *Analysis,
4009                                       const MCInstrInfo *Info,
4010                                       const MCRegisterInfo *RegInfo) {
4011   return new X86MCPlusBuilder(Analysis, Info, RegInfo);
4012 }
4013 
4014 } // namespace bolt
4015 } // namespace llvm
4016