//===- bolt/Target/AArch64/AArch64MCPlusBuilder.cpp -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides the AArch64-specific MCPlus builder.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "bolt/Core/MCPlusBuilder.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"

#define DEBUG_TYPE "bolt-aarch64"

using namespace llvm;
using namespace bolt;

namespace {

class AArch64MCPlusBuilder : public MCPlusBuilder {
public:
  AArch64MCPlusBuilder(const MCInstrAnalysis *Analysis, const MCInstrInfo *Info,
                       const MCRegisterInfo *RegInfo)
      : MCPlusBuilder(Analysis, Info, RegInfo) {}

  bool equals(const MCTargetExpr &A, const MCTargetExpr &B,
              CompFuncTy Comp) const override {
    const auto &AArch64ExprA = cast<AArch64MCExpr>(A);
    const auto &AArch64ExprB = cast<AArch64MCExpr>(B);
    if (AArch64ExprA.getKind() != AArch64ExprB.getKind())
      return false;

    return MCPlusBuilder::equals(*AArch64ExprA.getSubExpr(),
                                 *AArch64ExprB.getSubExpr(), Comp);
  }

  bool hasEVEXEncoding(const MCInst &) const override { return false; }

  bool isMacroOpFusionPair(ArrayRef<MCInst> Insts) const override {
    return false;
  }

  bool shortenInstruction(MCInst &) const override { return false; }

  bool isADRP(const MCInst &Inst) const override {
    return Inst.getOpcode() == AArch64::ADRP;
  }

  bool isADR(const MCInst &Inst) const override {
    return Inst.getOpcode() == AArch64::ADR;
  }

  void getADRReg(const MCInst &Inst, MCPhysReg &RegName) const override {
    assert((isADR(Inst) || isADRP(Inst)) && "Not an ADR instruction");
    assert(MCPlus::getNumPrimeOperands(Inst) != 0 &&
           "No operands for ADR instruction");
    assert(Inst.getOperand(0).isReg() &&
           "Unexpected operand in ADR instruction");
    RegName = Inst.getOperand(0).getReg();
  }

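  /// Test-bit-and-branch instructions (TBZ/TBNZ), 32- and 64-bit forms.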
  bool isTB(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::TBNZW ||
            Inst.getOpcode() == AArch64::TBNZX ||
            Inst.getOpcode() == AArch64::TBZW ||
            Inst.getOpcode() == AArch64::TBZX);
  }

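  /// Compare-and-branch instructions (CBZ/CBNZ), 32- and 64-bit forms.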
  bool isCB(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::CBNZW ||
            Inst.getOpcode() == AArch64::CBNZX ||
            Inst.getOpcode() == AArch64::CBZW ||
            Inst.getOpcode() == AArch64::CBZX);
  }

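  /// Wide immediate move instructions (MOVK/MOVN/MOVZ).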
  bool isMOVW(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::MOVKWi ||
            Inst.getOpcode() == AArch64::MOVKXi ||
            Inst.getOpcode() == AArch64::MOVNWi ||
            Inst.getOpcode() == AArch64::MOVNXi ||
            Inst.getOpcode() == AArch64::MOVZXi ||
            Inst.getOpcode() == AArch64::MOVZWi);
  }

  bool isADD(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::ADDSWri ||
            Inst.getOpcode() == AArch64::ADDSWrr ||
            Inst.getOpcode() == AArch64::ADDSWrs ||
            Inst.getOpcode() == AArch64::ADDSWrx ||
            Inst.getOpcode() == AArch64::ADDSXri ||
            Inst.getOpcode() == AArch64::ADDSXrr ||
            Inst.getOpcode() == AArch64::ADDSXrs ||
            Inst.getOpcode() == AArch64::ADDSXrx ||
            Inst.getOpcode() == AArch64::ADDSXrx64 ||
            Inst.getOpcode() == AArch64::ADDWri ||
            Inst.getOpcode() == AArch64::ADDWrr ||
            Inst.getOpcode() == AArch64::ADDWrs ||
            Inst.getOpcode() == AArch64::ADDWrx ||
            Inst.getOpcode() == AArch64::ADDXri ||
            Inst.getOpcode() == AArch64::ADDXrr ||
            Inst.getOpcode() == AArch64::ADDXrs ||
            Inst.getOpcode() == AArch64::ADDXrx ||
            Inst.getOpcode() == AArch64::ADDXrx64);
  }

  bool isLDRB(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::LDRBBpost ||
            Inst.getOpcode() == AArch64::LDRBBpre ||
            Inst.getOpcode() == AArch64::LDRBBroW ||
            Inst.getOpcode() == AArch64::LDRBBroX ||
            Inst.getOpcode() == AArch64::LDRBBui ||
            Inst.getOpcode() == AArch64::LDRSBWpost ||
            Inst.getOpcode() == AArch64::LDRSBWpre ||
            Inst.getOpcode() == AArch64::LDRSBWroW ||
            Inst.getOpcode() == AArch64::LDRSBWroX ||
            Inst.getOpcode() == AArch64::LDRSBWui ||
            Inst.getOpcode() == AArch64::LDRSBXpost ||
            Inst.getOpcode() == AArch64::LDRSBXpre ||
            Inst.getOpcode() == AArch64::LDRSBXroW ||
            Inst.getOpcode() == AArch64::LDRSBXroX ||
            Inst.getOpcode() == AArch64::LDRSBXui);
  }

  bool isLDRH(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::LDRHHpost ||
            Inst.getOpcode() == AArch64::LDRHHpre ||
            Inst.getOpcode() == AArch64::LDRHHroW ||
            Inst.getOpcode() == AArch64::LDRHHroX ||
            Inst.getOpcode() == AArch64::LDRHHui ||
            Inst.getOpcode() == AArch64::LDRSHWpost ||
            Inst.getOpcode() == AArch64::LDRSHWpre ||
            Inst.getOpcode() == AArch64::LDRSHWroW ||
            Inst.getOpcode() == AArch64::LDRSHWroX ||
            Inst.getOpcode() == AArch64::LDRSHWui ||
            Inst.getOpcode() == AArch64::LDRSHXpost ||
            Inst.getOpcode() == AArch64::LDRSHXpre ||
            Inst.getOpcode() == AArch64::LDRSHXroW ||
            Inst.getOpcode() == AArch64::LDRSHXroX ||
            Inst.getOpcode() == AArch64::LDRSHXui);
  }

  bool isLDRW(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::LDRWpost ||
            Inst.getOpcode() == AArch64::LDRWpre ||
            Inst.getOpcode() == AArch64::LDRWroW ||
            Inst.getOpcode() == AArch64::LDRWroX ||
            Inst.getOpcode() == AArch64::LDRWui);
  }

  bool isLDRX(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::LDRXpost ||
            Inst.getOpcode() == AArch64::LDRXpre ||
            Inst.getOpcode() == AArch64::LDRXroW ||
            Inst.getOpcode() == AArch64::LDRXroX ||
            Inst.getOpcode() == AArch64::LDRXui);
  }

  bool isLoad(const MCInst &Inst) const override {
    return isLDRB(Inst) || isLDRH(Inst) || isLDRW(Inst) || isLDRX(Inst);
  }

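  /// Returns true if any address operand of a load is the stack or frame
  /// pointer (SP/WSP or FP/W29), i.e. the load reads from the stack.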
  bool isLoadFromStack(const MCInst &Inst) const {
    if (!isLoad(Inst))
      return false;
    const MCInstrDesc &InstInfo = Info->get(Inst.getOpcode());
    unsigned NumDefs = InstInfo.getNumDefs();
    for (unsigned I = NumDefs, E = InstInfo.getNumOperands(); I < E; ++I) {
      const MCOperand &Operand = Inst.getOperand(I);
      if (!Operand.isReg())
        continue;
      unsigned Reg = Operand.getReg();
      if (Reg == AArch64::SP || Reg == AArch64::WSP || Reg == AArch64::FP ||
          Reg == AArch64::W29)
        return true;
    }
    return false;
  }

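  /// The canonical AArch64 register move is the alias "mov Xd, Xm", encoded
  /// as "orr Xd, xzr, Xm" (ORRXrs with a zero shift), so that is the only
  /// form matched here.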
  bool isRegToRegMove(const MCInst &Inst, MCPhysReg &From,
                      MCPhysReg &To) const override {
    if (Inst.getOpcode() != AArch64::ORRXrs)
      return false;
    if (Inst.getOperand(1).getReg() != AArch64::XZR)
      return false;
    if (Inst.getOperand(3).getImm() != 0)
      return false;
    From = Inst.getOperand(2).getReg();
    To = Inst.getOperand(0).getReg();
    return true;
  }

  bool isIndirectCall(const MCInst &Inst) const override {
    return Inst.getOpcode() == AArch64::BLR;
  }

  MCPhysReg getNoRegister() const override { return AArch64::NoRegister; }

  bool hasPCRelOperand(const MCInst &Inst) const override {
    // ADRP is an exception: even though it has a PC-relative operand, that
    // operand is not a complete symbol reference, so BOLT shouldn't try to
    // process it in isolation.
    if (isADRP(Inst))
      return false;

    if (isADR(Inst))
      return true;

    // Look for literal addressing mode (see C1-143 ARM DDI 0487B.a)
    const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
    for (unsigned I = 0, E = MCII.getNumOperands(); I != E; ++I) {
      if (MCII.OpInfo[I].OperandType == MCOI::OPERAND_PCREL)
        return true;
    }
    return false;
  }

  bool evaluateADR(const MCInst &Inst, int64_t &Imm,
                   const MCExpr **DispExpr) const {
    assert((isADR(Inst) || isADRP(Inst)) && "Not an ADR instruction");

    const MCOperand &Label = Inst.getOperand(1);
    if (!Label.isImm()) {
      assert(Label.isExpr() && "Unexpected ADR operand");
      assert(DispExpr && "DispExpr must be set");
      *DispExpr = Label.getExpr();
      return false;
    }

    if (Inst.getOpcode() == AArch64::ADR) {
      Imm = Label.getImm();
      return true;
    }
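    // ADRP encodes its immediate in units of 4KiB pages.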
    Imm = Label.getImm() << 12;
    return true;
  }

  bool evaluateAArch64MemoryOperand(const MCInst &Inst, int64_t &DispImm,
                                    const MCExpr **DispExpr = nullptr) const {
    if (isADR(Inst) || isADRP(Inst))
      return evaluateADR(Inst, DispImm, DispExpr);

    // Literal addressing mode
    const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
    for (unsigned I = 0, E = MCII.getNumOperands(); I != E; ++I) {
      if (MCII.OpInfo[I].OperandType != MCOI::OPERAND_PCREL)
        continue;

      if (!Inst.getOperand(I).isImm()) {
        assert(Inst.getOperand(I).isExpr() && "Unexpected PCREL operand");
        assert(DispExpr && "DispExpr must be set");
        *DispExpr = Inst.getOperand(I).getExpr();
        return true;
      }

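      // PC-relative literal offsets are encoded in units of 4-byte words.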
      DispImm = Inst.getOperand(I).getImm() << 2;
      return true;
    }
    return false;
  }

  bool evaluateMemOperandTarget(const MCInst &Inst, uint64_t &Target,
                                uint64_t Address,
                                uint64_t Size) const override {
    int64_t DispValue;
    const MCExpr *DispExpr = nullptr;
    if (!evaluateAArch64MemoryOperand(Inst, DispValue, &DispExpr))
      return false;

    // Make sure it's a well-formed addressing we can statically evaluate.
    if (DispExpr)
      return false;

    Target = DispValue;
    if (Inst.getOpcode() == AArch64::ADRP)
      Target += Address & ~0xFFFULL;
    else
      Target += Address;
    return true;
  }

  bool replaceMemOperandDisp(MCInst &Inst, MCOperand Operand) const override {
    MCInst::iterator OI = Inst.begin();
    if (isADR(Inst) || isADRP(Inst)) {
      assert(MCPlus::getNumPrimeOperands(Inst) >= 2 &&
             "Unexpected number of operands");
      ++OI;
    } else {
      const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
      for (unsigned I = 0, E = MCII.getNumOperands(); I != E; ++I) {
        if (MCII.OpInfo[I].OperandType == MCOI::OPERAND_PCREL) {
          break;
        }
        ++OI;
      }
      assert(OI != Inst.end() && "Literal operand not found");
    }
    *OI = Operand;
    return true;
  }

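  /// Wrap Expr in the AArch64MCExpr variant kind implied by the instruction
  /// and/or relocation type (e.g. :lo12:, absolute page, MOVW segments) so
  /// that the correct fixup is emitted for it.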
  const MCExpr *getTargetExprFor(MCInst &Inst, const MCExpr *Expr,
                                 MCContext &Ctx,
                                 uint64_t RelType) const override {

    if (isADR(Inst) || RelType == ELF::R_AARCH64_ADR_PREL_LO21 ||
        RelType == ELF::R_AARCH64_TLSDESC_ADR_PREL21) {
      return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, Ctx);
    } else if (isADRP(Inst) || RelType == ELF::R_AARCH64_ADR_PREL_PG_HI21 ||
               RelType == ELF::R_AARCH64_ADR_PREL_PG_HI21_NC ||
               RelType == ELF::R_AARCH64_TLSDESC_ADR_PAGE21 ||
               RelType == ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
               RelType == ELF::R_AARCH64_ADR_GOT_PAGE) {
      // Never emit a GOT reloc, we handled this in
      // RewriteInstance::readRelocations().
      return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, Ctx);
    } else {
      switch (RelType) {
      case ELF::R_AARCH64_ADD_ABS_LO12_NC:
      case ELF::R_AARCH64_LD64_GOT_LO12_NC:
      case ELF::R_AARCH64_LDST8_ABS_LO12_NC:
      case ELF::R_AARCH64_LDST16_ABS_LO12_NC:
      case ELF::R_AARCH64_LDST32_ABS_LO12_NC:
      case ELF::R_AARCH64_LDST64_ABS_LO12_NC:
      case ELF::R_AARCH64_LDST128_ABS_LO12_NC:
      case ELF::R_AARCH64_TLSDESC_ADD_LO12:
      case ELF::R_AARCH64_TLSDESC_LD64_LO12:
      case ELF::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
      case ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
        return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_LO12, Ctx);
      case ELF::R_AARCH64_MOVW_UABS_G3:
        return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_G3, Ctx);
      case ELF::R_AARCH64_MOVW_UABS_G2:
      case ELF::R_AARCH64_MOVW_UABS_G2_NC:
        return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_G2_NC, Ctx);
      case ELF::R_AARCH64_MOVW_UABS_G1:
      case ELF::R_AARCH64_MOVW_UABS_G1_NC:
        return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_G1_NC, Ctx);
      case ELF::R_AARCH64_MOVW_UABS_G0:
      case ELF::R_AARCH64_MOVW_UABS_G0_NC:
        return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_G0_NC, Ctx);
      default:
        break;
      }
    }
    return Expr;
  }

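  /// When OpNum is 0, select the operand index that actually holds the symbol
  /// reference for this kind of instruction. Returns false if OpNum is out of
  /// range.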
  bool getSymbolRefOperandNum(const MCInst &Inst, unsigned &OpNum) const {
    if (OpNum >= MCPlus::getNumPrimeOperands(Inst))
      return false;

    // Auto-select correct operand number
    if (OpNum == 0) {
      if (isConditionalBranch(Inst) || isADR(Inst) || isADRP(Inst))
        OpNum = 1;
      if (isTB(Inst))
        OpNum = 2;
      if (isMOVW(Inst))
        OpNum = 1;
    }

    return true;
  }

  const MCSymbol *getTargetSymbol(const MCExpr *Expr) const override {
    auto *AArchExpr = dyn_cast<AArch64MCExpr>(Expr);
    if (AArchExpr && AArchExpr->getSubExpr())
      return getTargetSymbol(AArchExpr->getSubExpr());

    auto *BinExpr = dyn_cast<MCBinaryExpr>(Expr);
    if (BinExpr)
      return getTargetSymbol(BinExpr->getLHS());

    auto *SymExpr = dyn_cast<MCSymbolRefExpr>(Expr);
    if (SymExpr && SymExpr->getKind() == MCSymbolRefExpr::VK_None)
      return &SymExpr->getSymbol();

    return nullptr;
  }

  const MCSymbol *getTargetSymbol(const MCInst &Inst,
                                  unsigned OpNum = 0) const override {
    if (!getSymbolRefOperandNum(Inst, OpNum))
      return nullptr;

    const MCOperand &Op = Inst.getOperand(OpNum);
    if (!Op.isExpr())
      return nullptr;

    return getTargetSymbol(Op.getExpr());
  }

  int64_t getTargetAddend(const MCExpr *Expr) const override {
    auto *AArchExpr = dyn_cast<AArch64MCExpr>(Expr);
    if (AArchExpr && AArchExpr->getSubExpr())
      return getTargetAddend(AArchExpr->getSubExpr());

    auto *BinExpr = dyn_cast<MCBinaryExpr>(Expr);
    if (BinExpr && BinExpr->getOpcode() == MCBinaryExpr::Add)
      return getTargetAddend(BinExpr->getRHS());

    auto *ConstExpr = dyn_cast<MCConstantExpr>(Expr);
    if (ConstExpr)
      return ConstExpr->getValue();

    return 0;
  }

  int64_t getTargetAddend(const MCInst &Inst,
                          unsigned OpNum = 0) const override {
    if (!getSymbolRefOperandNum(Inst, OpNum))
      return 0;

    const MCOperand &Op = Inst.getOperand(OpNum);
    if (!Op.isExpr())
      return 0;

    return getTargetAddend(Op.getExpr());
  }

  bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
                      uint64_t &Target) const override {
    size_t OpNum = 0;

    if (isConditionalBranch(Inst)) {
      assert(MCPlus::getNumPrimeOperands(Inst) >= 2 &&
             "Invalid number of operands");
      OpNum = 1;
    }

    if (isTB(Inst)) {
      assert(MCPlus::getNumPrimeOperands(Inst) >= 3 &&
             "Invalid number of operands");
      OpNum = 2;
    }

    if (Info->get(Inst.getOpcode()).OpInfo[OpNum].OperandType !=
        MCOI::OPERAND_PCREL) {
      assert((isIndirectBranch(Inst) || isIndirectCall(Inst)) &&
             "FAILED evaluateBranch");
      return false;
    }

    int64_t Imm = Inst.getOperand(OpNum).getImm() << 2;
    Target = Addr + Imm;
    return true;
  }

  bool replaceBranchTarget(MCInst &Inst, const MCSymbol *TBB,
                           MCContext *Ctx) const override {
    assert((isCall(Inst) || isBranch(Inst)) && !isIndirectBranch(Inst) &&
           "Invalid instruction");
    assert(MCPlus::getNumPrimeOperands(Inst) >= 1 &&
           "Invalid number of operands");
    MCInst::iterator OI = Inst.begin();

    if (isConditionalBranch(Inst)) {
      assert(MCPlus::getNumPrimeOperands(Inst) >= 2 &&
             "Invalid number of operands");
      ++OI;
    }

    if (isTB(Inst)) {
      assert(MCPlus::getNumPrimeOperands(Inst) >= 3 &&
             "Invalid number of operands");
      OI = Inst.begin() + 2;
    }

    *OI = MCOperand::createExpr(
        MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx));
    return true;
  }

  /// Matches indirect branch patterns in AArch64 related to a jump table (JT),
  /// helping us to build the complete CFG. A typical indirect branch to
  /// a jump table entry in AArch64 looks like the following:
  ///
  ///   adrp    x1, #-7585792           # Get JT Page location
  ///   add     x1, x1, #692            # Complement with JT Page offset
  ///   ldrh    w0, [x1, w0, uxtw #1]   # Loads JT entry
  ///   adr     x1, #12                 # Get PC + 12 (end of this BB) used next
  ///   add     x0, x1, w0, sxth #2     # Finish building branch target
  ///                                   # (entries in JT are relative to the end
  ///                                   #  of this BB)
  ///   br      x0                      # Indirect jump instruction
  ///
  bool analyzeIndirectBranchFragment(
      const MCInst &Inst,
      DenseMap<const MCInst *, SmallVector<MCInst *, 4>> &UDChain,
      const MCExpr *&JumpTable, int64_t &Offset, int64_t &ScaleValue,
      MCInst *&PCRelBase) const {
    // Expect AArch64 BR
    assert(Inst.getOpcode() == AArch64::BR && "Unexpected opcode");

    // Match the indirect branch pattern for AArch64
    SmallVector<MCInst *, 4> &UsesRoot = UDChain[&Inst];
    if (UsesRoot.size() == 0 || UsesRoot[0] == nullptr) {
      return false;
    }
    const MCInst *DefAdd = UsesRoot[0];

    // Now we match an ADD
    if (!isADD(*DefAdd)) {
      // If the address is not broken up in two parts, this is not branching
      // according to a jump table entry. Fail.
      return false;
    }
    if (DefAdd->getOpcode() == AArch64::ADDXri) {
      // This can happen when there is no offset, but a direct jump that was
      // transformed into an indirect one (indirect tail call):
      //   ADRP   x2, Perl_re_compiler
      //   ADD    x2, x2, :lo12:Perl_re_compiler
      //   BR     x2
      return false;
    }
    if (DefAdd->getOpcode() == AArch64::ADDXrs) {
      // Covers the less common pattern where JT entries are relative to
      // the JT itself (like x86). Seems less efficient since we can't
      // assume the JT is aligned at a 4B boundary and thus drop 2 bits from
      // JT values.
      // cde264:
      //    adrp    x12, #21544960  ; 216a000
      //    add     x12, x12, #1696 ; 216a6a0  (JT object in .rodata)
      //    ldrsw   x8, [x12, x8, lsl #2]   --> loads e.g. 0xfeb73bd8
      //  * add     x8, x8, x12   --> = cde278, next block
      //    br      x8
      // cde278:
      //
      // Parsed as ADDXrs reg:x8 reg:x8 reg:x12 imm:0
      return false;
    }
    assert(DefAdd->getOpcode() == AArch64::ADDXrx &&
           "Failed to match indirect branch!");

    // Validate ADD operands
    int64_t OperandExtension = DefAdd->getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getArithShiftValue(OperandExtension);
    AArch64_AM::ShiftExtendType ExtendType =
        AArch64_AM::getArithExtendType(OperandExtension);
    if (ShiftVal != 2) {
      llvm_unreachable("Failed to match indirect branch! (fragment 2)");
    }
    if (ExtendType == AArch64_AM::SXTB) {
      ScaleValue = 1LL;
    } else if (ExtendType == AArch64_AM::SXTH) {
      ScaleValue = 2LL;
    } else if (ExtendType == AArch64_AM::SXTW) {
      ScaleValue = 4LL;
    } else {
      llvm_unreachable("Failed to match indirect branch! (fragment 3)");
    }

    // Match an ADR to load base address to be used when addressing JT targets
    SmallVector<MCInst *, 4> &UsesAdd = UDChain[DefAdd];
    if (UsesAdd.size() <= 2 || UsesAdd[1] == nullptr || UsesAdd[2] == nullptr) {
      // This happens when we don't have enough context about this jump table
      // because the jumping code sequence was split in multiple basic blocks.
      // This was observed in the wild in HHVM code (dispatchImpl).
      return false;
    }
    MCInst *DefBaseAddr = UsesAdd[1];
    assert(DefBaseAddr->getOpcode() == AArch64::ADR &&
           "Failed to match indirect branch pattern! (fragment 3)");

    PCRelBase = DefBaseAddr;
    // Match LOAD to load the jump table (relative) target
    const MCInst *DefLoad = UsesAdd[2];
    assert(isLoad(*DefLoad) &&
           "Failed to match indirect branch load pattern! (1)");
    assert((ScaleValue != 1LL || isLDRB(*DefLoad)) &&
           "Failed to match indirect branch load pattern! (2)");
    assert((ScaleValue != 2LL || isLDRH(*DefLoad)) &&
           "Failed to match indirect branch load pattern! (3)");

    // Match ADD that calculates the JumpTable Base Address (not the offset)
    SmallVector<MCInst *, 4> &UsesLoad = UDChain[DefLoad];
    const MCInst *DefJTBaseAdd = UsesLoad[1];
    MCPhysReg From, To;
    if (DefJTBaseAdd == nullptr || isLoadFromStack(*DefJTBaseAdd) ||
        isRegToRegMove(*DefJTBaseAdd, From, To)) {
      // Sometimes base address may have been defined in another basic block
      // (hoisted). Return with no jump table info.
      JumpTable = nullptr;
      return true;
    }

    assert(DefJTBaseAdd->getOpcode() == AArch64::ADDXri &&
           "Failed to match jump table base address pattern! (1)");

    if (DefJTBaseAdd->getOperand(2).isImm())
      Offset = DefJTBaseAdd->getOperand(2).getImm();
    SmallVector<MCInst *, 4> &UsesJTBaseAdd = UDChain[DefJTBaseAdd];
    const MCInst *DefJTBasePage = UsesJTBaseAdd[1];
    if (DefJTBasePage == nullptr || isLoadFromStack(*DefJTBasePage)) {
      JumpTable = nullptr;
      return true;
    }
    assert(DefJTBasePage->getOpcode() == AArch64::ADRP &&
           "Failed to match jump table base page pattern! (2)");
    if (DefJTBasePage->getOperand(1).isExpr())
      JumpTable = DefJTBasePage->getOperand(1).getExpr();
    return true;
  }

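  /// Computes a local use-def chain: for each instruction, records, per
  /// register operand, the closest preceding instruction in [Begin, End) that
  /// may define that register (nullptr if none). State is reset after each
  /// terminator, so chains do not cross basic block boundaries.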
  DenseMap<const MCInst *, SmallVector<MCInst *, 4>>
  computeLocalUDChain(const MCInst *CurInstr, InstructionIterator Begin,
                      InstructionIterator End) const {
    DenseMap<int, MCInst *> RegAliasTable;
    DenseMap<const MCInst *, SmallVector<MCInst *, 4>> Uses;

    auto addInstrOperands = [&](const MCInst &Instr) {
      // Update Uses table
      for (unsigned OpNum = 0, OpEnd = MCPlus::getNumPrimeOperands(Instr);
           OpNum != OpEnd; ++OpNum) {
        if (!Instr.getOperand(OpNum).isReg())
          continue;
        unsigned Reg = Instr.getOperand(OpNum).getReg();
        MCInst *AliasInst = RegAliasTable[Reg];
        Uses[&Instr].push_back(AliasInst);
        LLVM_DEBUG({
          dbgs() << "Adding reg operand " << Reg << " refs ";
          if (AliasInst != nullptr)
            AliasInst->dump();
          else
            dbgs() << "\n";
        });
      }
    };

    LLVM_DEBUG(dbgs() << "computeLocalUDChain\n");
    bool TerminatorSeen = false;
    for (auto II = Begin; II != End; ++II) {
      MCInst &Instr = *II;
      // Ignore nops and CFIs
      if (isPseudo(Instr) || isNoop(Instr))
        continue;
      if (TerminatorSeen) {
        RegAliasTable.clear();
        Uses.clear();
      }

      LLVM_DEBUG(dbgs() << "Now updating for:\n ");
      LLVM_DEBUG(Instr.dump());
      addInstrOperands(Instr);

      BitVector Regs = BitVector(RegInfo->getNumRegs(), false);
      getWrittenRegs(Instr, Regs);

      // Update register definitions after this point
      int Idx = Regs.find_first();
      while (Idx != -1) {
        RegAliasTable[Idx] = &Instr;
        LLVM_DEBUG(dbgs() << "Setting reg " << Idx
                          << " def to current instr.\n");
        Idx = Regs.find_next(Idx);
      }

      TerminatorSeen = isTerminator(Instr);
    }

    // Process the last instruction, which is not currently added into the
    // instruction stream
    if (CurInstr) {
      addInstrOperands(*CurInstr);
    }
    return Uses;
  }

  IndirectBranchType analyzeIndirectBranch(
      MCInst &Instruction, InstructionIterator Begin, InstructionIterator End,
      const unsigned PtrSize, MCInst *&MemLocInstrOut, unsigned &BaseRegNumOut,
      unsigned &IndexRegNumOut, int64_t &DispValueOut,
      const MCExpr *&DispExprOut, MCInst *&PCRelBaseOut) const override {
    MemLocInstrOut = nullptr;
    BaseRegNumOut = AArch64::NoRegister;
    IndexRegNumOut = AArch64::NoRegister;
    DispValueOut = 0;
    DispExprOut = nullptr;

    // An instruction referencing memory used by jump instruction (directly or
    // via register). This location could be an array of function pointers
    // in case of indirect tail call, or a jump table.
    MCInst *MemLocInstr = nullptr;

    // Analyze the memory location.
    int64_t ScaleValue, DispValue;
    const MCExpr *DispExpr;

    DenseMap<const MCInst *, SmallVector<llvm::MCInst *, 4>> UDChain =
        computeLocalUDChain(&Instruction, Begin, End);
    MCInst *PCRelBase;
    if (!analyzeIndirectBranchFragment(Instruction, UDChain, DispExpr,
                                       DispValue, ScaleValue, PCRelBase)) {
      return IndirectBranchType::UNKNOWN;
    }

    MemLocInstrOut = MemLocInstr;
    DispValueOut = DispValue;
    DispExprOut = DispExpr;
    PCRelBaseOut = PCRelBase;
    return IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE;
  }

  unsigned getInvertedBranchOpcode(unsigned Opcode) const {
    switch (Opcode) {
    default:
      llvm_unreachable("Failed to invert branch opcode");
      return Opcode;
    case AArch64::TBZW:     return AArch64::TBNZW;
    case AArch64::TBZX:     return AArch64::TBNZX;
    case AArch64::TBNZW:    return AArch64::TBZW;
    case AArch64::TBNZX:    return AArch64::TBZX;
    case AArch64::CBZW:     return AArch64::CBNZW;
    case AArch64::CBZX:     return AArch64::CBNZX;
    case AArch64::CBNZW:    return AArch64::CBZW;
    case AArch64::CBNZX:    return AArch64::CBZX;
    }
  }

  unsigned getCondCode(const MCInst &Inst) const override {
    // AArch64 does not use condition codes the way BOLT's generic code
    // expects, so we just return the opcode of the conditional branch here.
    return Inst.getOpcode();
  }

  unsigned getCanonicalBranchCondCode(unsigned Opcode) const override {
    switch (Opcode) {
    default:
      return Opcode;
    case AArch64::TBNZW:    return AArch64::TBZW;
    case AArch64::TBNZX:    return AArch64::TBZX;
    case AArch64::CBNZW:    return AArch64::CBZW;
    case AArch64::CBNZX:    return AArch64::CBZX;
    }
  }

  bool reverseBranchCondition(MCInst &Inst, const MCSymbol *TBB,
                              MCContext *Ctx) const override {
    if (isTB(Inst) || isCB(Inst)) {
      Inst.setOpcode(getInvertedBranchOpcode(Inst.getOpcode()));
      assert(Inst.getOpcode() != 0 && "Invalid branch instruction");
    } else if (Inst.getOpcode() == AArch64::Bcc) {
      Inst.getOperand(0).setImm(AArch64CC::getInvertedCondCode(
          static_cast<AArch64CC::CondCode>(Inst.getOperand(0).getImm())));
      assert(Inst.getOperand(0).getImm() != AArch64CC::AL &&
             Inst.getOperand(0).getImm() != AArch64CC::NV &&
             "Can't reverse ALWAYS cond code");
    } else {
      LLVM_DEBUG(Inst.dump());
      llvm_unreachable("Unrecognized branch instruction");
    }
    return replaceBranchTarget(Inst, TBB, Ctx);
  }

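  /// Returns the effective number of bits of PC-relative reach: the width of
  /// the encoded offset field plus the implicit 2-bit shift (branch offsets
  /// are in units of 4-byte instructions). E.g. TBZ has a 14-bit field, so
  /// 16 bits of reach; B has a 26-bit field, so 28 bits.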
  int getPCRelEncodingSize(const MCInst &Inst) const override {
    switch (Inst.getOpcode()) {
    default:
      llvm_unreachable("Failed to get pcrel encoding size");
      return 0;
    case AArch64::TBZW:     return 16;
    case AArch64::TBZX:     return 16;
    case AArch64::TBNZW:    return 16;
    case AArch64::TBNZX:    return 16;
    case AArch64::CBZW:     return 21;
    case AArch64::CBZX:     return 21;
    case AArch64::CBNZW:    return 21;
    case AArch64::CBNZX:    return 21;
    case AArch64::B:        return 28;
    case AArch64::BL:       return 28;
    case AArch64::Bcc:      return 21;
    }
  }

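  // A short jump (ADRP + ADD + BR, see createShortJmp) can reach +/-4 GiB,
  // i.e. a 33-bit range.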
  int getShortJmpEncodingSize() const override { return 33; }

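  // An unconditional B has a 26-bit word offset, i.e. 28 bits of reach.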
  int getUncondBranchEncodingSize() const override { return 28; }

  bool createTailCall(MCInst &Inst, const MCSymbol *Target,
                      MCContext *Ctx) override {
    Inst.setOpcode(AArch64::B);
    Inst.addOperand(MCOperand::createExpr(getTargetExprFor(
        Inst, MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
        *Ctx, 0)));
    setTailCall(Inst);
    return true;
  }

  void createLongTailCall(InstructionListType &Seq, const MCSymbol *Target,
                          MCContext *Ctx) override {
    createShortJmp(Seq, Target, Ctx, /*IsTailCall*/ true);
  }

  bool convertJmpToTailCall(MCInst &Inst) override {
    setTailCall(Inst);
    return true;
  }

  bool convertTailCallToJmp(MCInst &Inst) override {
    removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
    removeAnnotation(Inst, "Offset");
    if (getConditionalTailCall(Inst))
      unsetConditionalTailCall(Inst);
    return true;
  }

  bool lowerTailCall(MCInst &Inst) override {
    removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
    if (getConditionalTailCall(Inst))
      unsetConditionalTailCall(Inst);
    return true;
  }

  bool isNoop(const MCInst &Inst) const override {
    return Inst.getOpcode() == AArch64::HINT &&
           Inst.getOperand(0).getImm() == 0;
  }

  bool createNoop(MCInst &Inst) const override {
    Inst.setOpcode(AArch64::HINT);
    Inst.clear();
    Inst.addOperand(MCOperand::createImm(0));
    return true;
  }

  bool isStore(const MCInst &Inst) const override { return false; }

  bool analyzeBranch(InstructionIterator Begin, InstructionIterator End,
                     const MCSymbol *&TBB, const MCSymbol *&FBB,
                     MCInst *&CondBranch,
                     MCInst *&UncondBranch) const override {
    auto I = End;

    while (I != Begin) {
      --I;

      // Ignore nops and CFIs
      if (isPseudo(*I) || isNoop(*I))
        continue;

      // Stop when we find the first non-terminator
      if (!isTerminator(*I) || isTailCall(*I) || !isBranch(*I))
        break;

      // Handle unconditional branches.
      if (isUnconditionalBranch(*I)) {
        // Any code seen after this unconditional branch is unreachable;
        // ignore it.
        CondBranch = nullptr;
        UncondBranch = &*I;
        const MCSymbol *Sym = getTargetSymbol(*I);
        assert(Sym != nullptr &&
               "Couldn't extract BB symbol from jump operand");
        TBB = Sym;
        continue;
      }

      // Handle conditional branches and ignore indirect branches
      if (isIndirectBranch(*I)) {
        return false;
      }

      if (CondBranch == nullptr) {
        const MCSymbol *TargetBB = getTargetSymbol(*I);
        if (TargetBB == nullptr) {
          // Unrecognized branch target
          return false;
        }
        FBB = TBB;
        TBB = TargetBB;
        CondBranch = &*I;
        continue;
      }

      llvm_unreachable("multiple conditional branches in one BB");
    }
    return true;
  }

  void createLongJmp(InstructionListType &Seq, const MCSymbol *Target,
                     MCContext *Ctx, bool IsTailCall) override {
    // ip0 (r16) is reserved to the linker (refer to 5.3.1.1 of the "Procedure
    // Call Standard for the ARM 64-bit Architecture (AArch64)").
    // The sequence of instructions we create here is the following:
    //  movz ip0, #:abs_g3:<addr>
    //  movk ip0, #:abs_g2_nc:<addr>
    //  movk ip0, #:abs_g1_nc:<addr>
    //  movk ip0, #:abs_g0_nc:<addr>
    //  br ip0
    MCInst Inst;
    Inst.setOpcode(AArch64::MOVZXi);
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createExpr(AArch64MCExpr::create(
        MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
        AArch64MCExpr::VK_ABS_G3, *Ctx)));
    Inst.addOperand(MCOperand::createImm(0x30));
    Seq.emplace_back(Inst);

    Inst.clear();
    Inst.setOpcode(AArch64::MOVKXi);
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createExpr(AArch64MCExpr::create(
        MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
        AArch64MCExpr::VK_ABS_G2_NC, *Ctx)));
    Inst.addOperand(MCOperand::createImm(0x20));
    Seq.emplace_back(Inst);

    Inst.clear();
    Inst.setOpcode(AArch64::MOVKXi);
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createExpr(AArch64MCExpr::create(
        MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
        AArch64MCExpr::VK_ABS_G1_NC, *Ctx)));
    Inst.addOperand(MCOperand::createImm(0x10));
    Seq.emplace_back(Inst);

    Inst.clear();
    Inst.setOpcode(AArch64::MOVKXi);
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createExpr(AArch64MCExpr::create(
        MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
        AArch64MCExpr::VK_ABS_G0_NC, *Ctx)));
    Inst.addOperand(MCOperand::createImm(0));
    Seq.emplace_back(Inst);

    Inst.clear();
    Inst.setOpcode(AArch64::BR);
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    if (IsTailCall)
      setTailCall(Inst);
    Seq.emplace_back(Inst);
  }

  void createShortJmp(InstructionListType &Seq, const MCSymbol *Target,
                      MCContext *Ctx, bool IsTailCall) override {
    // ip0 (r16) is reserved to the linker (refer to 5.3.1.1 of the "Procedure
    // Call Standard for the ARM 64-bit Architecture (AArch64)").
    // The sequence of instructions we create here is the following:
    //  adrp ip0, imm
    //  add ip0, ip0, imm
    //  br ip0
    MCPhysReg Reg = AArch64::X16;
    InstructionListType Insts = materializeAddress(Target, Ctx, Reg);
    Insts.emplace_back();
    MCInst &Inst = Insts.back();
    Inst.clear();
    Inst.setOpcode(AArch64::BR);
    Inst.addOperand(MCOperand::createReg(Reg));
    if (IsTailCall)
      setTailCall(Inst);
    Seq.swap(Insts);
  }

  /// Matching pattern here is
  ///
  ///    ADRP  x16, imm
  ///    ADD   x16, x16, imm
  ///    BR    x16
  ///
  bool matchLinkerVeneer(InstructionIterator Begin, InstructionIterator End,
                         uint64_t Address, const MCInst &CurInst,
                         MCInst *&TargetHiBits, MCInst *&TargetLowBits,
                         uint64_t &Target) const override {
    if (CurInst.getOpcode() != AArch64::BR || !CurInst.getOperand(0).isReg() ||
        CurInst.getOperand(0).getReg() != AArch64::X16)
      return false;

    auto I = End;
    if (I == Begin)
      return false;

    --I;
    Address -= 4;
    if (I == Begin ||
        I->getOpcode() != AArch64::ADDXri ||
        MCPlus::getNumPrimeOperands(*I) < 3 ||
        !I->getOperand(0).isReg() ||
        !I->getOperand(1).isReg() ||
        I->getOperand(0).getReg() != AArch64::X16 ||
        I->getOperand(1).getReg() != AArch64::X16 ||
        !I->getOperand(2).isImm())
      return false;
    TargetLowBits = &*I;
    uint64_t Addr = I->getOperand(2).getImm() & 0xFFF;

    --I;
    Address -= 4;
    if (I->getOpcode() != AArch64::ADRP ||
        MCPlus::getNumPrimeOperands(*I) < 2 ||
        !I->getOperand(0).isReg() ||
        !I->getOperand(1).isImm() ||
        I->getOperand(0).getReg() != AArch64::X16)
      return false;
    TargetHiBits = &*I;
    Addr |= (Address + ((int64_t)I->getOperand(1).getImm() << 12)) &
            0xFFFFFFFFFFFFF000ULL;
    Target = Addr;
    return true;
  }

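  /// Replace the first immediate operand of Inst with a reference to Symbol
  /// (plus Addend), returning the original immediate in Value.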
  bool replaceImmWithSymbolRef(MCInst &Inst, const MCSymbol *Symbol,
                               int64_t Addend, MCContext *Ctx, int64_t &Value,
                               uint64_t RelType) const override {
    unsigned ImmOpNo = -1U;
    for (unsigned Index = 0; Index < MCPlus::getNumPrimeOperands(Inst);
         ++Index) {
      if (Inst.getOperand(Index).isImm()) {
        ImmOpNo = Index;
        break;
      }
    }
    if (ImmOpNo == -1U)
      return false;

    Value = Inst.getOperand(ImmOpNo).getImm();

    setOperandToSymbolRef(Inst, ImmOpNo, Symbol, Addend, Ctx, RelType);

    return true;
  }

  bool createUncondBranch(MCInst &Inst, const MCSymbol *TBB,
                          MCContext *Ctx) const override {
    Inst.setOpcode(AArch64::B);
    Inst.clear();
    Inst.addOperand(MCOperand::createExpr(getTargetExprFor(
        Inst, MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx),
        *Ctx, 0)));
    return true;
  }

  bool isMoveMem2Reg(const MCInst &Inst) const override { return false; }

  bool isADD64rr(const MCInst &Inst) const override { return false; }

  bool isLeave(const MCInst &Inst) const override { return false; }

  bool isPop(const MCInst &Inst) const override { return false; }

  bool isPrefix(const MCInst &Inst) const override { return false; }

  bool deleteREPPrefix(MCInst &Inst) const override { return false; }

  bool createReturn(MCInst &Inst) const override {
    Inst.setOpcode(AArch64::RET);
    Inst.clear();
    Inst.addOperand(MCOperand::createReg(AArch64::LR));
    return true;
  }

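  /// Materialize the address of Target into RegName with the standard
  /// ADRP (page address) + ADD (:lo12: page offset) pair.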
  InstructionListType materializeAddress(const MCSymbol *Target, MCContext *Ctx,
                                         MCPhysReg RegName,
                                         int64_t Addend = 0) const override {
    // Get page-aligned address and add page offset
    InstructionListType Insts(2);
    Insts[0].setOpcode(AArch64::ADRP);
    Insts[0].clear();
    Insts[0].addOperand(MCOperand::createReg(RegName));
    Insts[0].addOperand(MCOperand::createImm(0));
    setOperandToSymbolRef(Insts[0], /* OpNum */ 1, Target, Addend, Ctx,
                          ELF::R_AARCH64_NONE);
    Insts[1].setOpcode(AArch64::ADDXri);
    Insts[1].clear();
    Insts[1].addOperand(MCOperand::createReg(RegName));
    Insts[1].addOperand(MCOperand::createReg(RegName));
    Insts[1].addOperand(MCOperand::createImm(0));
    Insts[1].addOperand(MCOperand::createImm(0));
    setOperandToSymbolRef(Insts[1], /* OpNum */ 2, Target, Addend, Ctx,
                          ELF::R_AARCH64_ADD_ABS_LO12_NC);
    return Insts;
  }
};

} // end anonymous namespace

namespace llvm {
namespace bolt {

MCPlusBuilder *createAArch64MCPlusBuilder(const MCInstrAnalysis *Analysis,
                                          const MCInstrInfo *Info,
                                          const MCRegisterInfo *RegInfo) {
  return new AArch64MCPlusBuilder(Analysis, Info, RegInfo);
}

} // namespace bolt
} // namespace llvm