//===- bolt/Target/AArch64/AArch64MCPlusBuilder.cpp -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides the AArch64-specific MCPlus builder.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "bolt/Core/MCPlusBuilder.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"

#define DEBUG_TYPE "mcplus"

using namespace llvm;
using namespace bolt;

namespace {

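// Read the NZCV condition flags into RegName: mrs <RegName>, nzcv.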
static void getSystemFlag(MCInst &Inst, MCPhysReg RegName) {
  Inst.setOpcode(AArch64::MRS);
  Inst.clear();
  Inst.addOperand(MCOperand::createReg(RegName));
  Inst.addOperand(MCOperand::createImm(AArch64SysReg::NZCV));
}

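// Write RegName back into the NZCV condition flags: msr nzcv, <RegName>.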
static void setSystemFlag(MCInst &Inst, MCPhysReg RegName) {
  Inst.setOpcode(AArch64::MSR);
  Inst.clear();
  Inst.addOperand(MCOperand::createImm(AArch64SysReg::NZCV));
  Inst.addOperand(MCOperand::createReg(RegName));
}

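// Push Reg1/Reg2 as a pair: stp <Reg1>, <Reg2>, [sp, #-16]!. The immediate
// operand of STPXpre/LDPXpost is scaled by the register size, so +/-2 means
// 16 bytes.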
static void createPushRegisters(MCInst &Inst, MCPhysReg Reg1, MCPhysReg Reg2) {
  Inst.clear();
  unsigned NewOpcode = AArch64::STPXpre;
  Inst.setOpcode(NewOpcode);
  Inst.addOperand(MCOperand::createReg(AArch64::SP));
  Inst.addOperand(MCOperand::createReg(Reg1));
  Inst.addOperand(MCOperand::createReg(Reg2));
  Inst.addOperand(MCOperand::createReg(AArch64::SP));
  Inst.addOperand(MCOperand::createImm(-2));
}

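// Pop the pair pushed by createPushRegisters: ldp <Reg1>, <Reg2>, [sp], #16.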
static void createPopRegisters(MCInst &Inst, MCPhysReg Reg1, MCPhysReg Reg2) {
  Inst.clear();
  unsigned NewOpcode = AArch64::LDPXpost;
  Inst.setOpcode(NewOpcode);
  Inst.addOperand(MCOperand::createReg(AArch64::SP));
  Inst.addOperand(MCOperand::createReg(Reg1));
  Inst.addOperand(MCOperand::createReg(Reg2));
  Inst.addOperand(MCOperand::createReg(AArch64::SP));
  Inst.addOperand(MCOperand::createImm(2));
}

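// Load a 64-bit value from [From] into To. When From is SP, pop it instead:
// ldr <To>, [sp], #16; otherwise ldr <To>, [<From>].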
static void loadReg(MCInst &Inst, MCPhysReg To, MCPhysReg From) {
  Inst.setOpcode(AArch64::LDRXui);
  Inst.clear();
  if (From == AArch64::SP) {
    Inst.setOpcode(AArch64::LDRXpost);
    Inst.addOperand(MCOperand::createReg(From));
    Inst.addOperand(MCOperand::createReg(To));
    Inst.addOperand(MCOperand::createReg(From));
    Inst.addOperand(MCOperand::createImm(16));
  } else {
    Inst.addOperand(MCOperand::createReg(To));
    Inst.addOperand(MCOperand::createReg(From));
    Inst.addOperand(MCOperand::createImm(0));
  }
}

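// Store From at [To]. When To is SP, push it instead: str <From>, [sp, #-16]!;
// otherwise str <From>, [<To>].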
static void storeReg(MCInst &Inst, MCPhysReg From, MCPhysReg To) {
  Inst.setOpcode(AArch64::STRXui);
  Inst.clear();
  if (To == AArch64::SP) {
    Inst.setOpcode(AArch64::STRXpre);
    Inst.addOperand(MCOperand::createReg(To));
    Inst.addOperand(MCOperand::createReg(From));
    Inst.addOperand(MCOperand::createReg(To));
    Inst.addOperand(MCOperand::createImm(-16));
  } else {
    Inst.addOperand(MCOperand::createReg(From));
    Inst.addOperand(MCOperand::createReg(To));
    Inst.addOperand(MCOperand::createImm(0));
  }
}

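// Atomically add RegCnt to the memory location [RegTo], discarding the loaded
// value (XZR): ldadd <RegCnt>, xzr, [<RegTo>].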
static void atomicAdd(MCInst &Inst, MCPhysReg RegTo, MCPhysReg RegCnt) {
  // NOTE: Supports only AArch64 targets with the LSE extension (ARMv8.1+).
  Inst.setOpcode(AArch64::LDADDX);
  Inst.clear();
  Inst.addOperand(MCOperand::createReg(AArch64::XZR));
  Inst.addOperand(MCOperand::createReg(RegCnt));
  Inst.addOperand(MCOperand::createReg(RegTo));
}

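// Materialize a 16-bit immediate: movz <Reg>, #Imm.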
static void createMovz(MCInst &Inst, MCPhysReg Reg, uint64_t Imm) {
  assert(Imm <= UINT16_MAX && "Invalid Imm size");
  Inst.clear();
  Inst.setOpcode(AArch64::MOVZXi);
  Inst.addOperand(MCOperand::createReg(Reg));
  Inst.addOperand(MCOperand::createImm(Imm & 0xFFFF));
  Inst.addOperand(MCOperand::createImm(0));
}

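// Emit an atomic increment of the 64-bit counter at [RegTo]:
//   movz <RegTmp>, #1
//   ldadd <RegTmp>, xzr, [<RegTo>]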
static InstructionListType createIncMemory(MCPhysReg RegTo, MCPhysReg RegTmp) {
  InstructionListType Insts;
  Insts.emplace_back();
  createMovz(Insts.back(), RegTmp, 1);
  Insts.emplace_back();
  atomicAdd(Insts.back(), RegTo, RegTmp);
  return Insts;
}

class AArch64MCPlusBuilder : public MCPlusBuilder {
public:
  using MCPlusBuilder::MCPlusBuilder;

  bool equals(const MCTargetExpr &A, const MCTargetExpr &B,
              CompFuncTy Comp) const override {
    const auto &AArch64ExprA = cast<AArch64MCExpr>(A);
    const auto &AArch64ExprB = cast<AArch64MCExpr>(B);
    if (AArch64ExprA.getKind() != AArch64ExprB.getKind())
      return false;

    return MCPlusBuilder::equals(*AArch64ExprA.getSubExpr(),
                                 *AArch64ExprB.getSubExpr(), Comp);
  }

  bool isMacroOpFusionPair(ArrayRef<MCInst> Insts) const override {
    return false;
  }

  bool shortenInstruction(MCInst &, const MCSubtargetInfo &) const override {
    return false;
  }

  bool isADRP(const MCInst &Inst) const override {
    return Inst.getOpcode() == AArch64::ADRP;
  }

  bool isADR(const MCInst &Inst) const override {
    return Inst.getOpcode() == AArch64::ADR;
  }

  bool isAddXri(const MCInst &Inst) const {
    return Inst.getOpcode() == AArch64::ADDXri;
  }

  void getADRReg(const MCInst &Inst, MCPhysReg &RegName) const override {
    assert((isADR(Inst) || isADRP(Inst)) && "Not an ADR instruction");
    assert(MCPlus::getNumPrimeOperands(Inst) != 0 &&
           "No operands for ADR instruction");
    assert(Inst.getOperand(0).isReg() &&
           "Unexpected operand in ADR instruction");
    RegName = Inst.getOperand(0).getReg();
  }

  bool isTB(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::TBNZW ||
            Inst.getOpcode() == AArch64::TBNZX ||
            Inst.getOpcode() == AArch64::TBZW ||
            Inst.getOpcode() == AArch64::TBZX);
  }

  bool isCB(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::CBNZW ||
            Inst.getOpcode() == AArch64::CBNZX ||
            Inst.getOpcode() == AArch64::CBZW ||
            Inst.getOpcode() == AArch64::CBZX);
  }

  bool isMOVW(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::MOVKWi ||
            Inst.getOpcode() == AArch64::MOVKXi ||
            Inst.getOpcode() == AArch64::MOVNWi ||
            Inst.getOpcode() == AArch64::MOVNXi ||
            Inst.getOpcode() == AArch64::MOVZXi ||
            Inst.getOpcode() == AArch64::MOVZWi);
  }

  bool isADD(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::ADDSWri ||
            Inst.getOpcode() == AArch64::ADDSWrr ||
            Inst.getOpcode() == AArch64::ADDSWrs ||
            Inst.getOpcode() == AArch64::ADDSWrx ||
            Inst.getOpcode() == AArch64::ADDSXri ||
            Inst.getOpcode() == AArch64::ADDSXrr ||
            Inst.getOpcode() == AArch64::ADDSXrs ||
            Inst.getOpcode() == AArch64::ADDSXrx ||
            Inst.getOpcode() == AArch64::ADDSXrx64 ||
            Inst.getOpcode() == AArch64::ADDWri ||
            Inst.getOpcode() == AArch64::ADDWrr ||
            Inst.getOpcode() == AArch64::ADDWrs ||
            Inst.getOpcode() == AArch64::ADDWrx ||
            Inst.getOpcode() == AArch64::ADDXri ||
            Inst.getOpcode() == AArch64::ADDXrr ||
            Inst.getOpcode() == AArch64::ADDXrs ||
            Inst.getOpcode() == AArch64::ADDXrx ||
            Inst.getOpcode() == AArch64::ADDXrx64);
  }

  bool isLDRB(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::LDRBBpost ||
            Inst.getOpcode() == AArch64::LDRBBpre ||
            Inst.getOpcode() == AArch64::LDRBBroW ||
            Inst.getOpcode() == AArch64::LDRBBroX ||
            Inst.getOpcode() == AArch64::LDRBBui ||
            Inst.getOpcode() == AArch64::LDRSBWpost ||
            Inst.getOpcode() == AArch64::LDRSBWpre ||
            Inst.getOpcode() == AArch64::LDRSBWroW ||
            Inst.getOpcode() == AArch64::LDRSBWroX ||
            Inst.getOpcode() == AArch64::LDRSBWui ||
            Inst.getOpcode() == AArch64::LDRSBXpost ||
            Inst.getOpcode() == AArch64::LDRSBXpre ||
            Inst.getOpcode() == AArch64::LDRSBXroW ||
            Inst.getOpcode() == AArch64::LDRSBXroX ||
            Inst.getOpcode() == AArch64::LDRSBXui);
  }

  bool isLDRH(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::LDRHHpost ||
            Inst.getOpcode() == AArch64::LDRHHpre ||
            Inst.getOpcode() == AArch64::LDRHHroW ||
            Inst.getOpcode() == AArch64::LDRHHroX ||
            Inst.getOpcode() == AArch64::LDRHHui ||
            Inst.getOpcode() == AArch64::LDRSHWpost ||
            Inst.getOpcode() == AArch64::LDRSHWpre ||
            Inst.getOpcode() == AArch64::LDRSHWroW ||
            Inst.getOpcode() == AArch64::LDRSHWroX ||
            Inst.getOpcode() == AArch64::LDRSHWui ||
            Inst.getOpcode() == AArch64::LDRSHXpost ||
            Inst.getOpcode() == AArch64::LDRSHXpre ||
            Inst.getOpcode() == AArch64::LDRSHXroW ||
            Inst.getOpcode() == AArch64::LDRSHXroX ||
            Inst.getOpcode() == AArch64::LDRSHXui);
  }

  bool isLDRW(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::LDRWpost ||
            Inst.getOpcode() == AArch64::LDRWpre ||
            Inst.getOpcode() == AArch64::LDRWroW ||
            Inst.getOpcode() == AArch64::LDRWroX ||
            Inst.getOpcode() == AArch64::LDRWui);
  }

  bool isLDRX(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::LDRXpost ||
            Inst.getOpcode() == AArch64::LDRXpre ||
            Inst.getOpcode() == AArch64::LDRXroW ||
            Inst.getOpcode() == AArch64::LDRXroX ||
            Inst.getOpcode() == AArch64::LDRXui);
  }

  bool mayLoad(const MCInst &Inst) const override {
    return isLDRB(Inst) || isLDRH(Inst) || isLDRW(Inst) || isLDRX(Inst);
  }

  bool isAArch64Exclusive(const MCInst &Inst) const override {
    return (Inst.getOpcode() == AArch64::LDXPX ||
            Inst.getOpcode() == AArch64::LDXPW ||
            Inst.getOpcode() == AArch64::LDXRX ||
            Inst.getOpcode() == AArch64::LDXRW ||
            Inst.getOpcode() == AArch64::LDXRH ||
            Inst.getOpcode() == AArch64::LDXRB ||
            Inst.getOpcode() == AArch64::STXPX ||
            Inst.getOpcode() == AArch64::STXPW ||
            Inst.getOpcode() == AArch64::STXRX ||
            Inst.getOpcode() == AArch64::STXRW ||
            Inst.getOpcode() == AArch64::STXRH ||
            Inst.getOpcode() == AArch64::STXRB ||
            Inst.getOpcode() == AArch64::LDAXPX ||
            Inst.getOpcode() == AArch64::LDAXPW ||
            Inst.getOpcode() == AArch64::LDAXRX ||
            Inst.getOpcode() == AArch64::LDAXRW ||
            Inst.getOpcode() == AArch64::LDAXRH ||
            Inst.getOpcode() == AArch64::LDAXRB ||
            Inst.getOpcode() == AArch64::STLXPX ||
            Inst.getOpcode() == AArch64::STLXPW ||
            Inst.getOpcode() == AArch64::STLXRX ||
            Inst.getOpcode() == AArch64::STLXRW ||
            Inst.getOpcode() == AArch64::STLXRH ||
            Inst.getOpcode() == AArch64::STLXRB ||
            Inst.getOpcode() == AArch64::CLREX);
  }

  bool isLoadFromStack(const MCInst &Inst) const {
    if (!mayLoad(Inst))
      return false;
    for (const MCOperand &Operand : useOperands(Inst)) {
      if (!Operand.isReg())
        continue;
      unsigned Reg = Operand.getReg();
      if (Reg == AArch64::SP || Reg == AArch64::WSP || Reg == AArch64::FP ||
          Reg == AArch64::W29)
        return true;
    }
    return false;
  }

  bool isRegToRegMove(const MCInst &Inst, MCPhysReg &From,
                      MCPhysReg &To) const override {
    if (Inst.getOpcode() != AArch64::ORRXrs)
      return false;
    if (Inst.getOperand(1).getReg() != AArch64::XZR)
      return false;
    if (Inst.getOperand(3).getImm() != 0)
      return false;
    From = Inst.getOperand(2).getReg();
    To = Inst.getOperand(0).getReg();
    return true;
  }

  bool isIndirectCall(const MCInst &Inst) const override {
    return Inst.getOpcode() == AArch64::BLR;
  }

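  /// Return the stack pointer register for the given access size in bytes
  /// (4 -> WSP, 8 -> SP).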
  MCPhysReg getSpRegister(int Size) const {
    switch (Size) {
    case 4:
      return AArch64::WSP;
    case 8:
      return AArch64::SP;
    default:
      llvm_unreachable("Unexpected size");
    }
  }

  MCPhysReg getIntArgRegister(unsigned ArgNo) const override {
    switch (ArgNo) {
    case 0:
      return AArch64::X0;
    case 1:
      return AArch64::X1;
    case 2:
      return AArch64::X2;
    case 3:
      return AArch64::X3;
    case 4:
      return AArch64::X4;
    case 5:
      return AArch64::X5;
    case 6:
      return AArch64::X6;
    case 7:
      return AArch64::X7;
    default:
      return getNoRegister();
    }
  }

  bool hasPCRelOperand(const MCInst &Inst) const override {
    // ADRP is blacklisted and is an exception. Even though it has a
    // PC-relative operand, this operand is not a complete symbol reference
    // and BOLT shouldn't try to process it in isolation.
    if (isADRP(Inst))
      return false;

    if (isADR(Inst))
      return true;

    // Look for literal addressing mode (see C1-143 ARM DDI 0487B.a)
    const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
    for (unsigned I = 0, E = MCII.getNumOperands(); I != E; ++I)
      if (MCII.operands()[I].OperandType == MCOI::OPERAND_PCREL)
        return true;

    return false;
  }

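  /// Evaluate the immediate displacement of an ADR/ADRP instruction. Returns
  /// true and sets Imm (shifted left by 12 for ADRP) when the operand is a
  /// plain immediate; when it is still a symbolic expression, stores it in
  /// *DispExpr and returns false.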
  bool evaluateADR(const MCInst &Inst, int64_t &Imm,
                   const MCExpr **DispExpr) const {
    assert((isADR(Inst) || isADRP(Inst)) && "Not an ADR instruction");

    const MCOperand &Label = Inst.getOperand(1);
    if (!Label.isImm()) {
      assert(Label.isExpr() && "Unexpected ADR operand");
      assert(DispExpr && "DispExpr must be set");
      *DispExpr = Label.getExpr();
      return false;
    }

    if (Inst.getOpcode() == AArch64::ADR) {
      Imm = Label.getImm();
      return true;
    }
    Imm = Label.getImm() << 12;
    return true;
  }

  bool evaluateAArch64MemoryOperand(const MCInst &Inst, int64_t &DispImm,
                                    const MCExpr **DispExpr = nullptr) const {
    if (isADR(Inst) || isADRP(Inst))
      return evaluateADR(Inst, DispImm, DispExpr);

    // Literal addressing mode
    const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
    for (unsigned I = 0, E = MCII.getNumOperands(); I != E; ++I) {
      if (MCII.operands()[I].OperandType != MCOI::OPERAND_PCREL)
        continue;

      if (!Inst.getOperand(I).isImm()) {
        assert(Inst.getOperand(I).isExpr() && "Unexpected PCREL operand");
        assert(DispExpr && "DispExpr must be set");
        *DispExpr = Inst.getOperand(I).getExpr();
        return true;
      }

      DispImm = Inst.getOperand(I).getImm() * 4;
      return true;
    }
    return false;
  }

  bool evaluateMemOperandTarget(const MCInst &Inst, uint64_t &Target,
                                uint64_t Address,
                                uint64_t Size) const override {
    int64_t DispValue;
    const MCExpr *DispExpr = nullptr;
    if (!evaluateAArch64MemoryOperand(Inst, DispValue, &DispExpr))
      return false;

    // Make sure it's a well-formed addressing we can statically evaluate.
    if (DispExpr)
      return false;

    Target = DispValue;
    if (Inst.getOpcode() == AArch64::ADRP)
      Target += Address & ~0xFFFULL;
    else
      Target += Address;
    return true;
  }

  MCInst::iterator getMemOperandDisp(MCInst &Inst) const override {
    MCInst::iterator OI = Inst.begin();
    if (isADR(Inst) || isADRP(Inst)) {
      assert(MCPlus::getNumPrimeOperands(Inst) >= 2 &&
             "Unexpected number of operands");
      return ++OI;
    }
    const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
    for (unsigned I = 0, E = MCII.getNumOperands(); I != E; ++I) {
      if (MCII.operands()[I].OperandType == MCOI::OPERAND_PCREL)
        break;
      ++OI;
    }
    assert(OI != Inst.end() && "Literal operand not found");
    return OI;
  }

  bool replaceMemOperandDisp(MCInst &Inst, MCOperand Operand) const override {
    MCInst::iterator OI = getMemOperandDisp(Inst);
    *OI = Operand;
    return true;
  }

  void getCalleeSavedRegs(BitVector &Regs) const override {
    Regs |= getAliases(AArch64::X18);
    Regs |= getAliases(AArch64::X19);
    Regs |= getAliases(AArch64::X20);
    Regs |= getAliases(AArch64::X21);
    Regs |= getAliases(AArch64::X22);
    Regs |= getAliases(AArch64::X23);
    Regs |= getAliases(AArch64::X24);
    Regs |= getAliases(AArch64::X25);
    Regs |= getAliases(AArch64::X26);
    Regs |= getAliases(AArch64::X27);
    Regs |= getAliases(AArch64::X28);
    Regs |= getAliases(AArch64::LR);
    Regs |= getAliases(AArch64::FP);
  }

  const MCExpr *getTargetExprFor(MCInst &Inst, const MCExpr *Expr,
                                 MCContext &Ctx,
                                 uint64_t RelType) const override {

    if (isADR(Inst) || RelType == ELF::R_AARCH64_ADR_PREL_LO21 ||
        RelType == ELF::R_AARCH64_TLSDESC_ADR_PREL21) {
      return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, Ctx);
    } else if (isADRP(Inst) || RelType == ELF::R_AARCH64_ADR_PREL_PG_HI21 ||
               RelType == ELF::R_AARCH64_ADR_PREL_PG_HI21_NC ||
               RelType == ELF::R_AARCH64_TLSDESC_ADR_PAGE21 ||
               RelType == ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
               RelType == ELF::R_AARCH64_ADR_GOT_PAGE) {
      // Never emit a GOT reloc, we handled this in
      // RewriteInstance::readRelocations().
      return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, Ctx);
    } else {
      switch (RelType) {
      case ELF::R_AARCH64_ADD_ABS_LO12_NC:
      case ELF::R_AARCH64_LD64_GOT_LO12_NC:
      case ELF::R_AARCH64_LDST8_ABS_LO12_NC:
      case ELF::R_AARCH64_LDST16_ABS_LO12_NC:
      case ELF::R_AARCH64_LDST32_ABS_LO12_NC:
      case ELF::R_AARCH64_LDST64_ABS_LO12_NC:
      case ELF::R_AARCH64_LDST128_ABS_LO12_NC:
      case ELF::R_AARCH64_TLSDESC_ADD_LO12:
      case ELF::R_AARCH64_TLSDESC_LD64_LO12:
      case ELF::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
      case ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
        return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_LO12, Ctx);
      case ELF::R_AARCH64_MOVW_UABS_G3:
        return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_G3, Ctx);
      case ELF::R_AARCH64_MOVW_UABS_G2:
      case ELF::R_AARCH64_MOVW_UABS_G2_NC:
        return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_G2_NC, Ctx);
      case ELF::R_AARCH64_MOVW_UABS_G1:
      case ELF::R_AARCH64_MOVW_UABS_G1_NC:
        return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_G1_NC, Ctx);
      case ELF::R_AARCH64_MOVW_UABS_G0:
      case ELF::R_AARCH64_MOVW_UABS_G0_NC:
        return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_G0_NC, Ctx);
      default:
        break;
      }
    }
    return Expr;
  }

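  /// Select the operand number that carries the symbolic reference of
  /// \p Inst. When OpNum is 0, auto-detect the operand based on the
  /// instruction kind; returns false if OpNum is out of range.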
  bool getSymbolRefOperandNum(const MCInst &Inst, unsigned &OpNum) const {
    if (OpNum >= MCPlus::getNumPrimeOperands(Inst))
      return false;

    // Auto-select correct operand number
    if (OpNum == 0) {
      if (isConditionalBranch(Inst) || isADR(Inst) || isADRP(Inst) ||
          isMOVW(Inst))
        OpNum = 1;
      if (isTB(Inst) || isAddXri(Inst))
        OpNum = 2;
    }

    return true;
  }

  const MCSymbol *getTargetSymbol(const MCExpr *Expr) const override {
    auto *AArchExpr = dyn_cast<AArch64MCExpr>(Expr);
    if (AArchExpr && AArchExpr->getSubExpr())
      return getTargetSymbol(AArchExpr->getSubExpr());

    auto *BinExpr = dyn_cast<MCBinaryExpr>(Expr);
    if (BinExpr)
      return getTargetSymbol(BinExpr->getLHS());

    auto *SymExpr = dyn_cast<MCSymbolRefExpr>(Expr);
    if (SymExpr && SymExpr->getKind() == MCSymbolRefExpr::VK_None)
      return &SymExpr->getSymbol();

    return nullptr;
  }

  const MCSymbol *getTargetSymbol(const MCInst &Inst,
                                  unsigned OpNum = 0) const override {
    if (!getSymbolRefOperandNum(Inst, OpNum))
      return nullptr;

    const MCOperand &Op = Inst.getOperand(OpNum);
    if (!Op.isExpr())
      return nullptr;

    return getTargetSymbol(Op.getExpr());
  }

  int64_t getTargetAddend(const MCExpr *Expr) const override {
    auto *AArchExpr = dyn_cast<AArch64MCExpr>(Expr);
    if (AArchExpr && AArchExpr->getSubExpr())
      return getTargetAddend(AArchExpr->getSubExpr());

    auto *BinExpr = dyn_cast<MCBinaryExpr>(Expr);
    if (BinExpr && BinExpr->getOpcode() == MCBinaryExpr::Add)
      return getTargetAddend(BinExpr->getRHS());

    auto *ConstExpr = dyn_cast<MCConstantExpr>(Expr);
    if (ConstExpr)
      return ConstExpr->getValue();

    return 0;
  }

  int64_t getTargetAddend(const MCInst &Inst,
                          unsigned OpNum = 0) const override {
    if (!getSymbolRefOperandNum(Inst, OpNum))
      return 0;

    const MCOperand &Op = Inst.getOperand(OpNum);
    if (!Op.isExpr())
      return 0;

    return getTargetAddend(Op.getExpr());
  }

  bool replaceBranchTarget(MCInst &Inst, const MCSymbol *TBB,
                           MCContext *Ctx) const override {
    assert((isCall(Inst) || isBranch(Inst)) && !isIndirectBranch(Inst) &&
           "Invalid instruction");
    assert(MCPlus::getNumPrimeOperands(Inst) >= 1 &&
           "Invalid number of operands");
    MCInst::iterator OI = Inst.begin();

    if (isConditionalBranch(Inst)) {
      assert(MCPlus::getNumPrimeOperands(Inst) >= 2 &&
             "Invalid number of operands");
      ++OI;
    }

    if (isTB(Inst)) {
      assert(MCPlus::getNumPrimeOperands(Inst) >= 3 &&
             "Invalid number of operands");
      OI = Inst.begin() + 2;
    }

    *OI = MCOperand::createExpr(
        MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx));
    return true;
  }

  /// Matches indirect branch patterns in AArch64 related to a jump table (JT),
  /// helping us to build the complete CFG. A typical indirect branch to
  /// a jump table entry in AArch64 looks like the following:
  ///
  ///   adrp    x1, #-7585792           # Get JT Page location
  ///   add     x1, x1, #692            # Complement with JT Page offset
  ///   ldrh    w0, [x1, w0, uxtw #1]   # Loads JT entry
  ///   adr     x1, #12                 # Get PC + 12 (end of this BB) used next
  ///   add     x0, x1, w0, sxth #2     # Finish building branch target
  ///                                   # (entries in JT are relative to the end
  ///                                   #  of this BB)
  ///   br      x0                      # Indirect jump instruction
  ///
  bool analyzeIndirectBranchFragment(
      const MCInst &Inst,
      DenseMap<const MCInst *, SmallVector<MCInst *, 4>> &UDChain,
      const MCExpr *&JumpTable, int64_t &Offset, int64_t &ScaleValue,
      MCInst *&PCRelBase) const {
    // Expect AArch64 BR
    assert(Inst.getOpcode() == AArch64::BR && "Unexpected opcode");

    // Match the indirect branch pattern for AArch64
    SmallVector<MCInst *, 4> &UsesRoot = UDChain[&Inst];
    if (UsesRoot.empty() || UsesRoot[0] == nullptr)
      return false;

    const MCInst *DefAdd = UsesRoot[0];

    // Now we match an ADD
    if (!isADD(*DefAdd)) {
      // If the address is not broken up into two parts, this is not branching
      // according to a jump table entry. Fail.
      return false;
    }
    if (DefAdd->getOpcode() == AArch64::ADDXri) {
      // This can happen when there is no offset, but a direct jump that was
      // transformed into an indirect one (indirect tail call):
      //   ADRP   x2, Perl_re_compiler
      //   ADD    x2, x2, :lo12:Perl_re_compiler
      //   BR     x2
      return false;
    }
    if (DefAdd->getOpcode() == AArch64::ADDXrs) {
      // Covers the less common pattern where JT entries are relative to
      // the JT itself (like x86). Seems less efficient since we can't
      // assume the JT is aligned at 4B boundary and thus drop 2 bits from
      // JT values.
      // cde264:
      //    adrp    x12, #21544960  ; 216a000
      //    add     x12, x12, #1696 ; 216a6a0  (JT object in .rodata)
      //    ldrsw   x8, [x12, x8, lsl #2]   --> loads e.g. 0xfeb73bd8
      //  * add     x8, x8, x12   --> = cde278, next block
      //    br      x8
      // cde278:
      //
      // Parsed as ADDXrs reg:x8 reg:x8 reg:x12 imm:0
      return false;
    }
    assert(DefAdd->getOpcode() == AArch64::ADDXrx &&
           "Failed to match indirect branch!");

    // Validate ADD operands
    int64_t OperandExtension = DefAdd->getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getArithShiftValue(OperandExtension);
    AArch64_AM::ShiftExtendType ExtendType =
        AArch64_AM::getArithExtendType(OperandExtension);
    if (ShiftVal != 2)
      llvm_unreachable("Failed to match indirect branch! (fragment 2)");

    if (ExtendType == AArch64_AM::SXTB)
      ScaleValue = 1LL;
    else if (ExtendType == AArch64_AM::SXTH)
      ScaleValue = 2LL;
    else if (ExtendType == AArch64_AM::SXTW)
      ScaleValue = 4LL;
    else
      llvm_unreachable("Failed to match indirect branch! (fragment 3)");

    // Match an ADR to load base address to be used when addressing JT targets
    SmallVector<MCInst *, 4> &UsesAdd = UDChain[DefAdd];
    if (UsesAdd.size() <= 2 || UsesAdd[1] == nullptr || UsesAdd[2] == nullptr) {
      // This happens when we don't have enough context about this jump table
      // because the jumping code sequence was split into multiple basic
      // blocks. This was observed in the wild in HHVM code (dispatchImpl).
      return false;
    }
    MCInst *DefBaseAddr = UsesAdd[1];
    assert(DefBaseAddr->getOpcode() == AArch64::ADR &&
           "Failed to match indirect branch pattern! (fragment 3)");

    PCRelBase = DefBaseAddr;
    // Match LOAD to load the jump table (relative) target
    const MCInst *DefLoad = UsesAdd[2];
    assert(mayLoad(*DefLoad) &&
           "Failed to match indirect branch load pattern! (1)");
    assert((ScaleValue != 1LL || isLDRB(*DefLoad)) &&
           "Failed to match indirect branch load pattern! (2)");
    assert((ScaleValue != 2LL || isLDRH(*DefLoad)) &&
           "Failed to match indirect branch load pattern! (3)");

    // Match ADD that calculates the JumpTable Base Address (not the offset)
    SmallVector<MCInst *, 4> &UsesLoad = UDChain[DefLoad];
    const MCInst *DefJTBaseAdd = UsesLoad[1];
    MCPhysReg From, To;
    if (DefJTBaseAdd == nullptr || isLoadFromStack(*DefJTBaseAdd) ||
        isRegToRegMove(*DefJTBaseAdd, From, To)) {
      // Sometimes base address may have been defined in another basic block
      // (hoisted). Return with no jump table info.
      JumpTable = nullptr;
      return true;
    }

    assert(DefJTBaseAdd->getOpcode() == AArch64::ADDXri &&
           "Failed to match jump table base address pattern! (1)");

    if (DefJTBaseAdd->getOperand(2).isImm())
      Offset = DefJTBaseAdd->getOperand(2).getImm();
    SmallVector<MCInst *, 4> &UsesJTBaseAdd = UDChain[DefJTBaseAdd];
    const MCInst *DefJTBasePage = UsesJTBaseAdd[1];
    if (DefJTBasePage == nullptr || isLoadFromStack(*DefJTBasePage)) {
      JumpTable = nullptr;
      return true;
    }
    assert(DefJTBasePage->getOpcode() == AArch64::ADRP &&
           "Failed to match jump table base page pattern! (2)");
    if (DefJTBasePage->getOperand(1).isExpr())
      JumpTable = DefJTBasePage->getOperand(1).getExpr();
    return true;
  }

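  /// Compute a use-def chain local to one basic block: for every instruction,
  /// record, per register operand, the last preceding instruction (if any)
  /// that defined that register. \p CurInstr is processed last since it is
  /// not yet part of the instruction stream.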
  DenseMap<const MCInst *, SmallVector<MCInst *, 4>>
  computeLocalUDChain(const MCInst *CurInstr, InstructionIterator Begin,
                      InstructionIterator End) const {
    DenseMap<int, MCInst *> RegAliasTable;
    DenseMap<const MCInst *, SmallVector<MCInst *, 4>> Uses;

    auto addInstrOperands = [&](const MCInst &Instr) {
      // Update Uses table
      for (const MCOperand &Operand : MCPlus::primeOperands(Instr)) {
        if (!Operand.isReg())
          continue;
        unsigned Reg = Operand.getReg();
        MCInst *AliasInst = RegAliasTable[Reg];
        Uses[&Instr].push_back(AliasInst);
        LLVM_DEBUG({
          dbgs() << "Adding reg operand " << Reg << " refs ";
          if (AliasInst != nullptr)
            AliasInst->dump();
          else
            dbgs() << "\n";
        });
      }
    };

    LLVM_DEBUG(dbgs() << "computeLocalUDChain\n");
    bool TerminatorSeen = false;
    for (auto II = Begin; II != End; ++II) {
      MCInst &Instr = *II;
      // Ignore nops and CFIs
      if (isPseudo(Instr) || isNoop(Instr))
        continue;
      if (TerminatorSeen) {
        RegAliasTable.clear();
        Uses.clear();
      }

      LLVM_DEBUG(dbgs() << "Now updating for:\n ");
      LLVM_DEBUG(Instr.dump());
      addInstrOperands(Instr);

      BitVector Regs = BitVector(RegInfo->getNumRegs(), false);
      getWrittenRegs(Instr, Regs);

      // Update register definitions after this point
      for (int Idx : Regs.set_bits()) {
        RegAliasTable[Idx] = &Instr;
        LLVM_DEBUG(dbgs() << "Setting reg " << Idx
                          << " def to current instr.\n");
      }

      TerminatorSeen = isTerminator(Instr);
    }

    // Process the last instruction, which is not currently added into the
    // instruction stream
    if (CurInstr)
      addInstrOperands(*CurInstr);

    return Uses;
  }

  IndirectBranchType analyzeIndirectBranch(
      MCInst &Instruction, InstructionIterator Begin, InstructionIterator End,
      const unsigned PtrSize, MCInst *&MemLocInstrOut, unsigned &BaseRegNumOut,
      unsigned &IndexRegNumOut, int64_t &DispValueOut,
      const MCExpr *&DispExprOut, MCInst *&PCRelBaseOut) const override {
    MemLocInstrOut = nullptr;
    BaseRegNumOut = AArch64::NoRegister;
    IndexRegNumOut = AArch64::NoRegister;
    DispValueOut = 0;
    DispExprOut = nullptr;

    // An instruction referencing memory used by the jump instruction (directly
    // or via a register). This location could be an array of function pointers
    // in the case of an indirect tail call, or a jump table.
    MCInst *MemLocInstr = nullptr;

    // Analyze the memory location.
    int64_t ScaleValue, DispValue;
    const MCExpr *DispExpr;

    DenseMap<const MCInst *, SmallVector<llvm::MCInst *, 4>> UDChain =
        computeLocalUDChain(&Instruction, Begin, End);
    MCInst *PCRelBase;
    if (!analyzeIndirectBranchFragment(Instruction, UDChain, DispExpr,
                                       DispValue, ScaleValue, PCRelBase))
      return IndirectBranchType::UNKNOWN;

    MemLocInstrOut = MemLocInstr;
    DispValueOut = DispValue;
    DispExprOut = DispExpr;
    PCRelBaseOut = PCRelBase;
    return IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE;
  }

  /// Matches the PLT entry pattern and returns the associated GOT entry
  /// address. A typical PLT entry looks like the following:
  ///
  ///    adrp    x16, 230000
  ///    ldr     x17, [x16, #3040]
  ///    add     x16, x16, #0xbe0
  ///    br      x17
  ///
  uint64_t analyzePLTEntry(MCInst &Instruction, InstructionIterator Begin,
                           InstructionIterator End,
                           uint64_t BeginPC) const override {
    // Check branch instruction
    MCInst *Branch = &Instruction;
    assert(Branch->getOpcode() == AArch64::BR && "Unexpected opcode");

    DenseMap<const MCInst *, SmallVector<llvm::MCInst *, 4>> UDChain =
        computeLocalUDChain(Branch, Begin, End);

    // Match ldr instruction
    SmallVector<MCInst *, 4> &BranchUses = UDChain[Branch];
    if (BranchUses.empty() || BranchUses[0] == nullptr)
      return 0;

    // Check ldr instruction
    const MCInst *Ldr = BranchUses[0];
    if (Ldr->getOpcode() != AArch64::LDRXui)
      return 0;

    // Get ldr value
    const unsigned ScaleLdr = 8; // LDRX operates on 8-byte segments
    assert(Ldr->getOperand(2).isImm() && "Unexpected ldr operand");
    const uint64_t Offset = Ldr->getOperand(2).getImm() * ScaleLdr;

    // Match adrp instruction
    SmallVector<MCInst *, 4> &LdrUses = UDChain[Ldr];
    if (LdrUses.size() < 2 || LdrUses[1] == nullptr)
      return 0;

    // Check adrp instruction
    MCInst *Adrp = LdrUses[1];
    if (Adrp->getOpcode() != AArch64::ADRP)
      return 0;

    // Get adrp instruction PC
    const unsigned InstSize = 4;
    uint64_t AdrpPC = BeginPC;
    for (InstructionIterator It = Begin; It != End; ++It) {
      if (&(*It) == Adrp)
        break;
      AdrpPC += InstSize;
    }

    // Get adrp value
    uint64_t Base;
    assert(Adrp->getOperand(1).isImm() && "Unexpected adrp operand");
    bool Ret = evaluateMemOperandTarget(*Adrp, Base, AdrpPC, InstSize);
    assert(Ret && "Failed to evaluate adrp");
    (void)Ret;

    return Base + Offset;
  }

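  /// Return the opposite-sense opcode of a TBZ/TBNZ/CBZ/CBNZ branch.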
  unsigned getInvertedBranchOpcode(unsigned Opcode) const {
    switch (Opcode) {
    default:
      llvm_unreachable("Failed to invert branch opcode");
      return Opcode;
    case AArch64::TBZW:     return AArch64::TBNZW;
    case AArch64::TBZX:     return AArch64::TBNZX;
    case AArch64::TBNZW:    return AArch64::TBZW;
    case AArch64::TBNZX:    return AArch64::TBZX;
    case AArch64::CBZW:     return AArch64::CBNZW;
    case AArch64::CBZX:     return AArch64::CBNZX;
    case AArch64::CBNZW:    return AArch64::CBZW;
    case AArch64::CBNZX:    return AArch64::CBZX;
    }
  }

  unsigned getCondCode(const MCInst &Inst) const override {
    // AArch64 does not track condition codes as separate operands here, so we
    // just return the opcode of the conditional branch.
    return Inst.getOpcode();
  }

  unsigned getCanonicalBranchCondCode(unsigned Opcode) const override {
    switch (Opcode) {
    default:
      return Opcode;
    case AArch64::TBNZW:    return AArch64::TBZW;
    case AArch64::TBNZX:    return AArch64::TBZX;
    case AArch64::CBNZW:    return AArch64::CBZW;
    case AArch64::CBNZX:    return AArch64::CBZX;
    }
  }

  bool reverseBranchCondition(MCInst &Inst, const MCSymbol *TBB,
                              MCContext *Ctx) const override {
    if (isTB(Inst) || isCB(Inst)) {
      Inst.setOpcode(getInvertedBranchOpcode(Inst.getOpcode()));
      assert(Inst.getOpcode() != 0 && "Invalid branch instruction");
    } else if (Inst.getOpcode() == AArch64::Bcc) {
      Inst.getOperand(0).setImm(AArch64CC::getInvertedCondCode(
          static_cast<AArch64CC::CondCode>(Inst.getOperand(0).getImm())));
      assert(Inst.getOperand(0).getImm() != AArch64CC::AL &&
             Inst.getOperand(0).getImm() != AArch64CC::NV &&
             "Can't reverse ALWAYS cond code");
    } else {
      LLVM_DEBUG(Inst.dump());
      llvm_unreachable("Unrecognized branch instruction");
    }
    return replaceBranchTarget(Inst, TBB, Ctx);
  }

  int getPCRelEncodingSize(const MCInst &Inst) const override {
    switch (Inst.getOpcode()) {
    default:
      llvm_unreachable("Failed to get pcrel encoding size");
      return 0;
    case AArch64::TBZW:     return 16;
    case AArch64::TBZX:     return 16;
    case AArch64::TBNZW:    return 16;
    case AArch64::TBNZX:    return 16;
    case AArch64::CBZW:     return 21;
    case AArch64::CBZX:     return 21;
    case AArch64::CBNZW:    return 21;
    case AArch64::CBNZX:    return 21;
    case AArch64::B:        return 28;
    case AArch64::BL:       return 28;
    case AArch64::Bcc:      return 21;
    }
  }

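  // A short jump (ADRP + ADD + BR) reaches +/-4 GiB, i.e. a 33-bit signed
  // displacement; an unconditional B has a 26-bit immediate scaled by 4,
  // i.e. a 28-bit signed displacement (+/-128 MiB).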
  int getShortJmpEncodingSize() const override { return 33; }

  int getUncondBranchEncodingSize() const override { return 28; }

  InstructionListType createCmpJE(MCPhysReg RegNo, int64_t Imm,
                                  const MCSymbol *Target,
                                  MCContext *Ctx) const override {
    InstructionListType Code;
    // cmp <RegNo>, #Imm (SUBS XZR, RegNo, Imm) leaves RegNo intact, then
    // b.eq <Target>.
    Code.emplace_back(MCInstBuilder(AArch64::SUBSXri)
                          .addReg(AArch64::XZR)
                          .addReg(RegNo)
                          .addImm(Imm)
                          .addImm(0));
    Code.emplace_back(MCInstBuilder(AArch64::Bcc)
                          .addImm(AArch64CC::EQ)
                          .addExpr(MCSymbolRefExpr::create(
                              Target, MCSymbolRefExpr::VK_None, *Ctx)));
    return Code;
  }

  bool createTailCall(MCInst &Inst, const MCSymbol *Target,
                      MCContext *Ctx) override {
    return createDirectCall(Inst, Target, Ctx, /*IsTailCall*/ true);
  }

  void createLongTailCall(InstructionListType &Seq, const MCSymbol *Target,
                          MCContext *Ctx) override {
    createShortJmp(Seq, Target, Ctx, /*IsTailCall*/ true);
  }

  bool createTrap(MCInst &Inst) const override {
    Inst.clear();
    Inst.setOpcode(AArch64::BRK);
    Inst.addOperand(MCOperand::createImm(1));
    return true;
  }

  bool convertJmpToTailCall(MCInst &Inst) override {
    setTailCall(Inst);
    return true;
  }

  bool convertTailCallToJmp(MCInst &Inst) override {
    removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
    clearOffset(Inst);
    if (getConditionalTailCall(Inst))
      unsetConditionalTailCall(Inst);
    return true;
  }

  bool lowerTailCall(MCInst &Inst) override {
    removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
    if (getConditionalTailCall(Inst))
      unsetConditionalTailCall(Inst);
    return true;
  }

  bool isNoop(const MCInst &Inst) const override {
    return Inst.getOpcode() == AArch64::HINT &&
           Inst.getOperand(0).getImm() == 0;
  }

  bool createNoop(MCInst &Inst) const override {
    Inst.setOpcode(AArch64::HINT);
    Inst.clear();
    Inst.addOperand(MCOperand::createImm(0));
    return true;
  }

  bool mayStore(const MCInst &Inst) const override { return false; }

  bool createDirectCall(MCInst &Inst, const MCSymbol *Target, MCContext *Ctx,
                        bool IsTailCall) override {
    Inst.setOpcode(IsTailCall ? AArch64::B : AArch64::BL);
    Inst.clear();
    Inst.addOperand(MCOperand::createExpr(getTargetExprFor(
        Inst, MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
        *Ctx, 0)));
    if (IsTailCall)
      convertJmpToTailCall(Inst);
    return true;
  }

  bool analyzeBranch(InstructionIterator Begin, InstructionIterator End,
                     const MCSymbol *&TBB, const MCSymbol *&FBB,
                     MCInst *&CondBranch,
                     MCInst *&UncondBranch) const override {
    auto I = End;

    while (I != Begin) {
      --I;

      // Ignore nops and CFIs
      if (isPseudo(*I) || isNoop(*I))
        continue;

      // Stop when we find the first non-terminator
      if (!isTerminator(*I) || isTailCall(*I) || !isBranch(*I))
        break;

      // Handle unconditional branches.
      if (isUnconditionalBranch(*I)) {
        // Any code seen after this unconditional branch is unreachable.
        // Ignore it.
        CondBranch = nullptr;
        UncondBranch = &*I;
        const MCSymbol *Sym = getTargetSymbol(*I);
        assert(Sym != nullptr &&
               "Couldn't extract BB symbol from jump operand");
        TBB = Sym;
        continue;
      }

      // Handle conditional branches and ignore indirect branches
      if (isIndirectBranch(*I))
        return false;

      if (CondBranch == nullptr) {
        const MCSymbol *TargetBB = getTargetSymbol(*I);
        if (TargetBB == nullptr) {
          // Unrecognized branch target
          return false;
        }
        FBB = TBB;
        TBB = TargetBB;
        CondBranch = &*I;
        continue;
      }

      llvm_unreachable("multiple conditional branches in one BB");
    }
    return true;
  }

  void createLongJmp(InstructionListType &Seq, const MCSymbol *Target,
                     MCContext *Ctx, bool IsTailCall) override {
    // ip0 (r16) is reserved for the linker (refer to 5.3.1.1 of "Procedure
    // Call Standard for the ARM 64-bit Architecture (AArch64)").
    // The sequence of instructions we create here is the following:
    //  movz ip0, #:abs_g3:<addr>
    //  movk ip0, #:abs_g2_nc:<addr>
    //  movk ip0, #:abs_g1_nc:<addr>
    //  movk ip0, #:abs_g0_nc:<addr>
    //  br ip0
    MCInst Inst;
    Inst.setOpcode(AArch64::MOVZXi);
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createExpr(AArch64MCExpr::create(
        MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
        AArch64MCExpr::VK_ABS_G3, *Ctx)));
    Inst.addOperand(MCOperand::createImm(0x30));
    Seq.emplace_back(Inst);

    Inst.clear();
    Inst.setOpcode(AArch64::MOVKXi);
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createExpr(AArch64MCExpr::create(
        MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
        AArch64MCExpr::VK_ABS_G2_NC, *Ctx)));
    Inst.addOperand(MCOperand::createImm(0x20));
    Seq.emplace_back(Inst);

    Inst.clear();
    Inst.setOpcode(AArch64::MOVKXi);
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createExpr(AArch64MCExpr::create(
        MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
        AArch64MCExpr::VK_ABS_G1_NC, *Ctx)));
    Inst.addOperand(MCOperand::createImm(0x10));
    Seq.emplace_back(Inst);

    Inst.clear();
    Inst.setOpcode(AArch64::MOVKXi);
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createExpr(AArch64MCExpr::create(
        MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
        AArch64MCExpr::VK_ABS_G0_NC, *Ctx)));
    Inst.addOperand(MCOperand::createImm(0));
    Seq.emplace_back(Inst);

    Inst.clear();
    Inst.setOpcode(AArch64::BR);
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    if (IsTailCall)
      setTailCall(Inst);
    Seq.emplace_back(Inst);
  }

  void createShortJmp(InstructionListType &Seq, const MCSymbol *Target,
                      MCContext *Ctx, bool IsTailCall) override {
    // ip0 (r16) is reserved for the linker (refer to 5.3.1.1 of "Procedure
    // Call Standard for the ARM 64-bit Architecture (AArch64)").
    // The sequence of instructions we create here is the following:
    //  adrp ip0, imm
    //  add ip0, ip0, imm
    //  br ip0
    MCPhysReg Reg = AArch64::X16;
    InstructionListType Insts = materializeAddress(Target, Ctx, Reg);
    Insts.emplace_back();
    MCInst &Inst = Insts.back();
    Inst.clear();
    Inst.setOpcode(AArch64::BR);
    Inst.addOperand(MCOperand::createReg(Reg));
    if (IsTailCall)
      setTailCall(Inst);
    Seq.swap(Insts);
  }

  /// Matching pattern here is
  ///
  ///    ADRP  x16, imm
  ///    ADD   x16, x16, imm
  ///    BR    x16
  ///
  uint64_t matchLinkerVeneer(InstructionIterator Begin, InstructionIterator End,
                             uint64_t Address, const MCInst &CurInst,
                             MCInst *&TargetHiBits, MCInst *&TargetLowBits,
                             uint64_t &Target) const override {
    if (CurInst.getOpcode() != AArch64::BR || !CurInst.getOperand(0).isReg() ||
        CurInst.getOperand(0).getReg() != AArch64::X16)
      return 0;

    auto I = End;
    if (I == Begin)
      return 0;

    --I;
    Address -= 4;
    if (I == Begin || I->getOpcode() != AArch64::ADDXri ||
        MCPlus::getNumPrimeOperands(*I) < 3 || !I->getOperand(0).isReg() ||
        !I->getOperand(1).isReg() ||
        I->getOperand(0).getReg() != AArch64::X16 ||
        I->getOperand(1).getReg() != AArch64::X16 || !I->getOperand(2).isImm())
      return 0;
    TargetLowBits = &*I;
    uint64_t Addr = I->getOperand(2).getImm() & 0xFFF;

    --I;
    Address -= 4;
    if (I->getOpcode() != AArch64::ADRP ||
        MCPlus::getNumPrimeOperands(*I) < 2 || !I->getOperand(0).isReg() ||
        !I->getOperand(1).isImm() || I->getOperand(0).getReg() != AArch64::X16)
      return 0;
    TargetHiBits = &*I;
    Addr |= (Address + ((int64_t)I->getOperand(1).getImm() << 12)) &
            0xFFFFFFFFFFFFF000ULL;
    Target = Addr;
    return 3;
  }

  bool matchAdrpAddPair(const MCInst &Adrp, const MCInst &Add) const override {
    if (!isADRP(Adrp) || !isAddXri(Add))
      return false;

    assert(Adrp.getOperand(0).isReg() &&
           "Unexpected operand in ADRP instruction");
    MCPhysReg AdrpReg = Adrp.getOperand(0).getReg();
    assert(Add.getOperand(1).isReg() &&
           "Unexpected operand in ADDXri instruction");
    MCPhysReg AddReg = Add.getOperand(1).getReg();
    return AdrpReg == AddReg;
  }

  bool replaceImmWithSymbolRef(MCInst &Inst, const MCSymbol *Symbol,
                               int64_t Addend, MCContext *Ctx, int64_t &Value,
                               uint64_t RelType) const override {
    unsigned ImmOpNo = -1U;
    for (unsigned Index = 0; Index < MCPlus::getNumPrimeOperands(Inst);
         ++Index) {
      if (Inst.getOperand(Index).isImm()) {
        ImmOpNo = Index;
        break;
      }
    }
    if (ImmOpNo == -1U)
      return false;

    Value = Inst.getOperand(ImmOpNo).getImm();

    setOperandToSymbolRef(Inst, ImmOpNo, Symbol, Addend, Ctx, RelType);

    return true;
  }

  bool createUncondBranch(MCInst &Inst, const MCSymbol *TBB,
                          MCContext *Ctx) const override {
    Inst.setOpcode(AArch64::B);
    Inst.clear();
    Inst.addOperand(MCOperand::createExpr(getTargetExprFor(
        Inst, MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx),
        *Ctx, 0)));
    return true;
  }

  bool shouldRecordCodeRelocation(uint64_t RelType) const override {
    switch (RelType) {
    case ELF::R_AARCH64_ABS64:
    case ELF::R_AARCH64_ABS32:
    case ELF::R_AARCH64_ABS16:
    case ELF::R_AARCH64_ADD_ABS_LO12_NC:
    case ELF::R_AARCH64_ADR_GOT_PAGE:
    case ELF::R_AARCH64_ADR_PREL_LO21:
    case ELF::R_AARCH64_ADR_PREL_PG_HI21:
    case ELF::R_AARCH64_ADR_PREL_PG_HI21_NC:
    case ELF::R_AARCH64_LD64_GOT_LO12_NC:
    case ELF::R_AARCH64_LDST8_ABS_LO12_NC:
    case ELF::R_AARCH64_LDST16_ABS_LO12_NC:
    case ELF::R_AARCH64_LDST32_ABS_LO12_NC:
    case ELF::R_AARCH64_LDST64_ABS_LO12_NC:
    case ELF::R_AARCH64_LDST128_ABS_LO12_NC:
    case ELF::R_AARCH64_TLSDESC_ADD_LO12:
    case ELF::R_AARCH64_TLSDESC_ADR_PAGE21:
    case ELF::R_AARCH64_TLSDESC_ADR_PREL21:
    case ELF::R_AARCH64_TLSDESC_LD64_LO12:
    case ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case ELF::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case ELF::R_AARCH64_MOVW_UABS_G0:
    case ELF::R_AARCH64_MOVW_UABS_G0_NC:
    case ELF::R_AARCH64_MOVW_UABS_G1:
    case ELF::R_AARCH64_MOVW_UABS_G1_NC:
    case ELF::R_AARCH64_MOVW_UABS_G2:
    case ELF::R_AARCH64_MOVW_UABS_G2_NC:
    case ELF::R_AARCH64_MOVW_UABS_G3:
    case ELF::R_AARCH64_PREL16:
    case ELF::R_AARCH64_PREL32:
    case ELF::R_AARCH64_PREL64:
      return true;
    case ELF::R_AARCH64_CALL26:
    case ELF::R_AARCH64_JUMP26:
    case ELF::R_AARCH64_TSTBR14:
    case ELF::R_AARCH64_CONDBR19:
    case ELF::R_AARCH64_TLSDESC_CALL:
    case ELF::R_AARCH64_TLSLE_ADD_TPREL_HI12:
    case ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
      return false;
    default:
      llvm_unreachable("Unexpected AArch64 relocation type in code");
    }
  }

  StringRef getTrapFillValue() const override {
    return StringRef("\0\0\0\0", 4);
  }

  bool createReturn(MCInst &Inst) const override {
    Inst.setOpcode(AArch64::RET);
    Inst.clear();
    Inst.addOperand(MCOperand::createReg(AArch64::LR));
    return true;
  }

  bool createStackPointerIncrement(
      MCInst &Inst, int Size,
      bool NoFlagsClobber = false /*unused for AArch64*/) const override {
    Inst.setOpcode(AArch64::SUBXri);
    Inst.clear();
    Inst.addOperand(MCOperand::createReg(AArch64::SP));
    Inst.addOperand(MCOperand::createReg(AArch64::SP));
    Inst.addOperand(MCOperand::createImm(Size));
    Inst.addOperand(MCOperand::createImm(0));
    return true;
  }

  bool createStackPointerDecrement(
      MCInst &Inst, int Size,
      bool NoFlagsClobber = false /*unused for AArch64*/) const override {
    Inst.setOpcode(AArch64::ADDXri);
    Inst.clear();
    Inst.addOperand(MCOperand::createReg(AArch64::SP));
    Inst.addOperand(MCOperand::createReg(AArch64::SP));
    Inst.addOperand(MCOperand::createImm(Size));
    Inst.addOperand(MCOperand::createImm(0));
    return true;
  }

  void createIndirectBranch(MCInst &Inst, MCPhysReg MemBaseReg,
                            int64_t Disp) const {
    Inst.setOpcode(AArch64::BR);
    Inst.addOperand(MCOperand::createReg(MemBaseReg));
  }

  InstructionListType createInstrumentedIndCallHandlerExitBB() const override {
    InstructionListType Insts(5);
    // Code sequence for instrumented indirect call handler:
    //   msr  nzcv, x1
    //   ldp  x0, x1, [sp], #16
    //   ldr  x16, [sp], #16
    //   ldp  x0, x1, [sp], #16
    //   br   x16
    setSystemFlag(Insts[0], AArch64::X1);
    createPopRegisters(Insts[1], AArch64::X0, AArch64::X1);
    // Load the address of the function that should be called next in the
    // original binary into X16. X16 may be clobbered freely, so there is no
    // need to save and restore it.
    loadReg(Insts[2], AArch64::X16, AArch64::SP);
    createPopRegisters(Insts[3], AArch64::X0, AArch64::X1);
    createIndirectBranch(Insts[4], AArch64::X16, 0);
    return Insts;
  }

  InstructionListType
  createInstrumentedIndTailCallHandlerExitBB() const override {
    return createInstrumentedIndCallHandlerExitBB();
  }

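  /// Create a simple getter that materializes the address of \p name, loads
  /// the 64-bit value stored there into X0, and returns.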
  InstructionListType createGetter(MCContext *Ctx, const char *name) const {
    InstructionListType Insts(4);
    MCSymbol *Locs = Ctx->getOrCreateSymbol(name);
    InstructionListType Addr = materializeAddress(Locs, Ctx, AArch64::X0);
    assert(Addr.size() == 2 && "Invalid Addr size");
    std::copy(Addr.begin(), Addr.end(), Insts.begin());
    loadReg(Insts[2], AArch64::X0, AArch64::X0);
    createReturn(Insts[3]);
    return Insts;
  }
1407 
1408   InstructionListType createNumCountersGetter(MCContext *Ctx) const override {
1409     return createGetter(Ctx, "__bolt_num_counters");
1410   }
1411 
1412   InstructionListType
1413   createInstrLocationsGetter(MCContext *Ctx) const override {
1414     return createGetter(Ctx, "__bolt_instr_locations");
1415   }
1416 
1417   InstructionListType createInstrTablesGetter(MCContext *Ctx) const override {
1418     return createGetter(Ctx, "__bolt_instr_tables");
1419   }
1420 
1421   InstructionListType createInstrNumFuncsGetter(MCContext *Ctx) const override {
1422     return createGetter(Ctx, "__bolt_instr_num_funcs");
1423   }

  void convertIndirectCallToLoad(MCInst &Inst, MCPhysReg Reg) override {
    bool IsTailCall = isTailCall(Inst);
    if (IsTailCall)
      removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
    if (Inst.getOpcode() == AArch64::BR || Inst.getOpcode() == AArch64::BLR) {
      Inst.setOpcode(AArch64::ORRXrs);
      Inst.insert(Inst.begin(), MCOperand::createReg(Reg));
      Inst.insert(Inst.begin() + 1, MCOperand::createReg(AArch64::XZR));
      Inst.insert(Inst.begin() + 3, MCOperand::createImm(0));
      return;
    }
    llvm_unreachable("not implemented");
  }
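
  // For example, with Reg == X0, "blr x8" is rewritten in place as
  //   orr x0, xzr, x8
  // i.e. "mov x0, x8", which captures the call target without branching.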

  InstructionListType createLoadImmediate(const MCPhysReg Dest,
                                          uint64_t Imm) const override {
    InstructionListType Insts(4);
    int Shift = 48;
    for (int I = 0; I < 4; I++, Shift -= 16) {
      Insts[I].setOpcode(AArch64::MOVKXi);
      Insts[I].addOperand(MCOperand::createReg(Dest));
      Insts[I].addOperand(MCOperand::createReg(Dest));
      Insts[I].addOperand(MCOperand::createImm((Imm >> Shift) & 0xFFFF));
      Insts[I].addOperand(MCOperand::createImm(Shift));
    }
    return Insts;
  }
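
  // For example, createLoadImmediate(AArch64::X1, 0x1234) should expand to:
  //   movk x1, #0x0, lsl #48
  //   movk x1, #0x0, lsl #32
  //   movk x1, #0x0, lsl #16
  //   movk x1, #0x1234
  // All four 16-bit chunks are written, so no leading movz is required.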

  void createIndirectCallInst(MCInst &Inst, bool IsTailCall,
                              MCPhysReg Reg) const {
    Inst.clear();
    Inst.setOpcode(IsTailCall ? AArch64::BR : AArch64::BLR);
    Inst.addOperand(MCOperand::createReg(Reg));
  }

  InstructionListType createInstrumentedIndirectCall(MCInst &&CallInst,
                                                     MCSymbol *HandlerFuncAddr,
                                                     int CallSiteID,
                                                     MCContext *Ctx) override {
    InstructionListType Insts;
    // Code sequence used to enter the indirect-call instrumentation helper:
    //   stp  x0, x1, [sp, #-16]!      createPushRegisters
    //   mov  x0, target               convertIndirectCallToLoad ->
    //                                   orr x0, xzr, target
    //   mov  x1, CallSiteID           createLoadImmediate ->
    //   movk x1, #0x0, lsl #48
    //   movk x1, #0x0, lsl #32
    //   movk x1, #0x0, lsl #16
    //   movk x1, #0x0
    //   stp  x0, x1, [sp, #-16]!      createPushRegisters
    //   bl   *HandlerFuncAddr         materializeAddress + createIndirectCallInst ->
    //   adrp x0, HandlerFuncAddr
    //   add  x0, x0, :lo12:HandlerFuncAddr
    //   blr  x0
    Insts.emplace_back();
    createPushRegisters(Insts.back(), AArch64::X0, AArch64::X1);
    Insts.emplace_back(CallInst);
    convertIndirectCallToLoad(Insts.back(), AArch64::X0);
    InstructionListType LoadImm =
        createLoadImmediate(getIntArgRegister(1), CallSiteID);
    Insts.insert(Insts.end(), LoadImm.begin(), LoadImm.end());
    Insts.emplace_back();
    createPushRegisters(Insts.back(), AArch64::X0, AArch64::X1);
    Insts.resize(Insts.size() + 2);
    InstructionListType Addr =
        materializeAddress(HandlerFuncAddr, Ctx, AArch64::X0);
    assert(Addr.size() == 2 && "Invalid Addr size");
    std::copy(Addr.begin(), Addr.end(), Insts.end() - Addr.size());
    Insts.emplace_back();
    createIndirectCallInst(Insts.back(), isTailCall(CallInst), AArch64::X0);

    // Carry over metadata including tail call marker if present.
    stripAnnotations(Insts.back());
    moveAnnotations(std::move(CallInst), Insts.back());

    return Insts;
  }

  InstructionListType
  createInstrumentedIndCallHandlerEntryBB(const MCSymbol *InstrTrampoline,
                                          const MCSymbol *IndCallHandler,
                                          MCContext *Ctx) override {
    // Code sequence used to check whether InstrTrampoline was initialized
    // and call it if so; returns via IndCallHandler:
    //   stp     x0, x1, [sp, #-16]!
    //   mrs     x1, nzcv
    //   adr     x0, InstrTrampoline -> adrp + add
    //   ldr     x0, [x0]
    //   subs    x0, x0, #0x0
    //   b.eq    IndCallHandler
    //   str     x30, [sp, #-16]!
    //   blr     x0
    //   ldr     x30, [sp], #16
    //   b       IndCallHandler
    InstructionListType Insts;
    Insts.emplace_back();
    createPushRegisters(Insts.back(), AArch64::X0, AArch64::X1);
    Insts.emplace_back();
    getSystemFlag(Insts.back(), getIntArgRegister(1));
    Insts.emplace_back();
    Insts.emplace_back();
    InstructionListType Addr =
        materializeAddress(InstrTrampoline, Ctx, AArch64::X0);
    assert(Addr.size() == 2 && "Invalid Addr size");
    std::copy(Addr.begin(), Addr.end(), Insts.end() - Addr.size());
    Insts.emplace_back();
    loadReg(Insts.back(), AArch64::X0, AArch64::X0);
    InstructionListType CmpJmp =
        createCmpJE(AArch64::X0, 0, IndCallHandler, Ctx);
    Insts.insert(Insts.end(), CmpJmp.begin(), CmpJmp.end());
    Insts.emplace_back();
    storeReg(Insts.back(), AArch64::LR, AArch64::SP);
    Insts.emplace_back();
    Insts.back().setOpcode(AArch64::BLR);
    Insts.back().addOperand(MCOperand::createReg(AArch64::X0));
    Insts.emplace_back();
    loadReg(Insts.back(), AArch64::LR, AArch64::SP);
    Insts.emplace_back();
    createDirectCall(Insts.back(), IndCallHandler, Ctx, /*IsTailCall*/ true);
    return Insts;
  }

  InstructionListType
  createInstrIncMemory(const MCSymbol *Target, MCContext *Ctx, bool IsLeaf,
                       unsigned CodePointerSize) const override {
    unsigned I = 0;
    InstructionListType Instrs(IsLeaf ? 12 : 10);

    if (IsLeaf)
      createStackPointerIncrement(Instrs[I++], 128);
    createPushRegisters(Instrs[I++], AArch64::X0, AArch64::X1);
    getSystemFlag(Instrs[I++], AArch64::X1);
    InstructionListType Addr = materializeAddress(Target, Ctx, AArch64::X0);
    assert(Addr.size() == 2 && "Invalid Addr size");
    std::copy(Addr.begin(), Addr.end(), Instrs.begin() + I);
    I += Addr.size();
    storeReg(Instrs[I++], AArch64::X2, AArch64::SP);
    InstructionListType Insts = createIncMemory(AArch64::X0, AArch64::X2);
    assert(Insts.size() == 2 && "Invalid Insts size");
    std::copy(Insts.begin(), Insts.end(), Instrs.begin() + I);
    I += Insts.size();
    loadReg(Instrs[I++], AArch64::X2, AArch64::SP);
    setSystemFlag(Instrs[I++], AArch64::X1);
    createPopRegisters(Instrs[I++], AArch64::X0, AArch64::X1);
    if (IsLeaf)
      createStackPointerDecrement(Instrs[I++], 128);
    return Instrs;
  }
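
  // A sketch of the sequence emitted above for a non-leaf function, assuming
  // createIncMemory produces two instructions that bump the counter at [x0]
  // using X2 as scratch:
  //   stp  x0, x1, [sp, #-16]!
  //   mrs  x1, nzcv
  //   adrp x0, Target
  //   add  x0, x0, :lo12:Target
  //   str  x2, [sp, #-16]!
  //   <createIncMemory: increment [x0] via x2>
  //   ldr  x2, [sp], #16
  //   msr  nzcv, x1
  //   ldp  x0, x1, [sp], #16
  // For leaf functions, the whole sequence is additionally bracketed by
  //   sub sp, sp, #128 ... add sp, sp, #128
  // so that any data a leaf function keeps below SP is not clobbered.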

  std::vector<MCInst> createSymbolTrampoline(const MCSymbol *TgtSym,
                                             MCContext *Ctx) override {
    std::vector<MCInst> Insts;
    createShortJmp(Insts, TgtSym, Ctx, /*IsTailCall*/ true);
    return Insts;
  }

  InstructionListType materializeAddress(const MCSymbol *Target, MCContext *Ctx,
                                         MCPhysReg RegName,
                                         int64_t Addend = 0) const override {
    // Compute the 4KiB page of the address with ADRP, then add the low 12
    // bits (the page offset) with ADD.
    InstructionListType Insts(2);
    Insts[0].setOpcode(AArch64::ADRP);
    Insts[0].clear();
    Insts[0].addOperand(MCOperand::createReg(RegName));
    Insts[0].addOperand(MCOperand::createImm(0));
    setOperandToSymbolRef(Insts[0], /* OpNum */ 1, Target, Addend, Ctx,
                          ELF::R_AARCH64_NONE);
    Insts[1].setOpcode(AArch64::ADDXri);
    Insts[1].clear();
    Insts[1].addOperand(MCOperand::createReg(RegName));
    Insts[1].addOperand(MCOperand::createReg(RegName));
    Insts[1].addOperand(MCOperand::createImm(0));
    Insts[1].addOperand(MCOperand::createImm(0));
    setOperandToSymbolRef(Insts[1], /* OpNum */ 2, Target, Addend, Ctx,
                          ELF::R_AARCH64_ADD_ABS_LO12_NC);
    return Insts;
  }
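
  // For example, materializing Target into X0 yields:
  //   adrp x0, Target
  //   add  x0, x0, :lo12:Target
  // leaving the absolute address of Target (plus Addend) in X0.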

  std::optional<Relocation>
  createRelocation(const MCFixup &Fixup,
                   const MCAsmBackend &MAB) const override {
    const MCFixupKindInfo &FKI = MAB.getFixupKindInfo(Fixup.getKind());

    assert(FKI.TargetOffset == 0 && "0-bit relocation offset expected");
    const uint64_t RelOffset = Fixup.getOffset();

    uint64_t RelType;
    if (Fixup.getKind() == MCFixupKind(AArch64::fixup_aarch64_pcrel_call26)) {
      RelType = ELF::R_AARCH64_CALL26;
    } else if (FKI.Flags & MCFixupKindInfo::FKF_IsPCRel) {
      switch (FKI.TargetSize) {
      default:
        return std::nullopt;
      case 16:
        RelType = ELF::R_AARCH64_PREL16;
        break;
      case 32:
        RelType = ELF::R_AARCH64_PREL32;
        break;
      case 64:
        RelType = ELF::R_AARCH64_PREL64;
        break;
      }
    } else {
      switch (FKI.TargetSize) {
      default:
        return std::nullopt;
      case 16:
        RelType = ELF::R_AARCH64_ABS16;
        break;
      case 32:
        RelType = ELF::R_AARCH64_ABS32;
        break;
      case 64:
        RelType = ELF::R_AARCH64_ABS64;
        break;
      }
    }

    auto [RelSymbol, RelAddend] = extractFixupExpr(Fixup);

    return Relocation({RelOffset, RelSymbol, RelType, RelAddend, 0});
  }
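
  // For instance, a 32-bit PC-relative data fixup (e.g. one produced for
  // ".word sym - .") maps to R_AARCH64_PREL32, while a plain 64-bit absolute
  // data fixup maps to R_AARCH64_ABS64.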

  uint16_t getMinFunctionAlignment() const override { return 4; }
};

} // end anonymous namespace

namespace llvm {
namespace bolt {

MCPlusBuilder *createAArch64MCPlusBuilder(const MCInstrAnalysis *Analysis,
                                          const MCInstrInfo *Info,
                                          const MCRegisterInfo *RegInfo,
                                          const MCSubtargetInfo *STI) {
  return new AArch64MCPlusBuilder(Analysis, Info, RegInfo, STI);
}

} // namespace bolt
} // namespace llvm