//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// RISC-V.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVRegisterBankInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "riscv-isel"

using namespace llvm;
using namespace MIPatternMatch;

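// RISCVGenGlobalISel.inc is emitted by TableGen's GlobalISel backend. Each
// GET_GLOBALISEL_* guard below pulls in one slice of the generated selector
// (the predicate bitset, member declarations, and the selectImpl()
// implementation, respectively).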
#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

namespace {

class RISCVInstructionSelector : public InstructionSelector {
public:
  RISCVInstructionSelector(const RISCVTargetMachine &TM,
                           const RISCVSubtarget &STI,
                           const RISCVRegisterBankInfo &RBI);

  bool select(MachineInstr &MI) override;

  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override {
    InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
    MRI = &MF.getRegInfo();
  }

  static const char *getName() { return DEBUG_TYPE; }

private:
  const TargetRegisterClass *
  getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;

  bool isRegInGprb(Register Reg) const;
  bool isRegInFprb(Register Reg) const;

  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // A lowering phase that runs before any selection attempts and may rewrite
  // the instruction in place.
  void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);

  bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);

  // Custom selection methods
  bool selectCopy(MachineInstr &MI) const;
  bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  bool selectSExtInreg(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB) const;
  void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
                 MachineIRBuilder &MIB) const;
  bool selectMergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;

  ComplexRendererFns selectShiftMask(MachineOperand &Root) const;
  ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;

  ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }

  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                       unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }

  // Custom renderers for tablegen
  void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                    int OpIdx) const;
  void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
                            int OpIdx) const;
  void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
                          int OpIdx) const;
  void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
                      int OpIdx) const;
  void renderImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                 int OpIdx) const;

  void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
                           int OpIdx) const;

  const RISCVSubtarget &STI;
  const RISCVInstrInfo &TII;
  const RISCVRegisterInfo &TRI;
  const RISCVRegisterBankInfo &RBI;
  const RISCVTargetMachine &TM;

  MachineRegisterInfo *MRI = nullptr;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. We need to unify the name
  // of the Subtarget variable.
  const RISCVSubtarget *Subtarget = &STI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      TM(TM),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

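// Render the shift-amount operand Root, stripping wrappers (zext, masks,
// add/sub by multiples of the shift width) that cannot change the bits the
// shift instruction actually reads.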
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root) const {
  if (!Root.isReg())
    return std::nullopt;

  using namespace llvm::MIPatternMatch;

  Register RootReg = Root.getReg();
  Register ShAmtReg = RootReg;
  const LLT ShiftLLT = MRI->getType(RootReg);
  unsigned ShiftWidth = ShiftLLT.getSizeInBits();
  assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
  // Peek through zext.
  Register ZExtSrcReg;
  if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg)))) {
    ShAmtReg = ZExtSrcReg;
  }

  APInt AndMask;
  Register AndSrcReg;
  // Try to combine the following pattern (this applies to the other shift
  // instructions and to the 32-bit *W forms as well):
  //
  //   %4:gprb(s64) = G_AND %3, %2
  //   %5:gprb(s64) = G_LSHR %1, %4(s64)
  //
  // The RISC-V ISA manual specifies that SLL, SRL, and SRA ignore all but the
  // lowest log2(XLEN) bits of rs2. For the pattern above, the G_AND can
  // therefore be eliminated whenever the lowest log2(XLEN) bits of its result
  // and its non-constant input are guaranteed to agree. With one G_AND operand
  // holding a constant (the mask), that happens in two cases:
  //
  // 1. the lowest log2(XLEN) bits of the mask are all set
  // 2. every mask bit that is clear is already known to be zero in the
  //    register being masked
  if (mi_match(ShAmtReg, *MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmtReg = AndSrcReg;
    } else {
      // SimplifyDemandedBits may have optimized the mask so try restoring any
      // bits that are known zero.
      KnownBits Known = KB->getKnownBits(AndSrcReg);
      if (ShMask.isSubsetOf(AndMask | Known.Zero))
        ShAmtReg = AndSrcReg;
    }
  }

  APInt Imm;
  Register Reg;
  if (mi_match(ShAmtReg, *MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
      // If we are shifting by X+N where N == 0 mod Size, then just shift by X
      // to avoid the ADD.
      ShAmtReg = Reg;
  } else if (mi_match(ShAmtReg, *MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
      // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
      // to generate a NEG instead of a SUB of a constant.
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
        MIB.addReg(ShAmtReg);
      }}};
    }
    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
      // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
      // to generate a NOT instead of a SUB of a constant.
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
            .addImm(-1);
        MIB.addReg(ShAmtReg);
      }}};
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}

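// Select the shifted operand of a Zba SHXADD (sh1add/sh2add/sh3add)
// instruction with shift amount ShAmt, folding an adjacent shift+mask pair
// into a single SRLI/SRLIW when the constants allow it.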
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  const unsigned XLen = STI.getXLen();
  APInt Mask, C2;
  Register RegY;
  std::optional<bool> LeftShift;
  // (and (shl y, c2), mask)
  if (mi_match(RootReg, *MRI,
               m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = true;
  // (and (lshr y, c2), mask)
  else if (mi_match(RootReg, *MRI,
                    m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = false;

  if (LeftShift.has_value()) {
    if (*LeftShift)
      Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
    else
      Mask &= maskTrailingOnes<uint64_t>(XLen - C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // Given (and (shl y, c2), mask) in which mask has no leading zeros and
      // c3 trailing zeros, we can use an SRLI by c3 - c2 followed by a SHXADD.
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Trailing - C2.getLimitedValue());
          MIB.addReg(DstReg);
        }}};
      }

      // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
      // c3 trailing zeros, we can use an SRLI by c2 + c3 followed by a SHXADD.
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  LeftShift.reset();

  // (shl (and y, mask), c2)
  if (mi_match(RootReg, *MRI,
               m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                      m_ICst(C2))))
    LeftShift = true;
  // (lshr (and y, mask), c2)
  else if (mi_match(RootReg, *MRI,
                    m_GLShr(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                            m_ICst(C2))))
    LeftShift = false;

  if (LeftShift.has_value() && Mask.isShiftedMask()) {
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();

    // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
    // c3 trailing zeros, we can emit SRLIW + SHXADD if c2 + c3 == ShAmt.
    bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
                (Trailing + C2.getLimitedValue()) == ShAmt;
    if (!Cond)
      // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
      // c3 trailing zeros, we can emit SRLIW + SHXADD if c3 - c2 == ShAmt.
      Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
             (Trailing - C2.getLimitedValue()) == ShAmt;

    if (Cond) {
      Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
            .addImm(Trailing);
        MIB.addReg(DstReg);
      }}};
    }
  }

  return std::nullopt;
}

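// Select the shifted operand of a Zba SHXADD_UW (sh1add.uw/sh2add.uw/
// sh3add.uw) instruction, which zero-extends the low 32 bits of rs1 before
// shifting it left by ShAmt.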
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  // Given (and (shl x, c2), mask) in which mask is a shifted mask with
  // 32 - ShAmt leading zeros and c2 trailing zeros, we can use an SLLI by
  // c2 - ShAmt followed by a SHXADD_UW whose shift amount is ShAmt.
  APInt Mask, C2;
  Register RegX;
  if (mi_match(
          RootReg, *MRI,
          m_OneNonDBGUse(m_GAnd(m_OneNonDBGUse(m_GShl(m_Reg(RegX), m_ICst(C2))),
                                m_ICst(Mask))))) {
    Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
              .addImm(C2.getLimitedValue() - ShAmt);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  return std::nullopt;
}

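// Select a (base, simm12 offset) pair for a reg+imm addressing mode, folding
// frame indices and base+constant adds when the offset fits in 12 bits;
// otherwise fall back to (Root, 0).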
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  if (!Root.isReg())
    return std::nullopt;

  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, *MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());

    int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
    if (isInt<12>(RHSC)) {
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    }
  }

  // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
  // the combiner?
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}

/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate
/// CC. CC must be an ICmp predicate.
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
  switch (CC) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
    return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:
    return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT:
    return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT:
    return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE:
    return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE:
    return RISCVCC::COND_GE;
  }
}

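// Compute the (CC, LHS, RHS) operand triple for a conditional branch on
// CondReg: fold a defining G_ICMP if there is one, canonicalize comparisons
// against small constants to use X0, and swap operands for predicates that
// RISC-V branches do not encode directly.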
static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
                                 Register &LHS, Register &RHS,
                                 MachineRegisterInfo &MRI) {
  // Try to fold an ICmp. If that fails, use a NE compare with X0.
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
    LHS = CondReg;
    RHS = RISCV::X0;
    CC = RISCVCC::COND_NE;
    return;
  }

  // We found an ICmp, do some canonicalizations.

  // Adjust comparisons to use comparison with 0 if possible.
  if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
    switch (Pred) {
    case CmpInst::Predicate::ICMP_SGT:
      // Convert X > -1 to X >= 0
      if (*Constant == -1) {
        CC = RISCVCC::COND_GE;
        RHS = RISCV::X0;
        return;
      }
      break;
    case CmpInst::Predicate::ICMP_SLT:
      // Convert X < 1 to 0 >= X
      if (*Constant == 1) {
        CC = RISCVCC::COND_GE;
        RHS = LHS;
        LHS = RISCV::X0;
        return;
      }
      break;
    default:
      break;
    }
  }

  switch (Pred) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
  case CmpInst::Predicate::ICMP_NE:
  case CmpInst::Predicate::ICMP_ULT:
  case CmpInst::Predicate::ICMP_SLT:
  case CmpInst::Predicate::ICMP_UGE:
  case CmpInst::Predicate::ICMP_SGE:
    // These CCs are supported directly by RISC-V branches.
    break;
  case CmpInst::Predicate::ICMP_SGT:
  case CmpInst::Predicate::ICMP_SLE:
  case CmpInst::Predicate::ICMP_UGT:
  case CmpInst::Predicate::ICMP_ULE:
    // These CCs are not supported directly by RISC-V branches, but they can
    // be handled by reversing the direction of the CC and swapping LHS with
    // RHS.
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    break;
  }

  CC = getRISCVCCFromICmp(Pred);
}

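// Main selection entry point. preISelLower may rewrite MI first; the
// TableGen-erated selectImpl() then gets the first chance to match, and the
// switch below handles the opcodes that still need manual C++ selection.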
bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI->getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI->getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
          RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
      if (!DefRC) {
        if (!DefTy.isValid()) {
          LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
          return false;
        }

        const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC) {
          LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
          return false;
        }
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
    }

    // Certain non-generic instructions also need some special handling.
    if (MI.isCopy())
      return selectCopy(MI);

    return true;
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    return selectCopy(MI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use the constant pool for complex constants.
    // TODO: Optimize +0.0 to use fcvt.d.w for s64 on rv32.
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    APInt Imm = FPimm.bitcastToAPInt();
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      Register GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
        return false;

      unsigned Opcode = Size == 64   ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
    } else {
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");
      // Split into two pieces and build through the stack.
      Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      MachineInstrBuilder PairF64 = MIB.buildInstr(
          RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    // Jump tables and constant pools are always local, so the IsLocal and
    // IsExternWeak defaults are correct here.
    return selectAddr(MI, MIB);
  case TargetOpcode::G_BRCOND: {
    Register LHS, RHS;
    RISCVCC::CondCode CC;
    getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);

    auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
                   .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
  }
  case TargetOpcode::G_BRJT: {
    // FIXME: Move to legalization?
    const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
    unsigned EntrySize = MJTI->getEntrySize(MF.getDataLayout());
    assert((EntrySize == 4 || (Subtarget->is64Bit() && EntrySize == 8)) &&
           "Unsupported jump-table entry size");
    assert(
        (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32 ||
         MJTI->getEntryKind() == MachineJumpTableInfo::EK_Custom32 ||
         MJTI->getEntryKind() == MachineJumpTableInfo::EK_BlockAddress) &&
        "Unexpected jump-table entry kind");

    auto SLL =
        MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {MI.getOperand(2)})
            .addImm(Log2_32(EntrySize));
    if (!SLL.constrainAllUses(TII, TRI, RBI))
      return false;

    // TODO: Use SHXADD. Moving to legalization would fix this automatically.
    auto ADD = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
                              {MI.getOperand(0), SLL.getReg(0)});
    if (!ADD.constrainAllUses(TII, TRI, RBI))
      return false;

    unsigned LdOpc = EntrySize == 8 ? RISCV::LD : RISCV::LW;
    auto Dest =
        MIB.buildInstr(LdOpc, {&RISCV::GPRRegClass}, {ADD.getReg(0)})
            .addImm(0)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo::getJumpTable(MF), MachineMemOperand::MOLoad,
                EntrySize, Align(MJTI->getEntryAlignment(MF.getDataLayout()))));
    if (!Dest.constrainAllUses(TII, TRI, RBI))
      return false;

    // If the Kind is EK_LabelDifference32, the table stores an offset from
    // the location of the table. Add the table address to get an absolute
    // address.
    if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32) {
      Dest = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
                            {Dest.getReg(0), MI.getOperand(0)});
      if (!Dest.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    auto Branch =
        MIB.buildInstr(RISCV::PseudoBRIND, {}, {Dest.getReg(0)}).addImm(0);
    if (!Branch.constrainAllUses(TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  case TargetOpcode::G_SEXT_INREG:
    return selectSExtInreg(MI, MIB);
  case TargetOpcode::G_FRAME_INDEX: {
    // TODO: We may want to replace this code with the SelectionDAG patterns,
    // which fail to get imported because they use FrameAddrRegImm, which is a
    // ComplexPattern.
    MI.setDesc(TII.get(RISCV::ADDI));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(MI, MIB);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB);
  default:
    return false;
  }
}

bool RISCVInstructionSelector::selectMergeValues(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_MERGE_VALUES);

  // Build an F64 pair from two s32 GPR operands.
  if (MI.getNumOperands() != 3)
    return false;
  Register Dst = MI.getOperand(0).getReg();
  Register Lo = MI.getOperand(1).getReg();
  Register Hi = MI.getOperand(2).getReg();
  if (!isRegInFprb(Dst) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;
  MI.setDesc(TII.get(RISCV::BuildPairF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  // Split an F64 Src into two s32 GPR parts.
  if (MI.getNumOperands() != 3)
    return false;
  Register Src = MI.getOperand(2).getReg();
  Register Lo = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;
  MI.setDesc(TII.get(RISCV::SplitF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

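// Replace the pointer-typed operand Op with an sXLen integer: build a
// G_PTRTOINT of the pointer, select it immediately (it becomes a COPY), and
// point Op at the integer result.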
bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB) {
  Register PtrReg = Op.getReg();
  assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");

  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}

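// Rewrite pointer-typed generic operations into their integer equivalents
// (G_PTR_ADD -> G_ADD, G_PTRMASK -> G_AND) so the imported TableGen patterns,
// which operate on sXLen scalars, can match them.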
void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());

    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI->setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());
    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI->setType(DstReg, sXLen);
  }
  }
}

void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}

void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}

void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}

void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}

void RISCVInstructionSelector::renderImm(MachineInstrBuilder &MIB,
                                         const MachineInstr &MI,
                                         int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal);
}

void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(llvm::countr_zero(C));
}

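// Map an (LLT, register bank) pair to the register class used to constrain
// virtual registers on that bank, or null if the combination is unsupported.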
const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return &RISCV::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  if (RB.getID() == RISCV::VRBRegBankID) {
    if (Ty.getSizeInBits().getKnownMinValue() <= 64)
      return &RISCV::VRRegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 128)
      return &RISCV::VRM2RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 256)
      return &RISCV::VRM4RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 512)
      return &RISCV::VRM8RegClass;
  }

  return nullptr;
}

bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}

bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();

  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
    return false;
  }

  MI.setDesc(TII.get(RISCV::COPY));
  return true;
}

bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
  }
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

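// Materialize the constant Imm into DstReg using the RISCVMatInt expansion,
// chaining each instruction of the sequence (LUI/ADDI/SLLI/...) through a
// fresh virtual register.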
bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  if (Imm == 0) {
    MIB.buildCopy(DstReg, Register(RISCV::X0));
    RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
    return true;
  }

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    MachineInstr *Result;

    switch (I.getOpndKind()) {
    case RISCVMatInt::Imm:
      // clang-format off
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
                   .addImm(I.getImm());
      // clang-format on
      break;
    case RISCVMatInt::RegX0:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
                              {SrcReg, Register(RISCV::X0)});
      break;
    case RISCVMatInt::RegReg:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
      break;
    case RISCVMatInt::RegImm:
      Result =
          MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
      break;
    }

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    SrcReg = TmpReg;
  }

  return true;
}

bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB, bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);

  // When HWASAN is used and tagging of global variables is enabled, globals
  // should be accessed via the GOT, since the tagged address of a global is
  // incompatible with existing code models. This GOT access is needed even
  // in non-PIC mode.
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use PC-relative addressing to access the GOT for this symbol, then
    // load the address from the GOT. This generates the pattern (PseudoLGA
    // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
    // %pcrel_lo(auipc))).
    MachineFunction &MF = *MI.getParent()->getParent();
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF),
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        DefTy, Align(DefTy.getSizeInBits() / 8));

    auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                      .addDisp(DispMO, 0)
                      .addMemOperand(MemOp);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }

  switch (TM.getCodeModel()) {
  default: {
    reportGISelFailure(const_cast<MachineFunction &>(*MF), *TPC, *MORE,
                       getName(), "Unsupported code model for lowering", MI);
    return false;
  }
  case CodeModel::Small: {
    // Must lie within a single 2 GiB address range and must lie between
    // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
    // (lui %hi(sym)) %lo(sym)).
    Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
                               .addDisp(DispMO, 0, RISCVII::MO_HI);

    if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
      return false;

    auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
                      .addDisp(DispMO, 0, RISCVII::MO_LO);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case CodeModel::Medium:
    // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
    // relocation needs to reference a label that points to the auipc
    // instruction itself, not the global. This cannot be done inside the
    // instruction selector.
    if (IsExternWeak) {
      // An extern weak symbol may be undefined, i.e. have value 0, which may
      // not be within 2GiB of PC, so use GOT-indirect addressing to access the
      // symbol. This generates the pattern (PseudoLGA sym), which expands to
      // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
      MachineFunction &MF = *MI.getParent()->getParent();
      MachineMemOperand *MemOp = MF.getMachineMemOperand(
          MachinePointerInfo::getGOT(MF),
          MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
              MachineMemOperand::MOInvariant,
          DefTy, Align(DefTy.getSizeInBits() / 8));

      auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                        .addDisp(DispMO, 0)
                        .addMemOperand(MemOp);

      if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
        return false;

      MI.eraseFromParent();
      return true;
    }

    // Generate a sequence for accessing addresses within any 2GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    MI.setDesc(TII.get(RISCV::PseudoLLA));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }

  return false;
}

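// Select G_SEXT_INREG: a 32-bit extend becomes ADDIW with immediate 0
// (sext.w); 8- and 16-bit extends use Zbb's SEXT_B/SEXT_H.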
bool RISCVInstructionSelector::selectSExtInreg(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  unsigned SrcSize = MI.getOperand(2).getImm();

  MachineInstr *NewMI;
  if (SrcSize == 32) {
    assert(Subtarget->is64Bit() && "Unexpected extend");
    // addiw rd, rs, 0 (i.e. sext.w rd, rs)
    NewMI = MIB.buildInstr(RISCV::ADDIW, {DstReg}, {SrcReg}).addImm(0U);
  } else {
    assert(Subtarget->hasStdExtZbb() && "Unexpected extension");
    assert((SrcSize == 8 || SrcSize == 16) && "Unexpected size");
    unsigned Opc = SrcSize == 16 ? RISCV::SEXT_H : RISCV::SEXT_B;
    NewMI = MIB.buildInstr(Opc, {DstReg}, {SrcReg});
  }

  if (!constrainSelectedInstRegOperands(*NewMI, TII, TRI, RBI))
    return false;

  MI.eraseFromParent();
  return true;
}

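// Lower G_SELECT to a Select_*_Using_CC_GPR pseudo carrying the folded
// branch condition (LHS, RHS, CC); the pseudo is expanded into a branch
// sequence after instruction selection.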
bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB) const {
  auto &SelectMI = cast<GSelect>(MI);

  Register LHS, RHS;
  RISCVCC::CondCode CC;
  getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);

  Register DstReg = SelectMI.getReg(0);

  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result = MIB.buildInstr(Opc)
                             .addDef(DstReg)
                             .addReg(LHS)
                             .addReg(RHS)
                             .addImm(CC)
                             .addReg(SelectMI.getTrueReg())
                             .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
}

// Convert an FCMP predicate to one of the supported F or D instructions.
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
  assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
  switch (Pred) {
  default:
    llvm_unreachable("Unsupported predicate");
  case CmpInst::FCMP_OLT:
    return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
  case CmpInst::FCMP_OLE:
    return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
  case CmpInst::FCMP_OEQ:
    return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
  }
}

// Try legalizing an FCMP by swapping or inverting the predicate to one that
// is supported.
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
                                  CmpInst::Predicate &Pred, bool &NeedInvert) {
  auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
           Pred == CmpInst::FCMP_OEQ;
  };

  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  InvPred = CmpInst::getInversePredicate(Pred);
  NeedInvert = true;
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    return true;
  }
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  return false;
}

// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
// the result in DstReg.
// FIXME: Maybe we should expand this earlier.
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  auto &CmpMI = cast<GFCmp>(MI);
  CmpInst::Predicate Pred = CmpMI.getCond();

  Register DstReg = CmpMI.getReg(0);
  Register LHS = CmpMI.getLHSReg();
  Register RHS = CmpMI.getRHSReg();

  unsigned Size = MRI->getType(LHS).getSizeInBits();
  assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");

  Register TmpReg = DstReg;

  bool NeedInvert = false;
  // First try swapping operands or inverting.
  if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
    if (!Cmp.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
    // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS));
    // fcmp ueq is the inversion of that result.
    NeedInvert = Pred == CmpInst::FCMP_UEQ;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {LHS, RHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {RHS, LHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Or =
        MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!Or.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS));
    // fcmp uno is the inversion of that result.
    // FIXME: If LHS and RHS are the same we can use a single FEQ.
    NeedInvert = Pred == CmpInst::FCMP_UNO;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {LHS, LHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {RHS, RHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto And =
        MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!And.constrainAllUses(TII, TRI, RBI))
      return false;
  } else
    llvm_unreachable("Unhandled predicate");

  // Emit an XORI to invert the result if needed.
  if (NeedInvert) {
    auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
    if (!Xor.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

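// Emit the fence (or compiler-only MEMBARRIER) required for the given
// ordering and scope, taking the Ztso total-store-ordering extension into
// account.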
void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    // The only fence that needs an instruction is a sequentially-consistent
    // cross-thread fence.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // fence rw, rw
      MIB.buildInstr(RISCV::FENCE, {}, {})
          .addImm(RISCVFenceField::R | RISCVFenceField::W)
          .addImm(RISCVFenceField::R | RISCVFenceField::W);
      return;
    }

    // MEMBARRIER is a compiler barrier; it codegens to a no-op.
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Singlethread fences only synchronize with signal handlers on the same
  // thread and thus only need to preserve instruction order, not actually
  // enforce memory ordering.
  if (FenceSSID == SyncScope::SingleThread) {
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
  // Manual: Volume I.
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  default:
    llvm_unreachable("Unexpected ordering");
  case AtomicOrdering::AcquireRelease:
    // fence acq_rel -> fence.tso
    MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
    return;
  case AtomicOrdering::Acquire:
    // fence acquire -> fence r, rw
    Pred = RISCVFenceField::R;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  case AtomicOrdering::Release:
    // fence release -> fence rw, w
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::W;
    break;
  case AtomicOrdering::SequentiallyConsistent:
    // fence seq_cst -> fence rw, rw
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  }
  MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
}

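// Factory entry point used to create the RISC-V GlobalISel instruction
// selector.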
namespace llvm {
InstructionSelector *
createRISCVInstructionSelector(const RISCVTargetMachine &TM,
                               const RISCVSubtarget &Subtarget,
                               const RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm