//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// RISC-V.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVRegisterBankInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "riscv-isel"

using namespace llvm;
using namespace MIPatternMatch;

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

namespace {

class RISCVInstructionSelector : public InstructionSelector {
public:
  RISCVInstructionSelector(const RISCVTargetMachine &TM,
                           const RISCVSubtarget &STI,
                           const RISCVRegisterBankInfo &RBI);

  bool select(MachineInstr &MI) override;

  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override {
    InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
    MRI = &MF.getRegInfo();
  }

  static const char *getName() { return DEBUG_TYPE; }

private:
  const TargetRegisterClass *
  getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;

  bool isRegInGprb(Register Reg) const;
  bool isRegInFprb(Register Reg) const;

  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
  // A lowering phase that runs before any selection attempts. It may rewrite
  // the instruction in place.
  void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);

  bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);

  // Custom selection methods
  bool selectCopy(MachineInstr &MI) const;
  bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB) const;
  void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
                 MachineIRBuilder &MIB) const;
  bool selectMergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;

  ComplexRendererFns selectShiftMask(MachineOperand &Root,
                                     unsigned ShiftWidth) const;
  ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
    return selectShiftMask(Root, STI.getXLen());
  }
  ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
    return selectShiftMask(Root, 32);
  }
  ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;

  ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
    return selectSExtBits(Root, Bits);
  }

  ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
    return selectZExtBits(Root, Bits);
  }

  ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }

  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                       unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }

  ComplexRendererFns renderVLOp(MachineOperand &Root) const;

  // Custom renderers for tablegen
  void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                    int OpIdx) const;
  void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
                            int OpIdx) const;
  void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
                          int OpIdx) const;
  void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
                      int OpIdx) const;
  void renderImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                 int OpIdx) const;

  void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
                           int OpIdx) const;
  void renderXLenSubTrailingOnes(MachineInstrBuilder &MIB,
                                 const MachineInstr &MI, int OpIdx) const;

  void renderAddiPairImmLarge(MachineInstrBuilder &MIB, const MachineInstr &MI,
                              int OpIdx) const;
  void renderAddiPairImmSmall(MachineInstrBuilder &MIB, const MachineInstr &MI,
                              int OpIdx) const;

  const RISCVSubtarget &STI;
  const RISCVInstrInfo &TII;
  const RISCVRegisterInfo &TRI;
  const RISCVRegisterBankInfo &RBI;
  const RISCVTargetMachine &TM;

  MachineRegisterInfo *MRI = nullptr;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. We need to unify the name
  // of the Subtarget variable.
  const RISCVSubtarget *Subtarget = &STI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      TM(TM),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
                                          unsigned ShiftWidth) const {
  if (!Root.isReg())
    return std::nullopt;

  using namespace llvm::MIPatternMatch;

  Register ShAmtReg = Root.getReg();
  // Peek through zext.
  Register ZExtSrcReg;
  if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg))))
    ShAmtReg = ZExtSrcReg;

  APInt AndMask;
  Register AndSrcReg;
  // Try to combine the following pattern (applicable to other shift
  // instructions as well as 32-bit ones):
  //
  //   %4:gprb(s64) = G_AND %3, %2
  //   %5:gprb(s64) = G_LSHR %1, %4(s64)
  //
  // According to RISC-V's ISA manual, SLL, SRL, and SRA ignore all but the
  // lowest log2(XLEN) bits of register rs2. For the pattern above, if the
  // lowest log2(XLEN) bits of the G_AND's destination and source registers
  // are the same, the G_AND can be eliminated. Given that one G_AND operand
  // holds a constant (the and mask), there are two cases in which the G_AND
  // can be erased:
  //
  // 1. the lowest log2(XLEN) bits of the and mask are all set
  // 2. the bits of the register being masked are already zero in those
  //    positions
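  // For example (illustrative, XLEN == 64): an and mask of 63 covers all six
  // shift-amount bits, so (G_LSHR %1, (G_AND %3, 63)) can select the shift
  // amount as %3 directly.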
  if (mi_match(ShAmtReg, *MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmtReg = AndSrcReg;
    } else {
      // SimplifyDemandedBits may have optimized the mask so try restoring any
      // bits that are known zero.
      KnownBits Known = KB->getKnownBits(AndSrcReg);
      if (ShMask.isSubsetOf(AndMask | Known.Zero))
        ShAmtReg = AndSrcReg;
    }
  }

  APInt Imm;
  Register Reg;
  if (mi_match(ShAmtReg, *MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
      // If we are shifting by X+N where N == 0 mod Size, then just shift by X
      // to avoid the ADD.
      ShAmtReg = Reg;
  } else if (mi_match(ShAmtReg, *MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
      // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
      // to generate a NEG instead of a SUB of a constant.
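      // For example (illustrative, XLEN == 64): shifting by (64 - y) becomes
      // shifting by (neg y), since only the low 6 bits of the amount are read.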
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
        MIB.addReg(ShAmtReg);
      }}};
    }
    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
      // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
      // to generate a NOT instead of a SUB of a constant.
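      // For example (illustrative, XLEN == 64): shifting by (63 - y) becomes
      // shifting by (not y), since 63 == -1 (mod 64).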
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
            .addImm(-1);
        MIB.addReg(ShAmtReg);
      }}};
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}

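// Return an operand that is sign-extended from Bits bits: peek through a
// matching G_SEXT_INREG, or use Root itself if it already has enough sign
// bits. For example (illustrative, Bits == 32): a root defined by
// (G_SEXT_INREG %x, 32) yields %x.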
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();
  MachineInstr *RootDef = MRI->getVRegDef(RootReg);

  if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
      RootDef->getOperand(2).getImm() == Bits) {
    return {
        {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};
  }

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if ((Size - KB->computeNumSignBits(RootReg)) < Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}

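// Return an operand that is zero-extended to Bits bits: peek through a
// matching G_AND or G_ZEXT, or use Root itself if its upper bits are known
// zero. For example (illustrative, Bits == 32): a root defined by
// (G_AND %x, 0xffffffff) yields %x.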
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  Register RegX;
  uint64_t Mask = maskTrailingOnes<uint64_t>(Bits);
  if (mi_match(RootReg, *MRI, m_GAnd(m_Reg(RegX), m_SpecificICst(Mask)))) {
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
  }

  if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
      MRI->getType(RegX).getScalarSizeInBits() == Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if (KB->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  const unsigned XLen = STI.getXLen();
  APInt Mask, C2;
  Register RegY;
  std::optional<bool> LeftShift;
  // (and (shl y, c2), mask)
  if (mi_match(RootReg, *MRI,
               m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = true;
  // (and (lshr y, c2), mask)
  else if (mi_match(RootReg, *MRI,
                    m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = false;

  if (LeftShift.has_value()) {
    if (*LeftShift)
      Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
    else
      Mask &= maskTrailingOnes<uint64_t>(XLen - C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // Given (and (shl y, c2), mask) in which mask has no leading zeros and
      // c3 trailing zeros, we can use an SRLI by c3 - c2 followed by a SHXADD.
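      // For example (illustrative, XLEN == 64, ShAmt == 3):
      //   (and (shl y, 1), 0xfffffffffffffff8)
      // becomes (SRLI y, 2), whose result then feeds the SH3ADD.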
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Trailing - C2.getLimitedValue());
          MIB.addReg(DstReg);
        }}};
      }

      // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
      // c3 trailing zeros, we can use an SRLI by c2 + c3 followed by a SHXADD.
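      // For example (illustrative, XLEN == 64, ShAmt == 3):
      //   (and (lshr y, 8), 0x00fffffffffffff8)
      // becomes (SRLI y, 11), whose result then feeds the SH3ADD.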
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  LeftShift.reset();

  // (shl (and y, mask), c2)
  if (mi_match(RootReg, *MRI,
               m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                      m_ICst(C2))))
    LeftShift = true;
  // (lshr (and y, mask), c2)
  else if (mi_match(RootReg, *MRI,
                    m_GLShr(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                            m_ICst(C2))))
    LeftShift = false;

  if (LeftShift.has_value() && Mask.isShiftedMask()) {
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();

    // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
    // c3 trailing zeros, if c2 + c3 == ShAmt we can emit SRLIW + SHXADD.
    bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
                (Trailing + C2.getLimitedValue()) == ShAmt;
    if (!Cond)
      // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
      // c3 trailing zeros, if c3 - c2 == ShAmt we can emit SRLIW + SHXADD.
      Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
             (Trailing - C2.getLimitedValue()) == ShAmt;

    if (Cond) {
      Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
            .addImm(Trailing);
        MIB.addReg(DstReg);
      }}};
    }
  }

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  // Given (and (shl x, c2), mask) in which mask is a shifted mask with
  // 32 - ShAmt leading zeros and c2 trailing zeros, we can use an SLLI by
  // c2 - ShAmt followed by a SHXADD_UW with shift amount ShAmt.
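  // For example (illustrative, XLEN == 64, ShAmt == 2):
  //   (and (shl x, 4), 0x3fffffff0)
  // becomes (SLLI x, 2), whose result then feeds the SH2ADD_UW.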
  APInt Mask, C2;
  Register RegX;
  if (mi_match(
          RootReg, *MRI,
          m_OneNonDBGUse(m_GAnd(m_OneNonDBGUse(m_GShl(m_Reg(RegX), m_ICst(C2))),
                                m_ICst(Mask))))) {
    Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
              .addImm(C2.getLimitedValue() - ShAmt);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
  assert(Root.isReg() && "Expected operand to be a Register");
  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());

  if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
    auto C = RootDef->getOperand(1).getCImm();
    if (C->getValue().isAllOnes())
      // If the operand is a G_CONSTANT whose value is all ones, it is larger
      // than VLMAX. We convert it to an immediate with value VLMaxSentinel.
      // This is recognized specially by the vsetvli insertion pass.
      return {{[=](MachineInstrBuilder &MIB) {
        MIB.addImm(RISCV::VLMaxSentinel);
      }}};

    if (isUInt<5>(C->getZExtValue())) {
      uint64_t ZExtC = C->getZExtValue();
      return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
    }
  }
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  if (!Root.isReg())
    return std::nullopt;

  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

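  // Fold a base-plus-constant address into (base, simm12) operands, e.g.
  // (illustrative) a root defined by (G_PTR_ADD %base, 16) yields the operand
  // pair (%base, 16).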
  if (isBaseWithConstantOffset(Root, *MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());

    int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
    if (isInt<12>(RHSC)) {
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    }
  }

  // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
  // the combiner?
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}

/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate
/// CC. CC must be an ICMP predicate.
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
  switch (CC) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
    return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:
    return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT:
    return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT:
    return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE:
    return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE:
    return RISCVCC::COND_GE;
  }
}

static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
                                 Register &LHS, Register &RHS,
                                 MachineRegisterInfo &MRI) {
  // Try to fold an ICmp. If that fails, use a NE compare with X0.
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
    LHS = CondReg;
    RHS = RISCV::X0;
    CC = RISCVCC::COND_NE;
    return;
  }

  // We found an ICmp, do some canonicalizations.

  // Adjust comparisons to use comparison with 0 if possible.
  if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
    switch (Pred) {
    case CmpInst::Predicate::ICMP_SGT:
      // Convert X > -1 to X >= 0
      if (*Constant == -1) {
        CC = RISCVCC::COND_GE;
        RHS = RISCV::X0;
        return;
      }
      break;
    case CmpInst::Predicate::ICMP_SLT:
      // Convert X < 1 to 0 >= X
      if (*Constant == 1) {
        CC = RISCVCC::COND_GE;
        RHS = LHS;
        LHS = RISCV::X0;
        return;
      }
      break;
    default:
      break;
    }
  }

  switch (Pred) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
  case CmpInst::Predicate::ICMP_NE:
  case CmpInst::Predicate::ICMP_ULT:
  case CmpInst::Predicate::ICMP_SLT:
  case CmpInst::Predicate::ICMP_UGE:
  case CmpInst::Predicate::ICMP_SGE:
    // These CCs are supported directly by RISC-V branches.
    break;
  case CmpInst::Predicate::ICMP_SGT:
  case CmpInst::Predicate::ICMP_SLE:
  case CmpInst::Predicate::ICMP_UGT:
  case CmpInst::Predicate::ICMP_ULE:
    // These CCs are not supported directly by RISC-V branches; reverse the
    // direction of the CC and swap LHS and RHS instead.
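    // For example (illustrative): (icmp sgt %a, %b) becomes
    // (icmp slt %b, %a), which maps onto BLT with the operands swapped.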
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    break;
  }

  CC = getRISCVCCFromICmp(Pred);
}

bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI->getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI->getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
          dyn_cast<const TargetRegisterClass *>(RegClassOrBank);
      if (!DefRC) {
        if (!DefTy.isValid()) {
          LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
          return false;
        }

        const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC) {
          LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
          return false;
        }
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
    }

    // Certain non-generic instructions also need some special handling.
    if (MI.isCopy())
      return selectCopy(MI);

    return true;
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    return selectCopy(MI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use the constant pool for complex constants.
    // TODO: Optimize +0.0 to use fcvt.d.w for s64 on rv32.
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    APInt Imm = FPimm.bitcastToAPInt();
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      Register GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
        return false;

      unsigned Opcode = Size == 64   ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
    } else {
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");
      // Split into two pieces and build through the stack.
      Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      MachineInstrBuilder PairF64 = MIB.buildInstr(
          RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
704   case TargetOpcode::G_BRCOND: {
705     Register LHS, RHS;
706     RISCVCC::CondCode CC;
707     getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);
708 
709     auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
710                    .addMBB(MI.getOperand(1).getMBB());
711     MI.eraseFromParent();
712     return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
713   }
714   case TargetOpcode::G_BRINDIRECT:
715     MI.setDesc(TII.get(RISCV::PseudoBRIND));
716     MI.addOperand(MachineOperand::CreateImm(0));
717     return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
718   case TargetOpcode::G_FRAME_INDEX: {
    // TODO: We may want to replace this code with the SelectionDAG patterns,
    // which fail to get imported because they use FrameAddrRegImm, which is a
    // ComplexPattern.
    MI.setDesc(TII.get(RISCV::ADDI));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(MI, MIB);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB);
  default:
    return false;
  }
}

bool RISCVInstructionSelector::selectMergeValues(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_MERGE_VALUES);

  // Build an F64 pair from the two GPR operands.
  if (MI.getNumOperands() != 3)
    return false;
  Register Dst = MI.getOperand(0).getReg();
  Register Lo = MI.getOperand(1).getReg();
  Register Hi = MI.getOperand(2).getReg();
  if (!isRegInFprb(Dst) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;
  MI.setDesc(TII.get(RISCV::BuildPairF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  // Split F64 Src into two s32 parts
  if (MI.getNumOperands() != 3)
    return false;
  Register Src = MI.getOperand(2).getReg();
  Register Lo = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;
  MI.setDesc(TII.get(RISCV::SplitF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB) {
  Register PtrReg = Op.getReg();
  assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");

  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}

void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
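    // Rewrite pointer arithmetic as integer arithmetic so the imported
    // patterns can select it. For example (illustrative):
    //   %2:gprb(p0) = G_PTR_ADD %0, %1
    // becomes
    //   %3:gprb(sXLen) = G_PTRTOINT %0
    //   %2:gprb(sXLen) = G_ADD %3, %1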
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());

    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI->setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());
    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI->setType(DstReg, sXLen);
    break;
  }
  }
}

void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}

void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}

void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}

void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}

void RISCVInstructionSelector::renderImm(MachineInstrBuilder &MIB,
                                         const MachineInstr &MI,
                                         int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal);
}

void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(llvm::countr_zero(C));
}

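// Render XLen minus the number of trailing ones, e.g. (illustrative,
// XLEN == 64) a mask of 0xffff renders as 48, suitable for a
// (SRLI (SLLI x, 48), 48) style pattern.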
void RISCVInstructionSelector::renderXLenSubTrailingOnes(
    MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(Subtarget->getXLen() - llvm::countr_one(C));
}

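// An immediate just outside the simm12 range can be added with a pair of
// ADDIs, e.g. (illustrative) 4000 splits into 2047 + 1953, i.e.
// (ADDI (ADDI x, 2047), 1953). renderAddiPairImmSmall emits the leftover part
// (1953) and renderAddiPairImmLarge the clamped part (2047).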
void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
  int64_t Adj = Imm < 0 ? -2048 : 2047;
  MIB.addImm(Imm - Adj);
}

void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
  MIB.addImm(Imm);
}

const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return &RISCV::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  if (RB.getID() == RISCV::VRBRegBankID) {
    if (Ty.getSizeInBits().getKnownMinValue() <= 64)
      return &RISCV::VRRegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 128)
      return &RISCV::VRM2RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 256)
      return &RISCV::VRM4RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 512)
      return &RISCV::VRM8RegClass;
  }

  return nullptr;
}

bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}

bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();

  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
    return false;
  }

  MI.setDesc(TII.get(RISCV::COPY));
  return true;
}

bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
  }
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  if (Imm == 0) {
    MIB.buildCopy(DstReg, Register(RISCV::X0));
    RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
    return true;
  }

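  // Materialize the constant with a sequence chosen by RISCVMatInt, e.g.
  // (illustrative, RV32) 0x12345678 becomes:
  //   LUI  tmp, 0x12345
  //   ADDI dst, tmp, 0x678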
  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    MachineInstr *Result;

    switch (I.getOpndKind()) {
    case RISCVMatInt::Imm:
      // clang-format off
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
                   .addImm(I.getImm());
      // clang-format on
      break;
    case RISCVMatInt::RegX0:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
                              {SrcReg, Register(RISCV::X0)});
      break;
    case RISCVMatInt::RegReg:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
      break;
    case RISCVMatInt::RegImm:
      Result =
          MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
      break;
    }

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    SrcReg = TmpReg;
  }

  return true;
}

bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB, bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);

  // When HWASAN is used and tagging of global variables is enabled, they
  // should be accessed via the GOT, since the tagged address of a global is
  // incompatible with existing code models. This also applies to non-pic
  // mode.
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use PC-relative addressing to access the GOT for this symbol, then
    // load the address from the GOT. This generates the pattern (PseudoLGA
    // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
    // %pcrel_lo(auipc))).
    MachineFunction &MF = *MI.getParent()->getParent();
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF),
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        DefTy, Align(DefTy.getSizeInBits() / 8));

    auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                      .addDisp(DispMO, 0)
                      .addMemOperand(MemOp);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }

  switch (TM.getCodeModel()) {
  default: {
    reportGISelFailure(const_cast<MachineFunction &>(*MF), *TPC, *MORE,
                       getName(), "Unsupported code model for lowering", MI);
    return false;
  }
  case CodeModel::Small: {
    // Must lie within a single 2 GiB address range and must lie between
    // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
    // (lui %hi(sym)) %lo(sym)).
    Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
                               .addDisp(DispMO, 0, RISCVII::MO_HI);

    if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
      return false;

    auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
                      .addDisp(DispMO, 0, RISCVII::MO_LO);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case CodeModel::Medium:
    // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
    // relocation needs to reference a label that points to the auipc
    // instruction itself, not the global. This cannot be done inside the
    // instruction selector.
    if (IsExternWeak) {
      // An extern weak symbol may be undefined, i.e. have value 0, which may
      // not be within 2GiB of PC, so use GOT-indirect addressing to access the
      // symbol. This generates the pattern (PseudoLGA sym), which expands to
      // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
      MachineFunction &MF = *MI.getParent()->getParent();
      MachineMemOperand *MemOp = MF.getMachineMemOperand(
          MachinePointerInfo::getGOT(MF),
          MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
              MachineMemOperand::MOInvariant,
          DefTy, Align(DefTy.getSizeInBits() / 8));

      auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                        .addDisp(DispMO, 0)
                        .addMemOperand(MemOp);

      if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
        return false;

      MI.eraseFromParent();
      return true;
    }

    // Generate a sequence for accessing addresses within any 2GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    MI.setDesc(TII.get(RISCV::PseudoLLA));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }

  return false;
}

bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB) const {
  auto &SelectMI = cast<GSelect>(MI);

  Register LHS, RHS;
  RISCVCC::CondCode CC;
  getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);

  Register DstReg = SelectMI.getReg(0);

  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result = MIB.buildInstr(Opc)
                             .addDef(DstReg)
                             .addReg(LHS)
                             .addReg(RHS)
                             .addImm(CC)
                             .addReg(SelectMI.getTrueReg())
                             .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
}

// Convert an FCMP predicate to one of the supported F or D instructions.
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
  assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
  switch (Pred) {
  default:
    llvm_unreachable("Unsupported predicate");
  case CmpInst::FCMP_OLT:
    return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
  case CmpInst::FCMP_OLE:
    return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
  case CmpInst::FCMP_OEQ:
    return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
  }
}

// Try legalizing an FCMP by swapping or inverting the predicate to one that
// is supported.
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
                                  CmpInst::Predicate &Pred, bool &NeedInvert) {
  auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
           Pred == CmpInst::FCMP_OEQ;
  };

  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  InvPred = CmpInst::getInversePredicate(Pred);
  NeedInvert = true;
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    return true;
  }
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  return false;
}

// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
// the result in DstReg.
// FIXME: Maybe we should expand this earlier.
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  auto &CmpMI = cast<GFCmp>(MI);
  CmpInst::Predicate Pred = CmpMI.getCond();

  Register DstReg = CmpMI.getReg(0);
  Register LHS = CmpMI.getLHSReg();
  Register RHS = CmpMI.getRHSReg();

  unsigned Size = MRI->getType(LHS).getSizeInBits();
  assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");

  Register TmpReg = DstReg;

  bool NeedInvert = false;
  // First try swapping operands or inverting.
  if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
    if (!Cmp.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
    // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
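    // fcmp ueq LHS, RHS is the inverse of that OR; NeedInvert emits the
    // XORI below.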
    NeedInvert = Pred == CmpInst::FCMP_UEQ;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {LHS, RHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {RHS, LHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Or =
        MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!Or.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
    // FIXME: If LHS and RHS are the same we can use a single FEQ.
    NeedInvert = Pred == CmpInst::FCMP_UNO;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {LHS, LHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {RHS, RHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto And =
        MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!And.constrainAllUses(TII, TRI, RBI))
      return false;
  } else
    llvm_unreachable("Unhandled predicate");

  // Emit an XORI to invert the result if needed.
  if (NeedInvert) {
    auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
    if (!Xor.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    // The only fence that needs an instruction is a sequentially-consistent
    // cross-thread fence.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // fence rw, rw
      MIB.buildInstr(RISCV::FENCE, {}, {})
          .addImm(RISCVFenceField::R | RISCVFenceField::W)
          .addImm(RISCVFenceField::R | RISCVFenceField::W);
      return;
    }

    // MEMBARRIER is a compiler barrier; it codegens to a no-op.
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // singlethread fences only synchronize with signal handlers on the same
  // thread and thus only need to preserve instruction order, not actually
  // enforce memory ordering.
  if (FenceSSID == SyncScope::SingleThread) {
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
  // Manual: Volume I.
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  default:
    llvm_unreachable("Unexpected ordering");
  case AtomicOrdering::AcquireRelease:
    // fence acq_rel -> fence.tso
    MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
    return;
  case AtomicOrdering::Acquire:
    // fence acquire -> fence r, rw
    Pred = RISCVFenceField::R;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  case AtomicOrdering::Release:
    // fence release -> fence rw, w
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::W;
    break;
  case AtomicOrdering::SequentiallyConsistent:
    // fence seq_cst -> fence rw, rw
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  }
  MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
}

namespace llvm {
InstructionSelector *
createRISCVInstructionSelector(const RISCVTargetMachine &TM,
                               const RISCVSubtarget &Subtarget,
                               const RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm