//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// RISC-V.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVRegisterBankInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "riscv-isel"

using namespace llvm;
using namespace MIPatternMatch;

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

namespace {

class RISCVInstructionSelector : public InstructionSelector {
public:
  RISCVInstructionSelector(const RISCVTargetMachine &TM,
                           const RISCVSubtarget &STI,
                           const RISCVRegisterBankInfo &RBI);

  bool select(MachineInstr &MI) override;

  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override {
    InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
    MRI = &MF.getRegInfo();
  }

  static const char *getName() { return DEBUG_TYPE; }

private:
  const TargetRegisterClass *
  getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;

  bool isRegInGprb(Register Reg) const;
  bool isRegInFprb(Register Reg) const;

  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // A lowering phase that runs before any selection attempts, rewriting the
  // instruction in place where needed.
  void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);

  bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);

  // Custom selection methods
  bool selectCopy(MachineInstr &MI) const;
  bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB) const;
  void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
                 MachineIRBuilder &MIB) const;
  bool selectMergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;

  ComplexRendererFns selectShiftMask(MachineOperand &Root) const;
  ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;

  ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
    return selectSExtBits(Root, Bits);
  }

  ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
    return selectZExtBits(Root, Bits);
  }

  ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }

  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                       unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }

  ComplexRendererFns renderVLOp(MachineOperand &Root) const;

  // Custom renderers for tablegen
  void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                    int OpIdx) const;
  void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
                            int OpIdx) const;
  void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
                          int OpIdx) const;
  void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
                      int OpIdx) const;
  void renderImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                 int OpIdx) const;

  void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
                           int OpIdx) const;

  const RISCVSubtarget &STI;
  const RISCVInstrInfo &TII;
  const RISCVRegisterInfo &TRI;
  const RISCVRegisterBankInfo &RBI;
  const RISCVTargetMachine &TM;

  MachineRegisterInfo *MRI = nullptr;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. We need to unify the name
  // of the Subtarget variable.
  const RISCVSubtarget *Subtarget = &STI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      TM(TM),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root) const {
  if (!Root.isReg())
    return std::nullopt;

  using namespace llvm::MIPatternMatch;

  Register RootReg = Root.getReg();
  Register ShAmtReg = RootReg;
  const LLT ShiftLLT = MRI->getType(RootReg);
  unsigned ShiftWidth = ShiftLLT.getSizeInBits();
  assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
  // Peek through zext.
  Register ZExtSrcReg;
  if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg)))) {
    ShAmtReg = ZExtSrcReg;
  }

  APInt AndMask;
  Register AndSrcReg;
  // Try to combine the following pattern (which also applies to the other
  // shift instructions and to their 32-bit variants):
  //
  //   %4:gprb(s64) = G_AND %3, %2
  //   %5:gprb(s64) = G_LSHR %1, %4(s64)
  //
  // According to the RISC-V ISA manual, SLL, SRL, and SRA ignore all but the
  // lowest log2(XLEN) bits of register rs2, so the G_AND can be eliminated
  // whenever the lowest log2(XLEN) bits of its result and of the register
  // being masked are guaranteed to match. Given that one operand of the G_AND
  // holds a constant (the and mask), this happens in two cases:
  //
  // 1. the lowest log2(XLEN) bits of the and mask are all set
  // 2. the mask clears only bits that are already known to be zero
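  // For example (illustrative), on RV64 case 1 fires for AndMask == 63: the
  // low 6 bits of 63 are all set, so %3 above can be used directly as the
  // shift amount and the G_AND is dropped.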
  if (mi_match(ShAmtReg, *MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmtReg = AndSrcReg;
    } else {
      // SimplifyDemandedBits may have optimized the mask so try restoring any
      // bits that are known zero.
      KnownBits Known = KB->getKnownBits(AndSrcReg);
      if (ShMask.isSubsetOf(AndMask | Known.Zero))
        ShAmtReg = AndSrcReg;
    }
  }

  APInt Imm;
  Register Reg;
  if (mi_match(ShAmtReg, *MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
      // If we are shifting by X+N where N == 0 mod Size, then just shift by X
      // to avoid the ADD.
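      // e.g. on RV64 a shift by (x + 64) is the same as a shift by x, since
      // only the low 6 bits of the amount are read (illustrative example).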
      ShAmtReg = Reg;
  } else if (mi_match(ShAmtReg, *MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
      // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
      // to generate a NEG instead of a SUB of a constant.
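      // e.g. on RV64 a shift by (64 - x) becomes a shift by (-x), with the
      // negation emitted below as a SUBW/SUB from X0 (illustrative example).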
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
        MIB.addReg(ShAmtReg);
      }}};
    }
    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
      // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
      // to generate a NOT instead of a SUB of a constant.
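      // e.g. on RV64 a shift by (63 - x) becomes a shift by ~x, since
      // 63 - x == ~x (mod 64); the XORI with -1 below emits the NOT
      // (illustrative example).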
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
            .addImm(-1);
        MIB.addReg(ShAmtReg);
      }}};
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();
  MachineInstr *RootDef = MRI->getVRegDef(RootReg);

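  // e.g. with Bits == 32 on RV64, (G_SEXT_INREG x, 32) already provides at
  // least 32 sign bits, so x itself is usable as the operand (illustrative
  // example).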
  if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
      RootDef->getOperand(2).getImm() == Bits) {
    return {
        {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};
  }

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if ((Size - KB->computeNumSignBits(RootReg)) < Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  Register RegX;
  uint64_t Mask = maskTrailingOnes<uint64_t>(Bits);
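  // e.g. with Bits == 8, (G_AND x, 0xFF) matches below and x itself is used
  // as the zero-extended operand (illustrative example).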
  if (mi_match(RootReg, *MRI, m_GAnd(m_Reg(RegX), m_SpecificICst(Mask)))) {
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
  }

  if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
      MRI->getType(RegX).getScalarSizeInBits() == Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if (KB->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  const unsigned XLen = STI.getXLen();
  APInt Mask, C2;
  Register RegY;
  std::optional<bool> LeftShift;
  // (and (shl y, c2), mask)
  if (mi_match(RootReg, *MRI,
               m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = true;
  // (and (lshr y, c2), mask)
  else if (mi_match(RootReg, *MRI,
                    m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = false;

  if (LeftShift.has_value()) {
    if (*LeftShift)
      Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
    else
      Mask &= maskTrailingOnes<uint64_t>(XLen - C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // Given (and (shl y, c2), mask), where mask has no leading zeros and c3
      // trailing zeros, we can use an SRLI by c3 - c2 followed by a SHXADD.
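      // e.g. for SH3ADD (ShAmt == 3) on RV64 with c2 == 1 and
      // mask == 0xFFFFFFFFFFFFFFF8 (c3 == 3), (and (shl y, 1), mask)
      // becomes (srli y, 2) fed to the SH3ADD (illustrative example).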
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Trailing - C2.getLimitedValue());
          MIB.addReg(DstReg);
        }}};
      }

      // Given (and (lshr y, c2), mask), where mask has c2 leading zeros and c3
      // trailing zeros, we can use an SRLI by c2 + c3 followed by a SHXADD.
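      // e.g. with ShAmt == 3 and c2 == 2 on RV64, mask == 0x3FFFFFFFFFFFFFF8
      // (2 leading and 3 trailing zeros) gives (srli y, 5) fed to the SH3ADD
      // (illustrative example).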
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  LeftShift.reset();

  // (shl (and y, mask), c2)
  if (mi_match(RootReg, *MRI,
               m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                      m_ICst(C2))))
    LeftShift = true;
  // (lshr (and y, mask), c2)
  else if (mi_match(RootReg, *MRI,
                    m_GLShr(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                            m_ICst(C2))))
    LeftShift = false;

  if (LeftShift.has_value() && Mask.isShiftedMask()) {
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();

    // Given (shl (and y, mask), c2), where mask has 32 leading zeros and c3
    // trailing zeros: if c2 + c3 == ShAmt, we can emit SRLIW + SHXADD.
    bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
                (Trailing + C2.getLimitedValue()) == ShAmt;
    if (!Cond)
      // Given (lshr (and y, mask), c2), where mask has 32 leading zeros and c3
      // trailing zeros: if c3 - c2 == ShAmt, we can emit SRLIW + SHXADD.
      Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
             (Trailing - C2.getLimitedValue()) == ShAmt;

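    // e.g. for SH2ADD (ShAmt == 2) on RV64, (shl (and y, 0xFFFFFFFE), 1) has
    // Leading == 32, c3 == 1, and 1 + 1 == 2, so (srliw y, 1) is fed to the
    // SH2ADD (illustrative example).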
    if (Cond) {
      Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
            .addImm(Trailing);
        MIB.addReg(DstReg);
      }}};
    }
  }

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  // Given (and (shl x, c2), mask), where mask is a shifted mask with
  // 32 - ShAmt leading zeros and c2 trailing zeros, we can use an SLLI by
  // c2 - ShAmt followed by a SHXADD_UW with shift amount ShAmt.
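  // e.g. for SH2ADD_UW (ShAmt == 2) with c2 == 4 and mask == 0x3FFFFFFF0,
  // (and (shl x, 4), mask) becomes (slli x, 2) fed to the SH2ADD_UW
  // (illustrative example).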
  APInt Mask, C2;
  Register RegX;
  if (mi_match(
          RootReg, *MRI,
          m_OneNonDBGUse(m_GAnd(m_OneNonDBGUse(m_GShl(m_Reg(RegX), m_ICst(C2))),
                                m_ICst(Mask))))) {
    Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
              .addImm(C2.getLimitedValue() - ShAmt);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
  assert(Root.isReg() && "Expected operand to be a Register");
  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());

  if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
    auto C = RootDef->getOperand(1).getCImm();
    if (C->getValue().isAllOnes())
      // If the operand is a G_CONSTANT with a value of all ones, it is larger
      // than VLMAX. Convert it to an immediate with the value VLMaxSentinel,
      // which is recognized specially by the vsetvli insertion pass.
      return {{[=](MachineInstrBuilder &MIB) {
        MIB.addImm(RISCV::VLMaxSentinel);
      }}};

    if (isUInt<5>(C->getZExtValue())) {
      uint64_t ZExtC = C->getZExtValue();
      return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
    }
  }
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  if (!Root.isReg())
    return std::nullopt;

  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, *MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());

    int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
    if (isInt<12>(RHSC)) {
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    }
  }

  // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
  // the combiner?
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}

/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate
/// CC. CC must be an ICMP predicate.
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
  switch (CC) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
    return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:
    return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT:
    return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT:
    return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE:
    return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE:
    return RISCVCC::COND_GE;
  }
}

static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
                                 Register &LHS, Register &RHS,
                                 MachineRegisterInfo &MRI) {
  // Try to fold an ICmp. If that fails, use a NE compare with X0.
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
    LHS = CondReg;
    RHS = RISCV::X0;
    CC = RISCVCC::COND_NE;
    return;
  }

  // We found an ICmp; do some canonicalizations.

  // Adjust comparisons to use comparison with 0 if possible.
  if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
    switch (Pred) {
    case CmpInst::Predicate::ICMP_SGT:
      // Convert X > -1 to X >= 0
      if (*Constant == -1) {
        CC = RISCVCC::COND_GE;
        RHS = RISCV::X0;
        return;
      }
      break;
    case CmpInst::Predicate::ICMP_SLT:
      // Convert X < 1 to 0 >= X
      if (*Constant == 1) {
        CC = RISCVCC::COND_GE;
        RHS = LHS;
        LHS = RISCV::X0;
        return;
      }
      break;
    default:
      break;
    }
  }

  switch (Pred) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
  case CmpInst::Predicate::ICMP_NE:
  case CmpInst::Predicate::ICMP_ULT:
  case CmpInst::Predicate::ICMP_SLT:
  case CmpInst::Predicate::ICMP_UGE:
  case CmpInst::Predicate::ICMP_SGE:
    // These CCs are supported directly by RISC-V branches.
    break;
  case CmpInst::Predicate::ICMP_SGT:
  case CmpInst::Predicate::ICMP_SLE:
  case CmpInst::Predicate::ICMP_UGT:
  case CmpInst::Predicate::ICMP_ULE:
    // These CCs are not supported directly by RISC-V branches, but we can
    // reverse the direction of the CC and swap LHS and RHS.
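    // e.g. (icmp sgt a, b) becomes (icmp slt b, a), which maps to BLT
    // (illustrative example).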
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    break;
  }

  CC = getRISCVCCFromICmp(Pred);
  return;
}

bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI->getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI->getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
          dyn_cast<const TargetRegisterClass *>(RegClassOrBank);
      if (!DefRC) {
        if (!DefTy.isValid()) {
          LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
          return false;
        }

        const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC) {
          LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
          return false;
        }
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
    }

    // Certain non-generic instructions also need some special handling.
    if (MI.isCopy())
      return selectCopy(MI);

    return true;
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    return selectCopy(MI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use constant pool for complex constants.
    // TODO: Optimize +0.0 to use fcvt.d.w for s64 on rv32.
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    APInt Imm = FPimm.bitcastToAPInt();
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      Register GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
        return false;

      unsigned Opcode = Size == 64   ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
    } else {
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");
      // Split into two pieces and build through the stack.
      Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      MachineInstrBuilder PairF64 = MIB.buildInstr(
          RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI, MIB);
  case TargetOpcode::G_BRCOND: {
    Register LHS, RHS;
    RISCVCC::CondCode CC;
    getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);

    auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
                   .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
  }
  case TargetOpcode::G_BRJT: {
    // FIXME: Move to legalization?
    const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
    unsigned EntrySize = MJTI->getEntrySize(MF.getDataLayout());
    assert((EntrySize == 4 || (Subtarget->is64Bit() && EntrySize == 8)) &&
           "Unsupported jump-table entry size");
    assert(
        (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32 ||
         MJTI->getEntryKind() == MachineJumpTableInfo::EK_Custom32 ||
         MJTI->getEntryKind() == MachineJumpTableInfo::EK_BlockAddress) &&
        "Unexpected jump-table entry kind");

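    // Summary of the sequence built below (last ADD only for
    // EK_LabelDifference32):
    //   SLLI  off,  index, Log2(EntrySize)
    //   ADD   addr, base,  off
    //   LW/LD dest, 0(addr)
    //   ADD   dest, dest,  base
    //   PseudoBRIND dest, 0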
    auto SLL =
        MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {MI.getOperand(2)})
            .addImm(Log2_32(EntrySize));
    if (!SLL.constrainAllUses(TII, TRI, RBI))
      return false;

    // TODO: Use SHXADD. Moving to legalization would fix this automatically.
    auto ADD = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
                              {MI.getOperand(0), SLL.getReg(0)});
    if (!ADD.constrainAllUses(TII, TRI, RBI))
      return false;

    unsigned LdOpc = EntrySize == 8 ? RISCV::LD : RISCV::LW;
    auto Dest =
        MIB.buildInstr(LdOpc, {&RISCV::GPRRegClass}, {ADD.getReg(0)})
            .addImm(0)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo::getJumpTable(MF), MachineMemOperand::MOLoad,
                EntrySize, Align(MJTI->getEntryAlignment(MF.getDataLayout()))));
    if (!Dest.constrainAllUses(TII, TRI, RBI))
      return false;

    // If the Kind is EK_LabelDifference32, the table stores an offset from
    // the location of the table. Add the table address to get an absolute
    // address.
    if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32) {
      Dest = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
                            {Dest.getReg(0), MI.getOperand(0)});
      if (!Dest.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    auto Branch =
        MIB.buildInstr(RISCV::PseudoBRIND, {}, {Dest.getReg(0)}).addImm(0);
    if (!Branch.constrainAllUses(TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  case TargetOpcode::G_FRAME_INDEX: {
    // TODO: We may want to replace this code with the SelectionDAG patterns,
    // which fail to get imported because they use FrameAddrRegImm, which is a
    // ComplexPattern.
    MI.setDesc(TII.get(RISCV::ADDI));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(MI, MIB);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB);
  default:
    return false;
  }
}

bool RISCVInstructionSelector::selectMergeValues(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_MERGE_VALUES);

  // Build an F64 pair from the two GPR operands.
  if (MI.getNumOperands() != 3)
    return false;
  Register Dst = MI.getOperand(0).getReg();
  Register Lo = MI.getOperand(1).getReg();
  Register Hi = MI.getOperand(2).getReg();
  if (!isRegInFprb(Dst) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;
  MI.setDesc(TII.get(RISCV::BuildPairF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  // Split F64 Src into two s32 parts
  if (MI.getNumOperands() != 3)
    return false;
  Register Src = MI.getOperand(2).getReg();
  Register Lo = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;
  MI.setDesc(TII.get(RISCV::SplitF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB) {
  Register PtrReg = Op.getReg();
  assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");

  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}

void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
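    // e.g. on RV64, (p0) G_PTR_ADD %base, %off is rewritten here to
    // (s64) G_ADD (G_PTRTOINT %base), %off before selection (illustrative
    // example).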
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());

    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI->setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());
    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI->setType(DstReg, sXLen);
    break;
  }
  }
}

void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}

void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}

void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}

void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}

void RISCVInstructionSelector::renderImm(MachineInstrBuilder &MIB,
                                         const MachineInstr &MI,
                                         int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal);
}

void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(llvm::countr_zero(C));
}

const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return &RISCV::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  if (RB.getID() == RISCV::VRBRegBankID) {
    if (Ty.getSizeInBits().getKnownMinValue() <= 64)
      return &RISCV::VRRegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 128)
      return &RISCV::VRM2RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 256)
      return &RISCV::VRM4RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 512)
      return &RISCV::VRM8RegClass;
  }

  return nullptr;
}

bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}

bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();

  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
    return false;
  }

  MI.setDesc(TII.get(RISCV::COPY));
  return true;
}

bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
  }
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  if (Imm == 0) {
    MIB.buildCopy(DstReg, Register(RISCV::X0));
    RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
    return true;
  }

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
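  // e.g. on RV32, materializing 0x12345 yields the RISCVMatInt sequence
  // {LUI 0x12, ADDI 0x345}; the loop below chains the instructions through
  // fresh virtual registers (illustrative example).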
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    MachineInstr *Result;

    switch (I.getOpndKind()) {
    case RISCVMatInt::Imm:
      // clang-format off
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
                   .addImm(I.getImm());
      // clang-format on
      break;
    case RISCVMatInt::RegX0:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
                              {SrcReg, Register(RISCV::X0)});
      break;
    case RISCVMatInt::RegReg:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
      break;
    case RISCVMatInt::RegImm:
      Result =
          MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
      break;
    }

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    SrcReg = TmpReg;
  }

  return true;
}

bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB, bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);

  // When HWASAN is used and tagging of global variables is enabled, they
  // should be accessed via the GOT, since the tagged address of a global is
  // incompatible with existing code models. This also applies to non-PIC
  // mode.
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use PC-relative addressing to access the GOT for this symbol, then
    // load the address from the GOT. This generates the pattern (PseudoLGA
    // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
    // %pcrel_lo(auipc))).
    MachineFunction &MF = *MI.getParent()->getParent();
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF),
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        DefTy, Align(DefTy.getSizeInBits() / 8));

    auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                      .addDisp(DispMO, 0)
                      .addMemOperand(MemOp);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }

  switch (TM.getCodeModel()) {
  default: {
    reportGISelFailure(const_cast<MachineFunction &>(*MF), *TPC, *MORE,
                       getName(), "Unsupported code model for lowering", MI);
    return false;
  }
  case CodeModel::Small: {
    // The address must lie within a single 2 GiB range and between absolute
    // addresses -2 GiB and +2 GiB. This generates the pattern (addi (lui
    // %hi(sym)) %lo(sym)).
    Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
                               .addDisp(DispMO, 0, RISCVII::MO_HI);

    if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
      return false;

    auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
                      .addDisp(DispMO, 0, RISCVII::MO_LO);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case CodeModel::Medium:
    // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
    // relocation needs to reference a label that points to the auipc
    // instruction itself, not the global. This cannot be done inside the
    // instruction selector.
    if (IsExternWeak) {
      // An extern weak symbol may be undefined, i.e. have value 0, which may
      // not be within 2 GiB of PC, so use GOT-indirect addressing to access
      // the symbol. This generates the pattern (PseudoLGA sym), which expands
      // to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
      MachineFunction &MF = *MI.getParent()->getParent();
      MachineMemOperand *MemOp = MF.getMachineMemOperand(
          MachinePointerInfo::getGOT(MF),
          MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
              MachineMemOperand::MOInvariant,
          DefTy, Align(DefTy.getSizeInBits() / 8));

      auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                        .addDisp(DispMO, 0)
                        .addMemOperand(MemOp);

      if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
        return false;

      MI.eraseFromParent();
      return true;
    }

    // Generate a sequence for accessing addresses within any 2 GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    MI.setDesc(TII.get(RISCV::PseudoLLA));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }

  return false;
}

bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB) const {
  auto &SelectMI = cast<GSelect>(MI);

  Register LHS, RHS;
  RISCVCC::CondCode CC;
  getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);

  Register DstReg = SelectMI.getReg(0);

  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result = MIB.buildInstr(Opc)
                             .addDef(DstReg)
                             .addReg(LHS)
                             .addReg(RHS)
                             .addImm(CC)
                             .addReg(SelectMI.getTrueReg())
                             .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
}

// Convert an FCMP predicate to one of the supported F, D, or Zfh instructions.
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
  assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
  switch (Pred) {
  default:
    llvm_unreachable("Unsupported predicate");
  case CmpInst::FCMP_OLT:
    return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
  case CmpInst::FCMP_OLE:
    return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
  case CmpInst::FCMP_OEQ:
    return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
  }
}

// Try legalizing an FCMP by swapping or inverting the predicate to one that
// is supported.
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
                                  CmpInst::Predicate &Pred, bool &NeedInvert) {
  auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
           Pred == CmpInst::FCMP_OEQ;
  };

  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

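  // e.g. FCMP_OGT swaps to FCMP_OLT; FCMP_UGE inverts to FCMP_OLT (with
  // NeedInvert set); FCMP_ULE inverts to FCMP_OGT and then swaps to FCMP_OLT
  // (illustrative examples).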
  CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  InvPred = CmpInst::getInversePredicate(Pred);
  NeedInvert = true;
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    return true;
  }
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  return false;
}

// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
// the result in DstReg.
// FIXME: Maybe we should expand this earlier.
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  auto &CmpMI = cast<GFCmp>(MI);
  CmpInst::Predicate Pred = CmpMI.getCond();

  Register DstReg = CmpMI.getReg(0);
  Register LHS = CmpMI.getLHSReg();
  Register RHS = CmpMI.getRHSReg();

  unsigned Size = MRI->getType(LHS).getSizeInBits();
  assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");

  Register TmpReg = DstReg;

  bool NeedInvert = false;
  // First try swapping operands or inverting.
  if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
    if (!Cmp.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
    // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
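    // fcmp ueq LHS, RHS is the inverse: NOT (OR (FLT LHS, RHS), (FLT RHS,
    // LHS)), with the NOT emitted by the trailing XORI below.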
    NeedInvert = Pred == CmpInst::FCMP_UEQ;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {LHS, RHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {RHS, LHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Or =
        MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!Or.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
    // FIXME: If LHS and RHS are the same we can use a single FEQ.
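    // fcmp uno is the inverse of that AND and likewise relies on the
    // trailing XORI below.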
    NeedInvert = Pred == CmpInst::FCMP_UNO;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {LHS, LHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {RHS, RHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto And =
        MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!And.constrainAllUses(TII, TRI, RBI))
      return false;
  } else
    llvm_unreachable("Unhandled predicate");

  // Emit an XORI to invert the result if needed.
  if (NeedInvert) {
    auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
    if (!Xor.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    // The only fence that needs an instruction is a sequentially-consistent
    // cross-thread fence.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // fence rw, rw
      MIB.buildInstr(RISCV::FENCE, {}, {})
          .addImm(RISCVFenceField::R | RISCVFenceField::W)
          .addImm(RISCVFenceField::R | RISCVFenceField::W);
      return;
    }

    // MEMBARRIER is a compiler barrier; it codegens to a no-op.
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Singlethread fences only synchronize with signal handlers on the same
  // thread and thus only need to preserve instruction order, not actually
  // enforce memory ordering.
  if (FenceSSID == SyncScope::SingleThread) {
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
  // Manual: Volume I.
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  default:
    llvm_unreachable("Unexpected ordering");
  case AtomicOrdering::AcquireRelease:
    // fence acq_rel -> fence.tso
    MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
    return;
  case AtomicOrdering::Acquire:
    // fence acquire -> fence r, rw
    Pred = RISCVFenceField::R;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  case AtomicOrdering::Release:
    // fence release -> fence rw, w
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::W;
    break;
  case AtomicOrdering::SequentiallyConsistent:
    // fence seq_cst -> fence rw, rw
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  }
  MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
}

namespace llvm {
InstructionSelector *
createRISCVInstructionSelector(const RISCVTargetMachine &TM,
                               const RISCVSubtarget &Subtarget,
                               const RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm