xref: /llvm-project/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp (revision da032b7903da57eb87015369e5c4db521cb4dbac)
1 //===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// RISC-V.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "MCTargetDesc/RISCVMatInt.h"
15 #include "RISCVRegisterBankInfo.h"
16 #include "RISCVSubtarget.h"
17 #include "RISCVTargetMachine.h"
18 #include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
19 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
20 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
21 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
22 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
23 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
24 #include "llvm/CodeGen/MachineJumpTableInfo.h"
25 #include "llvm/IR/IntrinsicsRISCV.h"
26 #include "llvm/Support/Debug.h"
27 
28 #define DEBUG_TYPE "riscv-isel"
29 
30 using namespace llvm;
31 using namespace MIPatternMatch;
32 
33 #define GET_GLOBALISEL_PREDICATE_BITSET
34 #include "RISCVGenGlobalISel.inc"
35 #undef GET_GLOBALISEL_PREDICATE_BITSET
36 
37 namespace {
38 
39 class RISCVInstructionSelector : public InstructionSelector {
40 public:
41   RISCVInstructionSelector(const RISCVTargetMachine &TM,
42                            const RISCVSubtarget &STI,
43                            const RISCVRegisterBankInfo &RBI);
44 
45   bool select(MachineInstr &MI) override;
46 
47   void setupMF(MachineFunction &MF, GISelKnownBits *KB,
48                CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
49                BlockFrequencyInfo *BFI) override {
50     InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
51     MRI = &MF.getRegInfo();
52   }
53 
54   static const char *getName() { return DEBUG_TYPE; }
55 
56 private:
57   const TargetRegisterClass *
58   getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;
59 
60   bool isRegInGprb(Register Reg) const;
61   bool isRegInFprb(Register Reg) const;
62 
63   // tblgen-erated 'select' implementation, used as the initial selector for
64   // the patterns that don't require complex C++.
65   bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
66 
67   // A lowering phase that runs before any selection attempts.
68   // It may rewrite the instruction in place.
69   void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);
70 
71   bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);
72 
73   // Custom selection methods
74   bool selectCopy(MachineInstr &MI) const;
75   bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB) const;
76   bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
77   bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
78                   bool IsExternWeak = false) const;
79   bool selectSExtInreg(MachineInstr &MI, MachineIRBuilder &MIB) const;
80   bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB) const;
81   bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB) const;
82   void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
83                  MachineIRBuilder &MIB) const;
84   bool selectMergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;
85   bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;
86 
87   ComplexRendererFns selectShiftMask(MachineOperand &Root) const;
88   ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;
89 
90   ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
91   template <unsigned Bits>
92   ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
93     return selectZExtBits(Root, Bits);
94   }
95 
96   ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
97   template <unsigned ShAmt>
98   ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
99     return selectSHXADDOp(Root, ShAmt);
100   }
101 
102   ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
103                                        unsigned ShAmt) const;
104   template <unsigned ShAmt>
105   ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
106     return selectSHXADD_UWOp(Root, ShAmt);
107   }
108 
109   ComplexRendererFns renderVLOp(MachineOperand &Root) const;
110 
111   // Custom renderers for tablegen
112   void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
113                     int OpIdx) const;
114   void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
115                             int OpIdx) const;
116   void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
117                           int OpIdx) const;
118   void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
119                       int OpIdx) const;
120   void renderImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
121                  int OpIdx) const;
122 
123   void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
124                            int OpIdx) const;
125 
126   const RISCVSubtarget &STI;
127   const RISCVInstrInfo &TII;
128   const RISCVRegisterInfo &TRI;
129   const RISCVRegisterBankInfo &RBI;
130   const RISCVTargetMachine &TM;
131 
132   MachineRegisterInfo *MRI = nullptr;
133 
134   // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
135   // uses "STI." in the code generated by TableGen. We need to unify the name
136   // of the Subtarget variable.
137   const RISCVSubtarget *Subtarget = &STI;
138 
139 #define GET_GLOBALISEL_PREDICATES_DECL
140 #include "RISCVGenGlobalISel.inc"
141 #undef GET_GLOBALISEL_PREDICATES_DECL
142 
143 #define GET_GLOBALISEL_TEMPORARIES_DECL
144 #include "RISCVGenGlobalISel.inc"
145 #undef GET_GLOBALISEL_TEMPORARIES_DECL
146 };
147 
148 } // end anonymous namespace
149 
150 #define GET_GLOBALISEL_IMPL
151 #include "RISCVGenGlobalISel.inc"
152 #undef GET_GLOBALISEL_IMPL
153 
154 RISCVInstructionSelector::RISCVInstructionSelector(
155     const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
156     const RISCVRegisterBankInfo &RBI)
157     : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
158       TM(TM),
159 
160 #define GET_GLOBALISEL_PREDICATES_INIT
161 #include "RISCVGenGlobalISel.inc"
162 #undef GET_GLOBALISEL_PREDICATES_INIT
163 #define GET_GLOBALISEL_TEMPORARIES_INIT
164 #include "RISCVGenGlobalISel.inc"
165 #undef GET_GLOBALISEL_TEMPORARIES_INIT
166 {
167 }
168 
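// Match the shift-amount operand of a variable shift: look through a zext,
// strip a G_AND mask that is redundant for the shift width, and fold
// shift-amount arithmetic into a NEG or NOT where that saves an instruction.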
169 InstructionSelector::ComplexRendererFns
170 RISCVInstructionSelector::selectShiftMask(MachineOperand &Root) const {
171   if (!Root.isReg())
172     return std::nullopt;
173 
174   using namespace llvm::MIPatternMatch;
175 
176   Register RootReg = Root.getReg();
177   Register ShAmtReg = RootReg;
178   const LLT ShiftLLT = MRI->getType(RootReg);
179   unsigned ShiftWidth = ShiftLLT.getSizeInBits();
180   assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
181   // Peek through zext.
182   Register ZExtSrcReg;
183   if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg)))) {
184     ShAmtReg = ZExtSrcReg;
185   }
186 
187   APInt AndMask;
188   Register AndSrcReg;
189   // Try to combine the following pattern (this also applies to the other
190   // shift instructions, including the 32-bit W forms):
191   //
192   //   %4:gprb(s64) = G_AND %3, %2
193   //   %5:gprb(s64) = G_LSHR %1, %4(s64)
194   //
195   // According to the RISC-V ISA manual, SLL, SRL, and SRA ignore all but the
196   // lowest log2(XLEN) bits of register rs2. For the pattern above, if the
197   // lowest log2(XLEN) bits of the G_AND's result and its non-constant input
198   // are the same, the G_AND can be eliminated. Given that one G_AND operand
199   // holds a constant (the and mask), the G_AND can be erased in two cases:
200   //
201   // 1. the lowest log2(XLEN) bits of the and mask are all set
202   // 2. the masked-off bits of the shift amount are already known to be zero
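  //
  // For example, on RV64 (G_LSHR x, (G_AND y, 63)) can select to "srl x, y",
  // since SRL reads only the low 6 bits of rs2 anyway (case 1 above).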
203   if (mi_match(ShAmtReg, *MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
204     APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
205     if (ShMask.isSubsetOf(AndMask)) {
206       ShAmtReg = AndSrcReg;
207     } else {
208       // SimplifyDemandedBits may have optimized the mask so try restoring any
209       // bits that are known zero.
210       KnownBits Known = KB->getKnownBits(AndSrcReg);
211       if (ShMask.isSubsetOf(AndMask | Known.Zero))
212         ShAmtReg = AndSrcReg;
213     }
214   }
215 
216   APInt Imm;
217   Register Reg;
218   if (mi_match(ShAmtReg, *MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
219     if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
220       // If we are shifting by X+N where N == 0 mod Size, then just shift by X
221       // to avoid the ADD.
222       ShAmtReg = Reg;
223   } else if (mi_match(ShAmtReg, *MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
224     if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
225       // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
226       // to generate a NEG instead of a SUB of a constant.
227       ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
228       unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
229       return {{[=](MachineInstrBuilder &MIB) {
230         MachineIRBuilder(*MIB.getInstr())
231             .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
232         MIB.addReg(ShAmtReg);
233       }}};
234     }
235     if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
236       // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
237       // to generate a NOT instead of a SUB of a constant.
238       ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
239       return {{[=](MachineInstrBuilder &MIB) {
240         MachineIRBuilder(*MIB.getInstr())
241             .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
242             .addImm(-1);
243         MIB.addReg(ShAmtReg);
244       }}};
245     }
246   }
247 
248   return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
249 }
250 
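// Match an operand whose value is known to be zero in all bits above the low
// Bits bits, either because it is masked by an explicit G_AND with the
// corresponding trailing-ones constant or because KnownBits proves the high
// bits are already zero.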
251 InstructionSelector::ComplexRendererFns
252 RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
253                                          unsigned Bits) const {
254   if (!Root.isReg())
255     return std::nullopt;
256   Register RootReg = Root.getReg();
257 
258   Register RegX;
259   uint64_t Mask = maskTrailingOnes<uint64_t>(Bits);
260   if (mi_match(RootReg, *MRI, m_GAnd(m_Reg(RegX), m_SpecificICst(Mask)))) {
261     return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
262   }
263 
264   unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
265   if (KB->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
266     return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
267 
268   return std::nullopt;
269 }
270 
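// Match an operand that can feed a SHXADD (the Zba shift-by-ShAmt-then-add
// instructions) by folding its shift-and-mask computation into a single SRLI
// or SRLIW in front of the SHXADD.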
271 InstructionSelector::ComplexRendererFns
272 RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
273                                          unsigned ShAmt) const {
274   using namespace llvm::MIPatternMatch;
275 
276   if (!Root.isReg())
277     return std::nullopt;
278   Register RootReg = Root.getReg();
279 
280   const unsigned XLen = STI.getXLen();
281   APInt Mask, C2;
282   Register RegY;
283   std::optional<bool> LeftShift;
284   // (and (shl y, c2), mask)
285   if (mi_match(RootReg, *MRI,
286                m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
287     LeftShift = true;
288   // (and (lshr y, c2), mask)
289   else if (mi_match(RootReg, *MRI,
290                     m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
291     LeftShift = false;
292 
293   if (LeftShift.has_value()) {
294     if (*LeftShift)
295       Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
296     else
297       Mask &= maskTrailingOnes<uint64_t>(XLen - C2.getLimitedValue());
298 
299     if (Mask.isShiftedMask()) {
300       unsigned Leading = XLen - Mask.getActiveBits();
301       unsigned Trailing = Mask.countr_zero();
302       // Given (and (shl y, c2), mask) in which mask has no leading zeros and
303       // c3 trailing zeros, we can use an SRLI by c3 - c2 followed by a SHXADD.
304       if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
305         Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
306         return {{[=](MachineInstrBuilder &MIB) {
307           MachineIRBuilder(*MIB.getInstr())
308               .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
309               .addImm(Trailing - C2.getLimitedValue());
310           MIB.addReg(DstReg);
311         }}};
312       }
313 
314       // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
315       // c3 trailing zeros, we can use an SRLI by c2 + c3 followed by a SHXADD.
316       if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
317         Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
318         return {{[=](MachineInstrBuilder &MIB) {
319           MachineIRBuilder(*MIB.getInstr())
320               .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
321               .addImm(Leading + Trailing);
322           MIB.addReg(DstReg);
323         }}};
324       }
325     }
326   }
327 
328   LeftShift.reset();
329 
330   // (shl (and y, mask), c2)
331   if (mi_match(RootReg, *MRI,
332                m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
333                       m_ICst(C2))))
334     LeftShift = true;
335   // (lshr (and y, mask), c2)
336   else if (mi_match(RootReg, *MRI,
337                     m_GLShr(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
338                             m_ICst(C2))))
339     LeftShift = false;
340 
341   if (LeftShift.has_value() && Mask.isShiftedMask()) {
342     unsigned Leading = XLen - Mask.getActiveBits();
343     unsigned Trailing = Mask.countr_zero();
344 
345     // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
346     // c3 trailing zeros, if c2 + c3 == ShAmt we can emit SRLIW + SHXADD.
347     bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
348                 (Trailing + C2.getLimitedValue()) == ShAmt;
349     if (!Cond)
350       // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
351       // c3 trailing zeros, if c3 - c2 == ShAmt we can emit SRLIW + SHXADD.
352       Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
353              (Trailing - C2.getLimitedValue()) == ShAmt;
354 
355     if (Cond) {
356       Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
357       return {{[=](MachineInstrBuilder &MIB) {
358         MachineIRBuilder(*MIB.getInstr())
359             .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
360             .addImm(Trailing);
361         MIB.addReg(DstReg);
362       }}};
363     }
364   }
365 
366   return std::nullopt;
367 }
368 
369 InstructionSelector::ComplexRendererFns
370 RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
371                                             unsigned ShAmt) const {
372   using namespace llvm::MIPatternMatch;
373 
374   if (!Root.isReg())
375     return std::nullopt;
376   Register RootReg = Root.getReg();
377 
378   // Given (and (shl x, c2), mask) in which mask is a shifted mask with
379   // 32 - ShAmt leading zeros and c2 trailing zeros, we can use an SLLI by
380   // c2 - ShAmt followed by a SHXADD_UW that shifts x left by ShAmt.
381   APInt Mask, C2;
382   Register RegX;
383   if (mi_match(
384           RootReg, *MRI,
385           m_OneNonDBGUse(m_GAnd(m_OneNonDBGUse(m_GShl(m_Reg(RegX), m_ICst(C2))),
386                                 m_ICst(Mask))))) {
387     Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
388 
389     if (Mask.isShiftedMask()) {
390       unsigned Leading = Mask.countl_zero();
391       unsigned Trailing = Mask.countr_zero();
392       if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
393         Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
394         return {{[=](MachineInstrBuilder &MIB) {
395           MachineIRBuilder(*MIB.getInstr())
396               .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
397               .addImm(C2.getLimitedValue() - ShAmt);
398           MIB.addReg(DstReg);
399         }}};
400       }
401     }
402   }
403 
404   return std::nullopt;
405 }
406 
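// Render the VL operand of a vector pseudo: an all-ones G_CONSTANT becomes the
// VLMaxSentinel immediate, other constants fitting in uimm5 are encoded
// directly, and anything else is passed through as a register.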
407 InstructionSelector::ComplexRendererFns
408 RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
409   assert(Root.isReg() && "Expected operand to be a Register");
410   MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
411 
412   if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
413     auto C = RootDef->getOperand(1).getCImm();
414     if (C->getValue().isAllOnes())
415       // If the operand is a G_CONSTANT with an all-ones value, it is larger
416       // than VLMAX. Convert it to the immediate VLMaxSentinel, which is
417       // recognized specially by the vsetvli insertion pass.
418       return {{[=](MachineInstrBuilder &MIB) {
419         MIB.addImm(RISCV::VLMaxSentinel);
420       }}};
421 
422     if (isUInt<5>(C->getZExtValue())) {
423       uint64_t ZExtC = C->getZExtValue();
424       return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
425     }
426   }
427   return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
428 }
429 
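// Match a (base register, simm12 offset) pair for register+immediate
// addressing, folding frame indices and constant offsets where possible.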
430 InstructionSelector::ComplexRendererFns
431 RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
432   if (!Root.isReg())
433     return std::nullopt;
434 
435   MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
436   if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
437     return {{
438         [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
439         [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
440     }};
441   }
442 
443   if (isBaseWithConstantOffset(Root, *MRI)) {
444     MachineOperand &LHS = RootDef->getOperand(1);
445     MachineOperand &RHS = RootDef->getOperand(2);
446     MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
447     MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
448 
449     int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
450     if (isInt<12>(RHSC)) {
451       if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
452         return {{
453             [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
454             [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
455         }};
456 
457       return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
458                [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
459     }
460   }
461 
462   // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
463   // the combiner?
464   return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
465            [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
466 }
467 
468 /// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate CC.
469 /// CC must be an ICmp predicate.
470 static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
471   switch (CC) {
472   default:
473     llvm_unreachable("Expected ICMP CmpInst::Predicate.");
474   case CmpInst::Predicate::ICMP_EQ:
475     return RISCVCC::COND_EQ;
476   case CmpInst::Predicate::ICMP_NE:
477     return RISCVCC::COND_NE;
478   case CmpInst::Predicate::ICMP_ULT:
479     return RISCVCC::COND_LTU;
480   case CmpInst::Predicate::ICMP_SLT:
481     return RISCVCC::COND_LT;
482   case CmpInst::Predicate::ICMP_UGE:
483     return RISCVCC::COND_GEU;
484   case CmpInst::Predicate::ICMP_SGE:
485     return RISCVCC::COND_GE;
486   }
487 }
488 
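// Compute the condition code and operand registers for a conditional branch,
// folding a G_ICMP that feeds the condition when possible and canonicalizing
// to one of the predicates that BEQ/BNE/BLT/BGE/BLTU/BGEU support directly.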
489 static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
490                                  Register &LHS, Register &RHS,
491                                  MachineRegisterInfo &MRI) {
492   // Try to fold an ICmp. If that fails, use a NE compare with X0.
493   CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
494   if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
495     LHS = CondReg;
496     RHS = RISCV::X0;
497     CC = RISCVCC::COND_NE;
498     return;
499   }
500 
501   // We found an ICmp, do some canonicalizations.
502 
503   // Adjust comparisons to use comparison with 0 if possible.
504   if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
505     switch (Pred) {
506     case CmpInst::Predicate::ICMP_SGT:
507       // Convert X > -1 to X >= 0
508       if (*Constant == -1) {
509         CC = RISCVCC::COND_GE;
510         RHS = RISCV::X0;
511         return;
512       }
513       break;
514     case CmpInst::Predicate::ICMP_SLT:
515       // Convert X < 1 to 0 >= X
516       if (*Constant == 1) {
517         CC = RISCVCC::COND_GE;
518         RHS = LHS;
519         LHS = RISCV::X0;
520         return;
521       }
522       break;
523     default:
524       break;
525     }
526   }
527 
528   switch (Pred) {
529   default:
530     llvm_unreachable("Expected ICMP CmpInst::Predicate.");
531   case CmpInst::Predicate::ICMP_EQ:
532   case CmpInst::Predicate::ICMP_NE:
533   case CmpInst::Predicate::ICMP_ULT:
534   case CmpInst::Predicate::ICMP_SLT:
535   case CmpInst::Predicate::ICMP_UGE:
536   case CmpInst::Predicate::ICMP_SGE:
537     // These CCs are supported directly by RISC-V branches.
538     break;
539   case CmpInst::Predicate::ICMP_SGT:
540   case CmpInst::Predicate::ICMP_SLE:
541   case CmpInst::Predicate::ICMP_UGT:
542   case CmpInst::Predicate::ICMP_ULE:
543     // These CCs are not supported directly by RISC-V branches, but changing the
544     // direction of the CC and swapping LHS and RHS are.
545     Pred = CmpInst::getSwappedPredicate(Pred);
546     std::swap(LHS, RHS);
547     break;
548   }
549 
550   CC = getRISCVCCFromICmp(Pred);
551   return;
552 }
553 
554 bool RISCVInstructionSelector::select(MachineInstr &MI) {
555   MachineBasicBlock &MBB = *MI.getParent();
556   MachineFunction &MF = *MBB.getParent();
557   MachineIRBuilder MIB(MI);
558 
559   preISelLower(MI, MIB);
560   const unsigned Opc = MI.getOpcode();
561 
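  // Already-selected (non-generic) instructions and PHIs bypass the selector
  // table; they only need their register operands constrained below.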
562   if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
563     if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
564       const Register DefReg = MI.getOperand(0).getReg();
565       const LLT DefTy = MRI->getType(DefReg);
566 
567       const RegClassOrRegBank &RegClassOrBank =
568           MRI->getRegClassOrRegBank(DefReg);
569 
570       const TargetRegisterClass *DefRC =
571           RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
572       if (!DefRC) {
573         if (!DefTy.isValid()) {
574           LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
575           return false;
576         }
577 
578         const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
579         DefRC = getRegClassForTypeOnBank(DefTy, RB);
580         if (!DefRC) {
581           LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
582           return false;
583         }
584       }
585 
586       MI.setDesc(TII.get(TargetOpcode::PHI));
587       return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
588     }
589 
590     // Certain non-generic instructions also need some special handling.
591     if (MI.isCopy())
592       return selectCopy(MI);
593 
594     return true;
595   }
596 
597   if (selectImpl(MI, *CoverageInfo))
598     return true;
599 
600   switch (Opc) {
601   case TargetOpcode::G_ANYEXT:
602   case TargetOpcode::G_PTRTOINT:
603   case TargetOpcode::G_INTTOPTR:
604   case TargetOpcode::G_TRUNC:
605   case TargetOpcode::G_FREEZE:
606     return selectCopy(MI);
607   case TargetOpcode::G_CONSTANT: {
608     Register DstReg = MI.getOperand(0).getReg();
609     int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
610 
611     if (!materializeImm(DstReg, Imm, MIB))
612       return false;
613 
614     MI.eraseFromParent();
615     return true;
616   }
617   case TargetOpcode::G_FCONSTANT: {
618     // TODO: Use the constant pool for complex constants.
619     // TODO: Optimize +0.0 to use fcvt.d.w for s64 on rv32.
620     Register DstReg = MI.getOperand(0).getReg();
621     const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
622     APInt Imm = FPimm.bitcastToAPInt();
623     unsigned Size = MRI->getType(DstReg).getSizeInBits();
624     if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
625       Register GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
626       if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
627         return false;
628 
629       unsigned Opcode = Size == 64   ? RISCV::FMV_D_X
630                         : Size == 32 ? RISCV::FMV_W_X
631                                      : RISCV::FMV_H_X;
632       auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
633       if (!FMV.constrainAllUses(TII, TRI, RBI))
634         return false;
635     } else {
636       assert(Size == 64 && !Subtarget->is64Bit() &&
637              "Unexpected size or subtarget");
638       // Split into two pieces and build through the stack.
639       Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
640       Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
641       if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
642                           MIB))
643         return false;
644       if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
645         return false;
646       MachineInstrBuilder PairF64 = MIB.buildInstr(
647           RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
648       if (!PairF64.constrainAllUses(TII, TRI, RBI))
649         return false;
650     }
651 
652     MI.eraseFromParent();
653     return true;
654   }
655   case TargetOpcode::G_GLOBAL_VALUE: {
656     auto *GV = MI.getOperand(1).getGlobal();
657     if (GV->isThreadLocal()) {
658       // TODO: implement this case.
659       return false;
660     }
661 
662     return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
663   }
664   case TargetOpcode::G_JUMP_TABLE:
665   case TargetOpcode::G_CONSTANT_POOL:
666     return selectAddr(MI, MIB);
667   case TargetOpcode::G_BRCOND: {
668     Register LHS, RHS;
669     RISCVCC::CondCode CC;
670     getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);
671 
672     auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
673                    .addMBB(MI.getOperand(1).getMBB());
674     MI.eraseFromParent();
675     return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
676   }
677   case TargetOpcode::G_BRJT: {
678     // FIXME: Move to legalization?
679     const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
680     unsigned EntrySize = MJTI->getEntrySize(MF.getDataLayout());
681     assert((EntrySize == 4 || (Subtarget->is64Bit() && EntrySize == 8)) &&
682            "Unsupported jump-table entry size");
683     assert(
684         (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32 ||
685          MJTI->getEntryKind() == MachineJumpTableInfo::EK_Custom32 ||
686          MJTI->getEntryKind() == MachineJumpTableInfo::EK_BlockAddress) &&
687         "Unexpected jump-table entry kind");
688 
689     auto SLL =
690         MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {MI.getOperand(2)})
691             .addImm(Log2_32(EntrySize));
692     if (!SLL.constrainAllUses(TII, TRI, RBI))
693       return false;
694 
695     // TODO: Use SHXADD. Moving to legalization would fix this automatically.
696     auto ADD = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
697                               {MI.getOperand(0), SLL.getReg(0)});
698     if (!ADD.constrainAllUses(TII, TRI, RBI))
699       return false;
700 
701     unsigned LdOpc = EntrySize == 8 ? RISCV::LD : RISCV::LW;
702     auto Dest =
703         MIB.buildInstr(LdOpc, {&RISCV::GPRRegClass}, {ADD.getReg(0)})
704             .addImm(0)
705             .addMemOperand(MF.getMachineMemOperand(
706                 MachinePointerInfo::getJumpTable(MF), MachineMemOperand::MOLoad,
707                 EntrySize, Align(MJTI->getEntryAlignment(MF.getDataLayout()))));
708     if (!Dest.constrainAllUses(TII, TRI, RBI))
709       return false;
710 
711     // If the Kind is EK_LabelDifference32, the table stores an offset from
712     // the location of the table. Add the table address to get an absolute
713     // address.
714     if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32) {
715       Dest = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
716                             {Dest.getReg(0), MI.getOperand(0)});
717       if (!Dest.constrainAllUses(TII, TRI, RBI))
718         return false;
719     }
720 
721     auto Branch =
722         MIB.buildInstr(RISCV::PseudoBRIND, {}, {Dest.getReg(0)}).addImm(0);
723     if (!Branch.constrainAllUses(TII, TRI, RBI))
724       return false;
725 
726     MI.eraseFromParent();
727     return true;
728   }
729   case TargetOpcode::G_BRINDIRECT:
730     MI.setDesc(TII.get(RISCV::PseudoBRIND));
731     MI.addOperand(MachineOperand::CreateImm(0));
732     return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
733   case TargetOpcode::G_SEXT_INREG:
734     return selectSExtInreg(MI, MIB);
735   case TargetOpcode::G_FRAME_INDEX: {
736     // TODO: We may want to replace this code with the SelectionDAG patterns,
737     // which fail to get imported because they use FrameAddrRegImm, which is a
738     // ComplexPattern.
739     MI.setDesc(TII.get(RISCV::ADDI));
740     MI.addOperand(MachineOperand::CreateImm(0));
741     return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
742   }
743   case TargetOpcode::G_SELECT:
744     return selectSelect(MI, MIB);
745   case TargetOpcode::G_FCMP:
746     return selectFPCompare(MI, MIB);
747   case TargetOpcode::G_FENCE: {
748     AtomicOrdering FenceOrdering =
749         static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
750     SyncScope::ID FenceSSID =
751         static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
752     emitFence(FenceOrdering, FenceSSID, MIB);
753     MI.eraseFromParent();
754     return true;
755   }
756   case TargetOpcode::G_IMPLICIT_DEF:
757     return selectImplicitDef(MI, MIB);
758   case TargetOpcode::G_MERGE_VALUES:
759     return selectMergeValues(MI, MIB);
760   case TargetOpcode::G_UNMERGE_VALUES:
761     return selectUnmergeValues(MI, MIB);
762   default:
763     return false;
764   }
765 }
766 
767 bool RISCVInstructionSelector::selectMergeValues(MachineInstr &MI,
768                                                  MachineIRBuilder &MIB) const {
769   assert(MI.getOpcode() == TargetOpcode::G_MERGE_VALUES);
770 
771   // Build an F64 pair from the two GPR operands.
772   if (MI.getNumOperands() != 3)
773     return false;
774   Register Dst = MI.getOperand(0).getReg();
775   Register Lo = MI.getOperand(1).getReg();
776   Register Hi = MI.getOperand(2).getReg();
777   if (!isRegInFprb(Dst) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
778     return false;
779   MI.setDesc(TII.get(RISCV::BuildPairF64Pseudo));
780   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
781 }
782 
783 bool RISCVInstructionSelector::selectUnmergeValues(
784     MachineInstr &MI, MachineIRBuilder &MIB) const {
785   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
786 
787   // Split F64 Src into two s32 parts
788   if (MI.getNumOperands() != 3)
789     return false;
790   Register Src = MI.getOperand(2).getReg();
791   Register Lo = MI.getOperand(0).getReg();
792   Register Hi = MI.getOperand(1).getReg();
793   if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
794     return false;
795   MI.setDesc(TII.get(RISCV::SplitF64Pseudo));
796   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
797 }
798 
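// Rewrite a pointer-typed operand as an XLen-wide integer via G_PTRTOINT so
// that plain GPR patterns apply; the new G_PTRTOINT is selected immediately.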
799 bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
800                                                  MachineIRBuilder &MIB) {
801   Register PtrReg = Op.getReg();
802   assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");
803 
804   const LLT sXLen = LLT::scalar(STI.getXLen());
805   auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
806   MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
807   Op.setReg(PtrToInt.getReg(0));
808   return select(*PtrToInt);
809 }
810 
811 void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
812                                             MachineIRBuilder &MIB) {
813   switch (MI.getOpcode()) {
814   case TargetOpcode::G_PTR_ADD: {
815     Register DstReg = MI.getOperand(0).getReg();
816     const LLT sXLen = LLT::scalar(STI.getXLen());
817 
818     replacePtrWithInt(MI.getOperand(1), MIB);
819     MI.setDesc(TII.get(TargetOpcode::G_ADD));
820     MRI->setType(DstReg, sXLen);
821     break;
822   }
823   case TargetOpcode::G_PTRMASK: {
824     Register DstReg = MI.getOperand(0).getReg();
825     const LLT sXLen = LLT::scalar(STI.getXLen());
826     replacePtrWithInt(MI.getOperand(1), MIB);
827     MI.setDesc(TII.get(TargetOpcode::G_AND));
828     MRI->setType(DstReg, sXLen);
829     break;
830   }
831   }
832 }
833 
834 void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
835                                             const MachineInstr &MI,
836                                             int OpIdx) const {
837   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
838          "Expected G_CONSTANT");
839   int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
840   MIB.addImm(-CstVal);
841 }
842 
843 void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
844                                                     const MachineInstr &MI,
845                                                     int OpIdx) const {
846   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
847          "Expected G_CONSTANT");
848   uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
849   MIB.addImm(STI.getXLen() - CstVal);
850 }
851 
852 void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
853                                                   const MachineInstr &MI,
854                                                   int OpIdx) const {
855   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
856          "Expected G_CONSTANT");
857   uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
858   MIB.addImm(32 - CstVal);
859 }
860 
861 void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
862                                               const MachineInstr &MI,
863                                               int OpIdx) const {
864   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
865          "Expected G_CONSTANT");
866   int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
867   MIB.addImm(CstVal + 1);
868 }
869 
870 void RISCVInstructionSelector::renderImm(MachineInstrBuilder &MIB,
871                                          const MachineInstr &MI,
872                                          int OpIdx) const {
873   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
874          "Expected G_CONSTANT");
875   int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
876   MIB.addImm(CstVal);
877 }
878 
879 void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
880                                                    const MachineInstr &MI,
881                                                    int OpIdx) const {
882   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
883          "Expected G_CONSTANT");
884   uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
885   MIB.addImm(llvm::countr_zero(C));
886 }
887 
888 const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
889     LLT Ty, const RegisterBank &RB) const {
890   if (RB.getID() == RISCV::GPRBRegBankID) {
891     if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
892       return &RISCV::GPRRegClass;
893   }
894 
895   if (RB.getID() == RISCV::FPRBRegBankID) {
896     if (Ty.getSizeInBits() == 16)
897       return &RISCV::FPR16RegClass;
898     if (Ty.getSizeInBits() == 32)
899       return &RISCV::FPR32RegClass;
900     if (Ty.getSizeInBits() == 64)
901       return &RISCV::FPR64RegClass;
902   }
903 
904   if (RB.getID() == RISCV::VRBRegBankID) {
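    // Pick the class by register grouping (LMUL): a single vector register for
    // known minimum sizes up to 64 bits, then the 2-, 4-, and 8-register
    // groups for 128, 256, and 512 bits.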
905     if (Ty.getSizeInBits().getKnownMinValue() <= 64)
906       return &RISCV::VRRegClass;
907 
908     if (Ty.getSizeInBits().getKnownMinValue() == 128)
909       return &RISCV::VRM2RegClass;
910 
911     if (Ty.getSizeInBits().getKnownMinValue() == 256)
912       return &RISCV::VRM4RegClass;
913 
914     if (Ty.getSizeInBits().getKnownMinValue() == 512)
915       return &RISCV::VRM8RegClass;
916   }
917 
918   return nullptr;
919 }
920 
921 bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
922   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
923 }
924 
925 bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
926   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
927 }
928 
929 bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
930   Register DstReg = MI.getOperand(0).getReg();
931 
932   if (DstReg.isPhysical())
933     return true;
934 
935   const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
936       MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
937   assert(DstRC &&
938          "Register class not available for LLT, register bank combination");
939 
940   // No need to constrain SrcReg. It will get constrained when
941   // we hit another of its uses or its defs.
942   // Copies do not have constraints.
943   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
944     LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
945                       << " operand\n");
946     return false;
947   }
948 
949   MI.setDesc(TII.get(RISCV::COPY));
950   return true;
951 }
952 
953 bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
954                                                  MachineIRBuilder &MIB) const {
955   assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
956 
957   const Register DstReg = MI.getOperand(0).getReg();
958   const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
959       MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
960 
961   assert(DstRC &&
962          "Register class not available for LLT, register bank combination");
963 
964   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
965     LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
966                       << " operand\n");
967   }
968   MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
969   return true;
970 }
971 
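// Materialize Imm into DstReg using the instruction sequence computed by
// RISCVMatInt; a typical 32-bit constant, for instance, becomes a LUI of the
// upper 20 bits followed by an ADDI of the sign-adjusted lower 12 bits. Each
// instruction in the chain is constrained as it is built.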
972 bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
973                                               MachineIRBuilder &MIB) const {
974   if (Imm == 0) {
975     MIB.buildCopy(DstReg, Register(RISCV::X0));
976     RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
977     return true;
978   }
979 
980   RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
981   unsigned NumInsts = Seq.size();
982   Register SrcReg = RISCV::X0;
983 
984   for (unsigned i = 0; i < NumInsts; i++) {
985     Register TmpReg = i < NumInsts - 1
986                           ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
987                           : DstReg;
988     const RISCVMatInt::Inst &I = Seq[i];
989     MachineInstr *Result;
990 
991     switch (I.getOpndKind()) {
992     case RISCVMatInt::Imm:
993       // clang-format off
994       Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
995                    .addImm(I.getImm());
996       // clang-format on
997       break;
998     case RISCVMatInt::RegX0:
999       Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
1000                               {SrcReg, Register(RISCV::X0)});
1001       break;
1002     case RISCVMatInt::RegReg:
1003       Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
1004       break;
1005     case RISCVMatInt::RegImm:
1006       Result =
1007           MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
1008       break;
1009     }
1010 
1011     if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
1012       return false;
1013 
1014     SrcReg = TmpReg;
1015   }
1016 
1017   return true;
1018 }
1019 
1020 bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
1021                                           MachineIRBuilder &MIB, bool IsLocal,
1022                                           bool IsExternWeak) const {
1023   assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1024           MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
1025           MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
1026          "Unexpected opcode");
1027 
1028   const MachineOperand &DispMO = MI.getOperand(1);
1029 
1030   Register DefReg = MI.getOperand(0).getReg();
1031   const LLT DefTy = MRI->getType(DefReg);
1032 
1033   // When HWASAN is used and tagging of global variables is enabled, globals
1034   // should be accessed via the GOT, since the tagged address of a global is
1035   // incompatible with existing code models. This also applies in non-pic
1036   // mode.
1037   if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
1038     if (IsLocal && !Subtarget->allowTaggedGlobals()) {
1039       // Use PC-relative addressing to access the symbol. This generates the
1040       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
1041       // %pcrel_lo(auipc)).
1042       MI.setDesc(TII.get(RISCV::PseudoLLA));
1043       return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
1044     }
1045 
1046     // Use PC-relative addressing to access the GOT for this symbol, then
1047     // load the address from the GOT. This generates the pattern (PseudoLGA
1048     // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
1049     // %pcrel_lo(auipc))).
1050     MachineFunction &MF = *MI.getParent()->getParent();
1051     MachineMemOperand *MemOp = MF.getMachineMemOperand(
1052         MachinePointerInfo::getGOT(MF),
1053         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
1054             MachineMemOperand::MOInvariant,
1055         DefTy, Align(DefTy.getSizeInBits() / 8));
1056 
1057     auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
1058                       .addDisp(DispMO, 0)
1059                       .addMemOperand(MemOp);
1060 
1061     if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
1062       return false;
1063 
1064     MI.eraseFromParent();
1065     return true;
1066   }
1067 
1068   switch (TM.getCodeModel()) {
1069   default: {
1070     reportGISelFailure(const_cast<MachineFunction &>(*MF), *TPC, *MORE,
1071                        getName(), "Unsupported code model for lowering", MI);
1072     return false;
1073   }
1074   case CodeModel::Small: {
1075     // The address must lie within a single 2 GiB range, between the absolute
1076     // addresses -2 GiB and +2 GiB. This generates the pattern (addi
1077     // (lui %hi(sym)) %lo(sym)).
1078     Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1079     MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
1080                                .addDisp(DispMO, 0, RISCVII::MO_HI);
1081 
1082     if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
1083       return false;
1084 
1085     auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
1086                       .addDisp(DispMO, 0, RISCVII::MO_LO);
1087 
1088     if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
1089       return false;
1090 
1091     MI.eraseFromParent();
1092     return true;
1093   }
1094   case CodeModel::Medium:
1095     // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
1096     // relocation needs to reference a label that points to the auipc
1097     // instruction itself, not the global. This cannot be done inside the
1098     // instruction selector.
1099     if (IsExternWeak) {
1100       // An extern weak symbol may be undefined, i.e. have value 0, which may
1101       // not be within 2GiB of PC, so use GOT-indirect addressing to access the
1102       // symbol. This generates the pattern (PseudoLGA sym), which expands to
1103       // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
1104       MachineFunction &MF = *MI.getParent()->getParent();
1105       MachineMemOperand *MemOp = MF.getMachineMemOperand(
1106           MachinePointerInfo::getGOT(MF),
1107           MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
1108               MachineMemOperand::MOInvariant,
1109           DefTy, Align(DefTy.getSizeInBits() / 8));
1110 
1111       auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
1112                         .addDisp(DispMO, 0)
1113                         .addMemOperand(MemOp);
1114 
1115       if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
1116         return false;
1117 
1118       MI.eraseFromParent();
1119       return true;
1120     }
1121 
1122     // Generate a sequence for accessing addresses within any 2GiB range
1123     // within the address space. This generates the pattern (PseudoLLA sym),
1124     // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
1125     MI.setDesc(TII.get(RISCV::PseudoLLA));
1126     return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
1127   }
1128 
1129   return false;
1130 }
1131 
1132 bool RISCVInstructionSelector::selectSExtInreg(MachineInstr &MI,
1133                                                MachineIRBuilder &MIB) const {
1134   Register DstReg = MI.getOperand(0).getReg();
1135   Register SrcReg = MI.getOperand(1).getReg();
1136   unsigned SrcSize = MI.getOperand(2).getImm();
1137 
1138   MachineInstr *NewMI;
1139   if (SrcSize == 32) {
1140     assert(Subtarget->is64Bit() && "Unexpected extend");
1141     // addiw rd, rs, 0 (i.e. sext.w rd, rs)
1142     NewMI = MIB.buildInstr(RISCV::ADDIW, {DstReg}, {SrcReg}).addImm(0U);
1143   } else {
1144     assert(Subtarget->hasStdExtZbb() && "Unexpected extension");
1145     assert((SrcSize == 8 || SrcSize == 16) && "Unexpected size");
1146     unsigned Opc = SrcSize == 16 ? RISCV::SEXT_H : RISCV::SEXT_B;
1147     NewMI = MIB.buildInstr(Opc, {DstReg}, {SrcReg});
1148   }
1149 
1150   if (!constrainSelectedInstRegOperands(*NewMI, TII, TRI, RBI))
1151     return false;
1152 
1153   MI.eraseFromParent();
1154   return true;
1155 }
1156 
1157 bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
1158                                             MachineIRBuilder &MIB) const {
1159   auto &SelectMI = cast<GSelect>(MI);
1160 
1161   Register LHS, RHS;
1162   RISCVCC::CondCode CC;
1163   getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);
1164 
1165   Register DstReg = SelectMI.getReg(0);
1166 
1167   unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
1168   if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
1169     unsigned Size = MRI->getType(DstReg).getSizeInBits();
1170     Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
1171                      : RISCV::Select_FPR64_Using_CC_GPR;
1172   }
1173 
1174   MachineInstr *Result = MIB.buildInstr(Opc)
1175                              .addDef(DstReg)
1176                              .addReg(LHS)
1177                              .addReg(RHS)
1178                              .addImm(CC)
1179                              .addReg(SelectMI.getTrueReg())
1180                              .addReg(SelectMI.getFalseReg());
1181   MI.eraseFromParent();
1182   return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
1183 }
1184 
1185 // Convert an FCMP predicate to one of the supported F, D, or Zfh instructions.
1186 static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
1187   assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
1188   switch (Pred) {
1189   default:
1190     llvm_unreachable("Unsupported predicate");
1191   case CmpInst::FCMP_OLT:
1192     return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
1193   case CmpInst::FCMP_OLE:
1194     return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
1195   case CmpInst::FCMP_OEQ:
1196     return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
1197   }
1198 }
1199 
1200 // Try legalizing an FCMP by swapping or inverting the predicate to one that
1201 // is supported.
1202 static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
1203                                   CmpInst::Predicate &Pred, bool &NeedInvert) {
1204   auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
1205     return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
1206            Pred == CmpInst::FCMP_OEQ;
1207   };
1208 
1209   assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");
1210 
1211   CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
1212   if (isLegalFCmpPredicate(InvPred)) {
1213     Pred = InvPred;
1214     std::swap(LHS, RHS);
1215     return true;
1216   }
1217 
1218   InvPred = CmpInst::getInversePredicate(Pred);
1219   NeedInvert = true;
1220   if (isLegalFCmpPredicate(InvPred)) {
1221     Pred = InvPred;
1222     return true;
1223   }
1224   InvPred = CmpInst::getSwappedPredicate(InvPred);
1225   if (isLegalFCmpPredicate(InvPred)) {
1226     Pred = InvPred;
1227     std::swap(LHS, RHS);
1228     return true;
1229   }
1230 
1231   return false;
1232 }
1233 
1234 // Emit a sequence of instructions to compare LHS and RHS using Pred. Return
1235 // the result in DstReg.
1236 // FIXME: Maybe we should expand this earlier.
1237 bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
1238                                                MachineIRBuilder &MIB) const {
1239   auto &CmpMI = cast<GFCmp>(MI);
1240   CmpInst::Predicate Pred = CmpMI.getCond();
1241 
1242   Register DstReg = CmpMI.getReg(0);
1243   Register LHS = CmpMI.getLHSReg();
1244   Register RHS = CmpMI.getRHSReg();
1245 
1246   unsigned Size = MRI->getType(LHS).getSizeInBits();
1247   assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");
1248 
1249   Register TmpReg = DstReg;
1250 
1251   bool NeedInvert = false;
1252   // First try swapping operands or inverting.
1253   if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
1254     if (NeedInvert)
1255       TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1256     auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
1257     if (!Cmp.constrainAllUses(TII, TRI, RBI))
1258       return false;
1259   } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
1260     // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
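    // fcmp ueq LHS, RHS => !(OR (FLT LHS, RHS), (FLT RHS, LHS))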
1261     NeedInvert = Pred == CmpInst::FCMP_UEQ;
1262     auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
1263                                {&RISCV::GPRRegClass}, {LHS, RHS});
1264     if (!Cmp1.constrainAllUses(TII, TRI, RBI))
1265       return false;
1266     auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
1267                                {&RISCV::GPRRegClass}, {RHS, LHS});
1268     if (!Cmp2.constrainAllUses(TII, TRI, RBI))
1269       return false;
1270     if (NeedInvert)
1271       TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1272     auto Or =
1273         MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
1274     if (!Or.constrainAllUses(TII, TRI, RBI))
1275       return false;
1276   } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
1277     // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
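    // fcmp uno LHS, RHS => !(AND (FEQ LHS, LHS), (FEQ RHS, RHS))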
1278     // FIXME: If LHS and RHS are the same we can use a single FEQ.
1279     NeedInvert = Pred == CmpInst::FCMP_UNO;
1280     auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
1281                                {&RISCV::GPRRegClass}, {LHS, LHS});
1282     if (!Cmp1.constrainAllUses(TII, TRI, RBI))
1283       return false;
1284     auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
1285                                {&RISCV::GPRRegClass}, {RHS, RHS});
1286     if (!Cmp2.constrainAllUses(TII, TRI, RBI))
1287       return false;
1288     if (NeedInvert)
1289       TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1290     auto And =
1291         MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
1292     if (!And.constrainAllUses(TII, TRI, RBI))
1293       return false;
1294   } else
1295     llvm_unreachable("Unhandled predicate");
1296 
1297   // Emit an XORI to invert the result if needed.
1298   if (NeedInvert) {
1299     auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
1300     if (!Xor.constrainAllUses(TII, TRI, RBI))
1301       return false;
1302   }
1303 
1304   MI.eraseFromParent();
1305   return true;
1306 }
1307 
1308 void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
1309                                          SyncScope::ID FenceSSID,
1310                                          MachineIRBuilder &MIB) const {
1311   if (STI.hasStdExtZtso()) {
1312     // The only fence that needs an instruction is a sequentially-consistent
1313     // cross-thread fence.
1314     if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
1315         FenceSSID == SyncScope::System) {
1316       // fence rw, rw
1317       MIB.buildInstr(RISCV::FENCE, {}, {})
1318           .addImm(RISCVFenceField::R | RISCVFenceField::W)
1319           .addImm(RISCVFenceField::R | RISCVFenceField::W);
1320       return;
1321     }
1322 
1323     // MEMBARRIER is a compiler barrier; it codegens to a no-op.
1324     MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
1325     return;
1326   }
1327 
1328   // singlethread fences only synchronize with signal handlers on the same
1329   // thread and thus only need to preserve instruction order, not actually
1330   // enforce memory ordering.
1331   if (FenceSSID == SyncScope::SingleThread) {
1332     MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
1333     return;
1334   }
1335 
1336   // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
1337   // Manual: Volume I.
1338   unsigned Pred, Succ;
1339   switch (FenceOrdering) {
1340   default:
1341     llvm_unreachable("Unexpected ordering");
1342   case AtomicOrdering::AcquireRelease:
1343     // fence acq_rel -> fence.tso
1344     MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
1345     return;
1346   case AtomicOrdering::Acquire:
1347     // fence acquire -> fence r, rw
1348     Pred = RISCVFenceField::R;
1349     Succ = RISCVFenceField::R | RISCVFenceField::W;
1350     break;
1351   case AtomicOrdering::Release:
1352     // fence release -> fence rw, w
1353     Pred = RISCVFenceField::R | RISCVFenceField::W;
1354     Succ = RISCVFenceField::W;
1355     break;
1356   case AtomicOrdering::SequentiallyConsistent:
1357     // fence seq_cst -> fence rw, rw
1358     Pred = RISCVFenceField::R | RISCVFenceField::W;
1359     Succ = RISCVFenceField::R | RISCVFenceField::W;
1360     break;
1361   }
1362   MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
1363 }
1364 
1365 namespace llvm {
1366 InstructionSelector *
1367 createRISCVInstructionSelector(const RISCVTargetMachine &TM,
1368                                const RISCVSubtarget &Subtarget,
1369                                const RISCVRegisterBankInfo &RBI) {
1370   return new RISCVInstructionSelector(TM, Subtarget, RBI);
1371 }
1372 } // end namespace llvm
1373