//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// RISC-V.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVRegisterBankInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "riscv-isel"

using namespace llvm;
using namespace MIPatternMatch;

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

namespace {

class RISCVInstructionSelector : public InstructionSelector {
public:
  RISCVInstructionSelector(const RISCVTargetMachine &TM,
                           const RISCVSubtarget &STI,
                           const RISCVRegisterBankInfo &RBI);

  bool select(MachineInstr &MI) override;

  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override {
    InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
    MRI = &MF.getRegInfo();
  }

  static const char *getName() { return DEBUG_TYPE; }

private:
  const TargetRegisterClass *
  getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;

  bool isRegInGprb(Register Reg) const;
  bool isRegInFprb(Register Reg) const;

  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // A lowering phase that runs before any selection attempts. It may rewrite
  // the instruction in place.
  void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);

  bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);

  // Custom selection methods
  bool selectCopy(MachineInstr &MI) const;
  bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB) const;
  void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
                 MachineIRBuilder &MIB) const;
  bool selectMergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;

  ComplexRendererFns selectShiftMask(MachineOperand &Root,
                                     unsigned ShiftWidth) const;
  ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
    return selectShiftMask(Root, STI.getXLen());
  }
  ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
    return selectShiftMask(Root, 32);
  }
  ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;

  ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
    return selectSExtBits(Root, Bits);
  }

  ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
    return selectZExtBits(Root, Bits);
  }

  ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }

  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                       unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }

  ComplexRendererFns renderVLOp(MachineOperand &Root) const;

  // Custom renderers for tablegen
  void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                    int OpIdx) const;
  void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
                            int OpIdx) const;
  void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
                          int OpIdx) const;
  void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
                      int OpIdx) const;
  void renderImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                 int OpIdx) const;

  void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
                           int OpIdx) const;

  const RISCVSubtarget &STI;
  const RISCVInstrInfo &TII;
  const RISCVRegisterInfo &TRI;
  const RISCVRegisterBankInfo &RBI;
  const RISCVTargetMachine &TM;

  MachineRegisterInfo *MRI = nullptr;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. We need to unify the name
  // of the Subtarget variable.
  const RISCVSubtarget *Subtarget = &STI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      TM(TM),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
                                          unsigned ShiftWidth) const {
  if (!Root.isReg())
    return std::nullopt;

  using namespace llvm::MIPatternMatch;

  Register ShAmtReg = Root.getReg();
  // Peek through zext.
  Register ZExtSrcReg;
  if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg))))
    ShAmtReg = ZExtSrcReg;

  APInt AndMask;
  Register AndSrcReg;
  // Try to combine the following pattern (applicable to the other shift
  // instructions as well as to their 32-bit variants):
  //
  //   %4:gprb(s64) = G_AND %3, %2
  //   %5:gprb(s64) = G_LSHR %1, %4(s64)
  //
  // According to RISC-V's ISA manual, SLL, SRL, and SRA ignore all but the
  // lowest log2(XLEN) bits of register rs2. For the pattern above, if the
  // lowest log2(XLEN) bits of rd and rs2 of the G_AND are the same, the G_AND
  // can be eliminated. Given that rs1 or rs2 of the G_AND holds the constant
  // AND mask, there are two cases in which the G_AND can be erased:
  //
  // 1. the lowest log2(XLEN) bits of the AND mask are all set
  // 2. the bits of the register being masked are already unset (known zero)
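  //
  // For example (illustrative), with XLEN == 64:
  //
  //   %4:gprb(s64) = G_AND %3, 63   ; the low log2(64) = 6 bits are all set
  //   %5:gprb(s64) = G_LSHR %1, %4(s64)
  //
  // Case 1 applies, so the G_AND can be dropped and %3 used as the shift
  // amount directly.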
  if (mi_match(ShAmtReg, *MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmtReg = AndSrcReg;
    } else {
      // SimplifyDemandedBits may have optimized the mask, so try restoring any
      // bits that are known zero.
      KnownBits Known = KB->getKnownBits(AndSrcReg);
      if (ShMask.isSubsetOf(AndMask | Known.Zero))
        ShAmtReg = AndSrcReg;
    }
  }

  APInt Imm;
  Register Reg;
  if (mi_match(ShAmtReg, *MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
      // If we are shifting by X+N where N == 0 mod Size, then just shift by X
      // to avoid the ADD.
      ShAmtReg = Reg;
  } else if (mi_match(ShAmtReg, *MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
      // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
      // to generate a NEG instead of a SUB of a constant.
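      // For example (illustrative), a 32-bit shift by (32 - %x) becomes a
      // shift by (neg %x), since 32 == 0 (mod 32).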
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
        MIB.addReg(ShAmtReg);
      }}};
    }
    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
      // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
      // to generate a NOT instead of a SUB of a constant.
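      // For example (illustrative), a 32-bit shift by (31 - %x) becomes a
      // shift by (not %x), since 31 - %x == ~%x (mod 32).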
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
            .addImm(-1);
        MIB.addReg(ShAmtReg);
      }}};
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();
  MachineInstr *RootDef = MRI->getVRegDef(RootReg);

  if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
      RootDef->getOperand(2).getImm() == Bits) {
    return {
        {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};
  }

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if ((Size - KB->computeNumSignBits(RootReg)) < Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  Register RegX;
  uint64_t Mask = maskTrailingOnes<uint64_t>(Bits);
  if (mi_match(RootReg, *MRI, m_GAnd(m_Reg(RegX), m_SpecificICst(Mask)))) {
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
  }

  if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
      MRI->getType(RegX).getScalarSizeInBits() == Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if (KB->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  const unsigned XLen = STI.getXLen();
  APInt Mask, C2;
  Register RegY;
  std::optional<bool> LeftShift;
  // (and (shl y, c2), mask)
  if (mi_match(RootReg, *MRI,
               m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = true;
  // (and (lshr y, c2), mask)
  else if (mi_match(RootReg, *MRI,
                    m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = false;

  if (LeftShift.has_value()) {
    if (*LeftShift)
      Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
    else
      Mask &= maskTrailingOnes<uint64_t>(XLen - C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // Given (and (shl y, c2), mask) in which mask has no leading zeros and
      // c3 trailing zeros, we can use an SRLI by c3 - c2 followed by a SHXADD.
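      //
      // For example (illustrative), with XLEN == 64 and ShAmt == 3:
      //   (and (shl y, 1), 0xFFFFFFFFFFFFFFF8)   ; c2 = 1, c3 = 3
      // selects to (SH3ADD (SRLI y, 2), x), since shifting (SRLI y, c3 - c2)
      // left by c3 reproduces the masked value.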
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Trailing - C2.getLimitedValue());
          MIB.addReg(DstReg);
        }}};
      }

      // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
      // c3 trailing zeros, we can use an SRLI by c2 + c3 followed by a SHXADD.
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  LeftShift.reset();

  // (shl (and y, mask), c2)
  if (mi_match(RootReg, *MRI,
               m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                      m_ICst(C2))))
    LeftShift = true;
  // (lshr (and y, mask), c2)
  else if (mi_match(RootReg, *MRI,
                    m_GLShr(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                            m_ICst(C2))))
    LeftShift = false;

  if (LeftShift.has_value() && Mask.isShiftedMask()) {
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();

    // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
    // c3 trailing zeros, if c2 + c3 == ShAmt we can emit SRLIW + SHXADD.
    bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
                (Trailing + C2.getLimitedValue()) == ShAmt;
    if (!Cond)
      // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
      // c3 trailing zeros, if c3 - c2 == ShAmt we can emit SRLIW + SHXADD.
      Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
             (Trailing - C2.getLimitedValue()) == ShAmt;

    if (Cond) {
      Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
            .addImm(Trailing);
        MIB.addReg(DstReg);
      }}};
    }
  }

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  // Given (and (shl x, c2), mask) in which mask is a shifted mask with
  // 32 - ShAmt leading zeros and c2 trailing zeros, we can use an SLLI by
  // c2 - ShAmt followed by a SHXADD_UW with shift amount ShAmt.
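  //
  // For example (illustrative), with ShAmt == 2 (SH2ADD_UW):
  //   (and (shl x, 4), 0x3FFFFFFF0)   ; 32 - 2 = 30 leading zeros, c2 = 4
  // selects to (SH2ADD_UW (SLLI x, 2), reg), because SHXADD_UW zero-extends
  // the low 32 bits of rs1 before shifting it left by ShAmt.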
  APInt Mask, C2;
  Register RegX;
  if (mi_match(
          RootReg, *MRI,
          m_OneNonDBGUse(m_GAnd(m_OneNonDBGUse(m_GShl(m_Reg(RegX), m_ICst(C2))),
                                m_ICst(Mask))))) {
    Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
              .addImm(C2.getLimitedValue() - ShAmt);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
  assert(Root.isReg() && "Expected operand to be a Register");
  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());

  if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
    auto C = RootDef->getOperand(1).getCImm();
    if (C->getValue().isAllOnes())
      // If the operand is a G_CONSTANT with a value of all ones, it is larger
      // than VLMAX. We convert it to an immediate with value VLMaxSentinel.
      // This is recognized specially by the vsetvli insertion pass.
      return {{[=](MachineInstrBuilder &MIB) {
        MIB.addImm(RISCV::VLMaxSentinel);
      }}};

    if (isUInt<5>(C->getZExtValue())) {
      uint64_t ZExtC = C->getZExtValue();
      return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
    }
  }
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  if (!Root.isReg())
    return std::nullopt;

  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, *MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());

    int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
    if (isInt<12>(RHSC)) {
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    }
  }

  // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
  // the combiner?
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}

/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate CC.
/// CC must be an ICmp predicate.
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
  switch (CC) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
    return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:
    return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT:
    return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT:
    return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE:
    return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE:
    return RISCVCC::COND_GE;
  }
}

static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
                                 Register &LHS, Register &RHS,
                                 MachineRegisterInfo &MRI) {
  // Try to fold an ICmp. If that fails, use a NE compare with X0.
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
    LHS = CondReg;
    RHS = RISCV::X0;
    CC = RISCVCC::COND_NE;
    return;
  }

  // We found an ICmp, do some canonicalizations.

  // Adjust comparisons to use comparison with 0 if possible.
  if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
    switch (Pred) {
    case CmpInst::Predicate::ICMP_SGT:
      // Convert X > -1 to X >= 0
      if (*Constant == -1) {
        CC = RISCVCC::COND_GE;
        RHS = RISCV::X0;
        return;
      }
      break;
    case CmpInst::Predicate::ICMP_SLT:
      // Convert X < 1 to 0 >= X
      if (*Constant == 1) {
        CC = RISCVCC::COND_GE;
        RHS = LHS;
        LHS = RISCV::X0;
        return;
      }
      break;
    default:
      break;
    }
  }

  switch (Pred) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
  case CmpInst::Predicate::ICMP_NE:
  case CmpInst::Predicate::ICMP_ULT:
  case CmpInst::Predicate::ICMP_SLT:
  case CmpInst::Predicate::ICMP_UGE:
  case CmpInst::Predicate::ICMP_SGE:
    // These CCs are supported directly by RISC-V branches.
    break;
  case CmpInst::Predicate::ICMP_SGT:
  case CmpInst::Predicate::ICMP_SLE:
  case CmpInst::Predicate::ICMP_UGT:
  case CmpInst::Predicate::ICMP_ULE:
    // These CCs are not supported directly by RISC-V branches; handle them by
    // reversing the direction of the CC and swapping LHS and RHS.
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    break;
  }

  CC = getRISCVCCFromICmp(Pred);
}

bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI->getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI->getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
          dyn_cast<const TargetRegisterClass *>(RegClassOrBank);
      if (!DefRC) {
        if (!DefTy.isValid()) {
          LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
          return false;
        }

        const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC) {
          LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
          return false;
        }
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
    }

    // Certain non-generic instructions also need some special handling.
    if (MI.isCopy())
      return selectCopy(MI);

    return true;
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    return selectCopy(MI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use constant pool for complex constants.
    // TODO: Optimize +0.0 to use fcvt.d.w for s64 on rv32.
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    APInt Imm = FPimm.bitcastToAPInt();
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      Register GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
        return false;

      unsigned Opcode = Size == 64   ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
    } else {
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");
      // Split into two pieces and build through the stack.
      Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      MachineInstrBuilder PairF64 = MIB.buildInstr(
          RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI, MIB);
  case TargetOpcode::G_BRCOND: {
    Register LHS, RHS;
    RISCVCC::CondCode CC;
    getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);

    auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
                   .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  case TargetOpcode::G_FRAME_INDEX: {
    // TODO: We may want to replace this code with the SelectionDAG patterns,
    // which fail to get imported because they use FrameAddrRegImm, which is a
    // ComplexPattern.
    MI.setDesc(TII.get(RISCV::ADDI));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(MI, MIB);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB);
  default:
    return false;
  }
}

bool RISCVInstructionSelector::selectMergeValues(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_MERGE_VALUES);

  // Build an F64 pair from the two GPR operands.
  if (MI.getNumOperands() != 3)
    return false;
  Register Dst = MI.getOperand(0).getReg();
  Register Lo = MI.getOperand(1).getReg();
  Register Hi = MI.getOperand(2).getReg();
  if (!isRegInFprb(Dst) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;
  MI.setDesc(TII.get(RISCV::BuildPairF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  // Split F64 Src into two s32 parts
  if (MI.getNumOperands() != 3)
    return false;
  Register Src = MI.getOperand(2).getReg();
  Register Lo = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;
  MI.setDesc(TII.get(RISCV::SplitF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB) {
  Register PtrReg = Op.getReg();
  assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");

  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}

void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());

    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI->setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());
    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI->setType(DstReg, sXLen);
    break;
  }
  }
}

void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}

void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}

void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}

void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}

void RISCVInstructionSelector::renderImm(MachineInstrBuilder &MIB,
                                         const MachineInstr &MI,
                                         int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal);
}

void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(llvm::countr_zero(C));
}

const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return &RISCV::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  if (RB.getID() == RISCV::VRBRegBankID) {
    if (Ty.getSizeInBits().getKnownMinValue() <= 64)
      return &RISCV::VRRegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 128)
      return &RISCV::VRM2RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 256)
      return &RISCV::VRM4RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 512)
      return &RISCV::VRM8RegClass;
  }

  return nullptr;
}

bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}

bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();

  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
    return false;
  }

  MI.setDesc(TII.get(RISCV::COPY));
  return true;
}

bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
    return false;
  }
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  if (Imm == 0) {
    MIB.buildCopy(DstReg, Register(RISCV::X0));
    RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
    return true;
  }

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
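  // For example (illustrative), on RV32 an Imm of 0x12345678 yields the
  // two-instruction sequence LUI 0x12345 followed by ADDI 0x678; RISCVMatInt
  // accounts for ADDI sign-extending its 12-bit immediate when it computes
  // the LUI constant.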
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    MachineInstr *Result;

    switch (I.getOpndKind()) {
    case RISCVMatInt::Imm:
      // clang-format off
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
                   .addImm(I.getImm());
      // clang-format on
      break;
    case RISCVMatInt::RegX0:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
                              {SrcReg, Register(RISCV::X0)});
      break;
    case RISCVMatInt::RegReg:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
      break;
    case RISCVMatInt::RegImm:
      Result =
          MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
      break;
    }

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    SrcReg = TmpReg;
  }

  return true;
}

bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB, bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);

  // When HWASAN is used and tagging of global variables is enabled, they
  // should be accessed via the GOT, since the tagged address of a global is
  // incompatible with existing code models. This also applies to non-PIC
  // mode.
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use PC-relative addressing to access the GOT for this symbol, then
    // load the address from the GOT. This generates the pattern (PseudoLGA
    // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
    // %pcrel_lo(auipc))).
    MachineFunction &MF = *MI.getParent()->getParent();
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF),
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        DefTy, Align(DefTy.getSizeInBits() / 8));

    auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                      .addDisp(DispMO, 0)
                      .addMemOperand(MemOp);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }

  switch (TM.getCodeModel()) {
  default: {
    reportGISelFailure(const_cast<MachineFunction &>(*MF), *TPC, *MORE,
                       getName(), "Unsupported code model for lowering", MI);
    return false;
  }
  case CodeModel::Small: {
    // Must lie within a single 2 GiB address range and must lie between
    // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
    // (lui %hi(sym)) %lo(sym)).
    Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
                               .addDisp(DispMO, 0, RISCVII::MO_HI);

    if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
      return false;

    auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
                      .addDisp(DispMO, 0, RISCVII::MO_LO);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case CodeModel::Medium:
    // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
    // relocation needs to reference a label that points to the auipc
    // instruction itself, not the global. This cannot be done inside the
    // instruction selector.
    if (IsExternWeak) {
      // An extern weak symbol may be undefined, i.e. have value 0, which may
      // not be within 2GiB of PC, so use GOT-indirect addressing to access the
      // symbol. This generates the pattern (PseudoLGA sym), which expands to
      // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
      MachineFunction &MF = *MI.getParent()->getParent();
      MachineMemOperand *MemOp = MF.getMachineMemOperand(
          MachinePointerInfo::getGOT(MF),
          MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
              MachineMemOperand::MOInvariant,
          DefTy, Align(DefTy.getSizeInBits() / 8));

      auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                        .addDisp(DispMO, 0)
                        .addMemOperand(MemOp);

      if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
        return false;

      MI.eraseFromParent();
      return true;
    }

    // Generate a sequence for accessing addresses within any 2GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    MI.setDesc(TII.get(RISCV::PseudoLLA));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }

  return false;
}

bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB) const {
  auto &SelectMI = cast<GSelect>(MI);

  Register LHS, RHS;
  RISCVCC::CondCode CC;
  getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);

  Register DstReg = SelectMI.getReg(0);

  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result = MIB.buildInstr(Opc)
                             .addDef(DstReg)
                             .addReg(LHS)
                             .addReg(RHS)
                             .addImm(CC)
                             .addReg(SelectMI.getTrueReg())
                             .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
}

// Convert an FCMP predicate to one of the supported F, D, or H (Zfh) compare
// instructions.
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
  assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
  switch (Pred) {
  default:
    llvm_unreachable("Unsupported predicate");
  case CmpInst::FCMP_OLT:
    return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
  case CmpInst::FCMP_OLE:
    return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
  case CmpInst::FCMP_OEQ:
    return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
  }
}

// Try legalizing an FCMP by swapping or inverting the predicate to one that
// is supported.
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
                                  CmpInst::Predicate &Pred, bool &NeedInvert) {
  auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
           Pred == CmpInst::FCMP_OEQ;
  };
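
  // For example (illustrative): FCMP_OGT is handled by swapping the operands
  // (giving FCMP_OLT); FCMP_UGE by inverting (giving FCMP_OLT); and FCMP_ULE
  // by inverting to FCMP_OGT and then swapping (giving FCMP_OLT).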

  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  InvPred = CmpInst::getInversePredicate(Pred);
  NeedInvert = true;
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    return true;
  }
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  return false;
}

// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
// the result in DstReg.
// FIXME: Maybe we should expand this earlier.
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  auto &CmpMI = cast<GFCmp>(MI);
  CmpInst::Predicate Pred = CmpMI.getCond();

  Register DstReg = CmpMI.getReg(0);
  Register LHS = CmpMI.getLHSReg();
  Register RHS = CmpMI.getRHSReg();

  unsigned Size = MRI->getType(LHS).getSizeInBits();
  assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");

  Register TmpReg = DstReg;

  bool NeedInvert = false;
  // First try swapping operands or inverting.
  if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
    if (!Cmp.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
    // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
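    // fcmp ueq LHS, RHS => (XORI (OR (FLT LHS, RHS), (FLT RHS, LHS)), 1)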
    NeedInvert = Pred == CmpInst::FCMP_UEQ;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {LHS, RHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {RHS, LHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Or =
        MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!Or.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
    // FIXME: If LHS and RHS are the same we can use a single FEQ.
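    // fcmp uno LHS, RHS => (XORI (AND (FEQ LHS, LHS), (FEQ RHS, RHS)), 1)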
    NeedInvert = Pred == CmpInst::FCMP_UNO;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {LHS, LHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {RHS, RHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto And =
        MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!And.constrainAllUses(TII, TRI, RBI))
      return false;
  } else
    llvm_unreachable("Unhandled predicate");

  // Emit an XORI to invert the result if needed.
  if (NeedInvert) {
    auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
    if (!Xor.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    // The only fence that needs an instruction is a sequentially-consistent
    // cross-thread fence.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // fence rw, rw
      MIB.buildInstr(RISCV::FENCE, {}, {})
          .addImm(RISCVFenceField::R | RISCVFenceField::W)
          .addImm(RISCVFenceField::R | RISCVFenceField::W);
      return;
    }

    // MEMBARRIER is a compiler barrier; it codegens to a no-op.
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // SingleThread fences only synchronize with signal handlers on the same
  // thread, and thus only need to preserve instruction order, not actually
  // enforce memory ordering.
  if (FenceSSID == SyncScope::SingleThread) {
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
  // Manual: Volume I.
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  default:
    llvm_unreachable("Unexpected ordering");
  case AtomicOrdering::AcquireRelease:
    // fence acq_rel -> fence.tso
    MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
    return;
  case AtomicOrdering::Acquire:
    // fence acquire -> fence r, rw
    Pred = RISCVFenceField::R;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  case AtomicOrdering::Release:
    // fence release -> fence rw, w
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::W;
    break;
  case AtomicOrdering::SequentiallyConsistent:
    // fence seq_cst -> fence rw, rw
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  }
  MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
}

namespace llvm {
InstructionSelector *
createRISCVInstructionSelector(const RISCVTargetMachine &TM,
                               const RISCVSubtarget &Subtarget,
                               const RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm