//===---- RISCVISelDAGToDAG.h - A dag to dag inst selector for RISC-V -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the RISC-V target.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_RISCV_RISCVISELDAGTODAG_H
#define LLVM_LIB_TARGET_RISCV_RISCVISELDAGTODAG_H

#include "RISCV.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Support/KnownBits.h"

// RISC-V specific code to select RISC-V machine instructions for
// SelectionDAG operations.
namespace llvm {
class RISCVDAGToDAGISel : public SelectionDAGISel {
  // Cached per-function; (re)set in runOnMachineFunction() before any
  // selection callback runs.
  const RISCVSubtarget *Subtarget = nullptr;

public:
  RISCVDAGToDAGISel() = delete;

  explicit RISCVDAGToDAGISel(RISCVTargetMachine &TargetMachine,
                             CodeGenOptLevel OptLevel)
      : SelectionDAGISel(TargetMachine, OptLevel) {}

  // Cache the subtarget for this function, then delegate to the common
  // SelectionDAG selector driver.
  bool runOnMachineFunction(MachineFunction &MF) override {
    Subtarget = &MF.getSubtarget<RISCVSubtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  void PreprocessISelDAG() override;
  void PostprocessISelDAG() override;

  void Select(SDNode *Node) override;

  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    InlineAsm::ConstraintCode ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

  // Addressing-mode selectors. These are referenced as ComplexPatterns from
  // the generated matcher (see the RISCVGenDAGISel.inc include below); each
  // returns true on a successful match and fills in the output operands.
  bool SelectAddrFrameIndex(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset,
                        bool IsRV32Zdinx = false);
  bool SelectAddrRegImmRV32Zdinx(SDValue Addr, SDValue &Base, SDValue &Offset) {
    return SelectAddrRegImm(Addr, Base, Offset, true);
  }
  bool SelectAddrRegImmLsb00000(SDValue Addr, SDValue &Base, SDValue &Offset);

  bool SelectAddrRegRegScale(SDValue Addr, unsigned MaxShiftAmount,
                             SDValue &Base, SDValue &Index, SDValue &Scale);

  template <unsigned MaxShift>
  bool SelectAddrRegRegScale(SDValue Addr, SDValue &Base, SDValue &Index,
                             SDValue &Scale) {
    return SelectAddrRegRegScale(Addr, MaxShift, Base, Index, Scale);
  }

  // Like SelectAddrRegRegScale, but additionally strips an explicit
  // zero-extension of the index: if the matched Index is
  // (and X, (2^Bits - 1)), the mask is peeled off and X is used directly.
  template <unsigned MaxShift, unsigned Bits>
  bool SelectAddrRegZextRegScale(SDValue Addr, SDValue &Base, SDValue &Index,
                                 SDValue &Scale) {
    if (SelectAddrRegRegScale(Addr, MaxShift, Base, Index, Scale)) {
      if (Index.getOpcode() == ISD::AND) {
        auto *C = dyn_cast<ConstantSDNode>(Index.getOperand(1));
        if (C && C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
          Index = Index.getOperand(0);
          return true;
        }
      }
    }
    return false;
  }

  bool SelectAddrRegReg(SDValue Addr, SDValue &Base, SDValue &Offset);

  // Node-level peephole selections tried from Select(); each returns true if
  // it replaced the node.
  bool tryShrinkShlLogicImm(SDNode *Node);
  bool trySignedBitfieldExtract(SDNode *Node);
  bool tryIndexedLoad(SDNode *Node);

  bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt);
  bool selectShiftMaskXLen(SDValue N, SDValue &ShAmt) {
    return selectShiftMask(N, Subtarget->getXLen(), ShAmt);
  }
  bool selectShiftMask32(SDValue N, SDValue &ShAmt) {
    return selectShiftMask(N, 32, ShAmt);
  }

  bool selectSETCC(SDValue N, ISD::CondCode ExpectedCCVal, SDValue &Val);
  bool selectSETNE(SDValue N, SDValue &Val) {
    return selectSETCC(N, ISD::SETNE, Val);
  }
  bool selectSETEQ(SDValue N, SDValue &Val) {
    return selectSETCC(N, ISD::SETEQ, Val);
  }

  bool selectSExtBits(SDValue N, unsigned Bits, SDValue &Val);
  template <unsigned Bits> bool selectSExtBits(SDValue N, SDValue &Val) {
    return selectSExtBits(N, Bits, Val);
  }
  bool selectZExtBits(SDValue N, unsigned Bits, SDValue &Val);
  template <unsigned Bits> bool selectZExtBits(SDValue N, SDValue &Val) {
    return selectZExtBits(N, Bits, Val);
  }

  bool selectSHXADDOp(SDValue N, unsigned ShAmt, SDValue &Val);
  template <unsigned ShAmt> bool selectSHXADDOp(SDValue N, SDValue &Val) {
    return selectSHXADDOp(N, ShAmt, Val);
  }

  bool selectSHXADD_UWOp(SDValue N, unsigned ShAmt, SDValue &Val);
  template <unsigned ShAmt> bool selectSHXADD_UWOp(SDValue N, SDValue &Val) {
    return selectSHXADD_UWOp(N, ShAmt, Val);
  }

  bool selectInvLogicImm(SDValue N, SDValue &Val);

  bool hasAllNBitUsers(SDNode *Node, unsigned Bits,
                       const unsigned Depth = 0) const;
  // Convenience wrappers: all users only read the low byte / halfword / word.
  bool hasAllBUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 8); }
  bool hasAllHUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 16); }
  bool hasAllWUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 32); }

  bool selectSimm5Shl2(SDValue N, SDValue &Simm5, SDValue &Shl2);

  bool selectVLOp(SDValue N, SDValue &VL);

  bool selectVSplat(SDValue N, SDValue &SplatVal);
  bool selectVSplatSimm5(SDValue N, SDValue &SplatVal);
  bool selectVSplatUimm(SDValue N, unsigned Bits, SDValue &SplatVal);
  template <unsigned Bits> bool selectVSplatUimmBits(SDValue N, SDValue &Val) {
    return selectVSplatUimm(N, Bits, Val);
  }
  bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal);
  bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal);
  // Matches the splat of a value which can be extended or truncated, such that
  // only the bottom 8 bits are preserved.
  bool selectLow8BitsVSplat(SDValue N, SDValue &SplatVal);
  bool selectScalarFPAsInt(SDValue N, SDValue &Imm);

  bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm);
  template <unsigned Width> bool selectRVVSimm5(SDValue N, SDValue &Imm) {
    return selectRVVSimm5(N, Width, Imm);
  }

  void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm,
                                  const SDLoc &DL, unsigned CurOp,
                                  bool IsMasked, bool IsStridedOrIndexed,
                                  SmallVectorImpl<SDValue> &Operands,
                                  bool IsLoad = false, MVT *IndexVT = nullptr);

  // Selection of RVV segment load/store intrinsics (NF = number of fields).
  void selectVLSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsStrided);
  void selectVLSEGFF(SDNode *Node, unsigned NF, bool IsMasked);
  void selectVLXSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsOrdered);
  void selectVSSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsStrided);
  void selectVSXSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsOrdered);

  void selectVSETVLI(SDNode *Node);

  void selectSF_VC_X_SE(SDNode *Node);

  // Return the RISC-V condition code that matches the given DAG integer
  // condition code. The CondCode must be one of those supported by the RISC-V
  // ISA (see translateSetCCForBranch).
  static RISCVCC::CondCode getRISCVCCForIntCC(ISD::CondCode CC) {
    switch (CC) {
    default:
      llvm_unreachable("Unsupported CondCode");
    case ISD::SETEQ:
      return RISCVCC::COND_EQ;
    case ISD::SETNE:
      return RISCVCC::COND_NE;
    case ISD::SETLT:
      return RISCVCC::COND_LT;
    case ISD::SETGE:
      return RISCVCC::COND_GE;
    case ISD::SETULT:
      return RISCVCC::COND_LTU;
    case ISD::SETUGE:
      return RISCVCC::COND_GEU;
    }
  }

  // Include the pieces autogenerated from the target description.
#include "RISCVGenDAGISel.inc"

private:
  // Post-selection peepholes run from PostprocessISelDAG(); each returns true
  // if it changed the DAG.
  bool doPeepholeSExtW(SDNode *Node);
  bool doPeepholeMaskedRVV(MachineSDNode *Node);
  bool doPeepholeMergeVVMFold();
  bool doPeepholeNoRegPassThru();
  bool performCombineVMergeAndVOps(SDNode *N);
};

// Legacy pass-manager wrapper around RISCVDAGToDAGISel.
class RISCVDAGToDAGISelLegacy : public SelectionDAGISelLegacy {
public:
  static char ID;
  explicit RISCVDAGToDAGISelLegacy(RISCVTargetMachine &TargetMachine,
                                   CodeGenOptLevel OptLevel);
};

namespace RISCV {
// Bit-packed descriptors used to look up RVV pseudo instructions; Pseudo
// holds the selected opcode. NOTE(review): field names/widths presumably
// mirror the TableGen-generated tables declared via the GET_*_DECL macros
// below — verify against the generated .inc before changing any layout.
struct VLSEGPseudo {
  uint16_t NF : 4;
  uint16_t Masked : 1;
  uint16_t Strided : 1;
  uint16_t FF : 1;
  uint16_t Log2SEW : 3;
  uint16_t LMUL : 3;
  uint16_t Pseudo;
};

struct VLXSEGPseudo {
  uint16_t NF : 4;
  uint16_t Masked : 1;
  uint16_t Ordered : 1;
  uint16_t Log2SEW : 3;
  uint16_t LMUL : 3;
  uint16_t IndexLMUL : 3;
  uint16_t Pseudo;
};

struct VSSEGPseudo {
  uint16_t NF : 4;
  uint16_t Masked : 1;
  uint16_t Strided : 1;
  uint16_t Log2SEW : 3;
  uint16_t LMUL : 3;
  uint16_t Pseudo;
};

struct VSXSEGPseudo {
  uint16_t NF : 4;
  uint16_t Masked : 1;
  uint16_t Ordered : 1;
  uint16_t Log2SEW : 3;
  uint16_t LMUL : 3;
  uint16_t IndexLMUL : 3;
  uint16_t Pseudo;
};

struct VLEPseudo {
  uint16_t Masked : 1;
  uint16_t Strided : 1;
  uint16_t FF : 1;
  uint16_t Log2SEW : 3;
  uint16_t LMUL : 3;
  uint16_t Pseudo;
};

struct VSEPseudo {
  uint16_t Masked : 1;
  uint16_t Strided : 1;
  uint16_t Log2SEW : 3;
  uint16_t LMUL : 3;
  uint16_t Pseudo;
};

struct VLX_VSXPseudo {
  uint16_t Masked : 1;
  uint16_t Ordered : 1;
  uint16_t Log2SEW : 3;
  uint16_t LMUL : 3;
  uint16_t IndexLMUL : 3;
  uint16_t Pseudo;
};

#define GET_RISCVVSSEGTable_DECL
#define GET_RISCVVLSEGTable_DECL
#define GET_RISCVVLXSEGTable_DECL
#define GET_RISCVVSXSEGTable_DECL
#define GET_RISCVVLETable_DECL
#define GET_RISCVVSETable_DECL
#define GET_RISCVVLXTable_DECL
#define GET_RISCVVSXTable_DECL
} // namespace RISCV

} // namespace llvm

#endif