//===-- RISCVInstrInfo.td - Target Description for RISC-V --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions in TableGen format.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// RISC-V specific DAG Nodes.
//===----------------------------------------------------------------------===//

// Target-independent type requirements, but with target-specific formats.
def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
                                       SDTCisVT<1, i32>]>;
def SDT_CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
                                   SDTCisVT<1, i32>]>;

// Target-dependent type requirements.
def SDT_RISCVCall : SDTypeProfile<0, -1, [SDTCisVT<0, XLenVT>]>;
def SDT_RISCVSelectCC : SDTypeProfile<1, 5, [SDTCisSameAs<1, 2>,
                                             SDTCisVT<3, OtherVT>,
                                             SDTCisSameAs<0, 4>,
                                             SDTCisSameAs<4, 5>]>;
def SDT_RISCVBrCC : SDTypeProfile<0, 4, [SDTCisSameAs<0, 1>,
                                         SDTCisVT<2, OtherVT>,
                                         SDTCisVT<3, OtherVT>]>;
def SDT_RISCVReadCSR : SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisInt<1>]>;
def SDT_RISCVWriteCSR : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisInt<1>]>;
def SDT_RISCVSwapCSR : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                            SDTCisInt<2>]>;
def SDT_RISCVReadCounterWide : SDTypeProfile<2, 2, [SDTCisVT<0, i32>,
                                                    SDTCisVT<1, i32>,
                                                    SDTCisInt<2>,
                                                    SDTCisInt<3>]>;
def SDT_RISCVIntUnaryOpW : SDTypeProfile<1, 1, [
  SDTCisSameAs<0, 1>, SDTCisVT<0, i64>
]>;
def SDT_RISCVIntBinOpW : SDTypeProfile<1, 2, [
  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<0, i64>
]>;
def SDT_RISCVIntShiftDOpW : SDTypeProfile<1, 3, [
  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<0, i64>, SDTCisVT<3, i64>
]>;

// Target-independent nodes, but with target-specific formats.
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart,
                           [SDNPHasChain, SDNPOutGlue]>;
def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_CallSeqEnd,
                         [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

// Target-dependent nodes.
def riscv_call : SDNode<"RISCVISD::CALL", SDT_RISCVCall,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                         SDNPVariadic]>;
def riscv_sw_guarded_call : SDNode<"RISCVISD::SW_GUARDED_CALL", SDT_RISCVCall,
                                   [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                                    SDNPVariadic]>;
def riscv_ret_glue : SDNode<"RISCVISD::RET_GLUE", SDTNone,
                            [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def riscv_sret_glue : SDNode<"RISCVISD::SRET_GLUE", SDTNone,
                             [SDNPHasChain, SDNPOptInGlue]>;
def riscv_mret_glue : SDNode<"RISCVISD::MRET_GLUE", SDTNone,
                             [SDNPHasChain, SDNPOptInGlue]>;
def riscv_selectcc : SDNode<"RISCVISD::SELECT_CC", SDT_RISCVSelectCC>;
def riscv_brcc : SDNode<"RISCVISD::BR_CC", SDT_RISCVBrCC,
                        [SDNPHasChain]>;
def riscv_tail : SDNode<"RISCVISD::TAIL", SDT_RISCVCall,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                         SDNPVariadic]>;
def riscv_sw_guarded_tail : SDNode<"RISCVISD::SW_GUARDED_TAIL", SDT_RISCVCall,
                                   [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                                    SDNPVariadic]>;
def riscv_sw_guarded_brind : SDNode<"RISCVISD::SW_GUARDED_BRIND",
                                    SDTBrind, [SDNPHasChain]>;
def riscv_sllw : SDNode<"RISCVISD::SLLW", SDT_RISCVIntBinOpW>;
def riscv_sraw : SDNode<"RISCVISD::SRAW", SDT_RISCVIntBinOpW>;
def riscv_srlw : SDNode<"RISCVISD::SRLW", SDT_RISCVIntBinOpW>;
def riscv_read_csr : SDNode<"RISCVISD::READ_CSR", SDT_RISCVReadCSR,
                            [SDNPHasChain]>;
def riscv_write_csr : SDNode<"RISCVISD::WRITE_CSR", SDT_RISCVWriteCSR,
                             [SDNPHasChain]>;
def riscv_swap_csr : SDNode<"RISCVISD::SWAP_CSR", SDT_RISCVSwapCSR,
                            [SDNPHasChain]>;

def riscv_read_counter_wide : SDNode<"RISCVISD::READ_COUNTER_WIDE",
                                     SDT_RISCVReadCounterWide,
                                     [SDNPHasChain, SDNPSideEffect]>;

def riscv_add_lo : SDNode<"RISCVISD::ADD_LO", SDTIntBinOp>;
def riscv_hi : SDNode<"RISCVISD::HI", SDTIntUnaryOp>;
def riscv_lla : SDNode<"RISCVISD::LLA", SDTIntUnaryOp>;
def riscv_add_tprel : SDNode<"RISCVISD::ADD_TPREL",
                             SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                                  SDTCisSameAs<0, 2>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisInt<0>]>>;

def riscv_probed_alloca : SDNode<"RISCVISD::PROBED_ALLOCA",
                                 SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                                      SDTCisVT<0, i32>]>,
                                 [SDNPHasChain, SDNPMayStore]>;

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//

class ImmXLenAsmOperand<string prefix, string suffix = ""> : AsmOperandClass {
  let Name = prefix # "ImmXLen" # suffix;
  let RenderMethod = "addImmOperands";
  let DiagnosticType = !strconcat("Invalid", Name);
}

class ImmAsmOperand<string prefix, int width, string suffix> : AsmOperandClass {
  let Name = prefix # "Imm" # width # suffix;
  let RenderMethod = "addImmOperands";
  let DiagnosticType = !strconcat("Invalid", Name);
}

def ImmZeroAsmOperand : AsmOperandClass {
  let Name = "ImmZero";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = !strconcat("Invalid", Name);
}

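// Illustrative note (not from the original source): instantiating the
// ImmAsmOperand class above as ImmAsmOperand<"S", 12, ""> yields Name =
// "SImm12" and DiagnosticType = "InvalidSImm12", which is the name the asm
// parser uses when reporting out-of-range operands.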
// A parse method for (${gpr}) or 0(${gpr}), where the 0 is silently ignored.
def ZeroOffsetMemOpOperand : AsmOperandClass {
  let Name = "ZeroOffsetMemOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isGPR";
  let ParserMethod = "parseZeroOffsetMemOp";
}

class MemOperand<RegisterClass regClass> : RegisterOperand<regClass>{
  let OperandType = "OPERAND_MEMORY";
}

def GPRMemZeroOffset : MemOperand<GPR> {
  let ParserMatchClass = ZeroOffsetMemOpOperand;
  let PrintMethod = "printZeroOffsetMemOp";
}

def GPRMem : MemOperand<GPR>;

def SPMem : MemOperand<SP>;

def GPRCMem : MemOperand<GPRC>;

class SImmAsmOperand<int width, string suffix = "">
    : ImmAsmOperand<"S", width, suffix> {
}

class UImmAsmOperand<int width, string suffix = "">
    : ImmAsmOperand<"U", width, suffix> {
}

class RISCVOp<ValueType vt = XLenVT> : Operand<vt> {
  let OperandNamespace = "RISCVOp";
}

class RISCVUImmOp<int bitsNum> : RISCVOp {
  let ParserMatchClass = UImmAsmOperand<bitsNum>;
  let EncoderMethod = "getImmOpValue";
  let DecoderMethod = "decodeUImmOperand<" # bitsNum # ">";
  let OperandType = "OPERAND_UIMM" # bitsNum;
  let MCOperandPredicate = [{
    int64_t Imm;
    if (!MCOp.evaluateAsConstantImm(Imm))
      return false;
    return isUInt<}]# bitsNum #[{>(Imm);
  }];
}

class RISCVUImmLeafOp<int bitsNum> :
  RISCVUImmOp<bitsNum>, ImmLeaf<XLenVT, "return isUInt<" # bitsNum # ">(Imm);">;

class RISCVSImmOp<int bitsNum> : RISCVOp {
  let ParserMatchClass = SImmAsmOperand<bitsNum>;
  let EncoderMethod = "getImmOpValue";
  let DecoderMethod = "decodeSImmOperand<" # bitsNum # ">";
  let OperandType = "OPERAND_SIMM" # bitsNum;
  let MCOperandPredicate = [{
    int64_t Imm;
    if (!MCOp.evaluateAsConstantImm(Imm))
      return false;
    return isInt<}] # bitsNum # [{>(Imm);
  }];
}

class RISCVSImmLeafOp<int bitsNum> :
  RISCVSImmOp<bitsNum>, ImmLeaf<XLenVT, "return isInt<" # bitsNum # ">(Imm);">;

def FenceArg : AsmOperandClass {
  let Name = "FenceArg";
  let RenderMethod = "addFenceArgOperands";
  let ParserMethod = "parseFenceArg";
}

def fencearg : RISCVOp {
  let ParserMatchClass = FenceArg;
  let PrintMethod = "printFenceArg";
  let DecoderMethod = "decodeUImmOperand<4>";
  let OperandType = "OPERAND_UIMM4";
}

def UImmLog2XLenAsmOperand : AsmOperandClass {
  let Name = "UImmLog2XLen";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidUImmLog2XLen";
}

def uimmlog2xlen : RISCVOp, ImmLeaf<XLenVT, [{
  if (Subtarget->is64Bit())
    return isUInt<6>(Imm);
  return isUInt<5>(Imm);
}]> {
  let ParserMatchClass = UImmLog2XLenAsmOperand;
  let DecoderMethod = "decodeUImmLog2XLenOperand";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (!MCOp.evaluateAsConstantImm(Imm))
      return false;
    if (STI.getTargetTriple().isArch64Bit())
      return isUInt<6>(Imm);
    return isUInt<5>(Imm);
  }];
  let OperandType = "OPERAND_UIMMLOG2XLEN";
}

def InsnDirectiveOpcode : AsmOperandClass {
  let Name = "InsnDirectiveOpcode";
  let ParserMethod = "parseInsnDirectiveOpcode";
  let RenderMethod = "addImmOperands";
  let PredicateMethod = "isImm";
}

def uimm1 : RISCVUImmLeafOp<1>;
def uimm2 : RISCVUImmLeafOp<2>;
def uimm3 : RISCVUImmOp<3>;
def uimm4 : RISCVUImmLeafOp<4>;
def uimm5 : RISCVUImmLeafOp<5>;
def uimm6 : RISCVUImmLeafOp<6>;
def uimm7_opcode : RISCVUImmOp<7> {
  let ParserMatchClass = InsnDirectiveOpcode;
}
def uimm7 : RISCVUImmOp<7>;
def uimm8 : RISCVUImmOp<8>;
def uimm16 : RISCVUImmOp<16>;
def uimm32 : RISCVUImmOp<32>;
def uimm48 : RISCVUImmOp<48>;
def uimm64 : RISCVUImmOp<64>;
def simm12 : RISCVSImmLeafOp<12> {
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<12>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}

// A 12-bit signed immediate that does not fit in a 6-bit signed immediate and
// whose negation also fits in 12 bits.
def simm12_no6 : ImmLeaf<XLenVT, [{
  return isInt<12>(Imm) && !isInt<6>(Imm) && isInt<12>(-Imm);}]>;

// A 13-bit signed immediate where the least significant bit is zero.
def simm13_lsb0 : Operand<OtherVT> {
  let ParserMatchClass = SImmAsmOperand<13, "Lsb0">;
  let PrintMethod = "printBranchOperand";
  let EncoderMethod = "getImmOpValueAsr1";
  let DecoderMethod = "decodeSImmOperandAndLsl1<13>";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isShiftedInt<12, 1>(Imm);
    return MCOp.isBareSymbolRef();
  }];
  let OperandType = "OPERAND_PCREL";
}

class UImm20OperandMaybeSym : RISCVUImmOp<20> {
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isUInt<20>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}

def uimm20_lui : UImm20OperandMaybeSym {
  let ParserMatchClass = UImmAsmOperand<20, "LUI">;
}
def uimm20_auipc : UImm20OperandMaybeSym {
  let ParserMatchClass = UImmAsmOperand<20, "AUIPC">;
}

def uimm20 : RISCVUImmOp<20>;

def Simm21Lsb0JALAsmOperand : SImmAsmOperand<21, "Lsb0JAL"> {
  let ParserMethod = "parseJALOffset";
}

// A 21-bit signed immediate where the least significant bit is zero.
def simm21_lsb0_jal : Operand<OtherVT> {
  let ParserMatchClass = Simm21Lsb0JALAsmOperand;
  let PrintMethod = "printBranchOperand";
  let EncoderMethod = "getImmOpValueAsr1";
  let DecoderMethod = "decodeSImmOperandAndLsl1<21>";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isShiftedInt<20, 1>(Imm);
    return MCOp.isBareSymbolRef();
  }];
  let OperandType = "OPERAND_PCREL";
}

def BareSymbol : AsmOperandClass {
  let Name = "BareSymbol";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidBareSymbol";
  let ParserMethod = "parseBareSymbol";
}

// A bare symbol.
def bare_symbol : Operand<XLenVT> {
  let ParserMatchClass = BareSymbol;
}

def CallSymbol : AsmOperandClass {
  let Name = "CallSymbol";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidCallSymbol";
  let ParserMethod = "parseCallSymbol";
}

// A bare symbol used in call/tail only.
def call_symbol : Operand<XLenVT> {
  let ParserMatchClass = CallSymbol;
}

def PseudoJumpSymbol : AsmOperandClass {
  let Name = "PseudoJumpSymbol";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidPseudoJumpSymbol";
  let ParserMethod = "parsePseudoJumpSymbol";
}

// A bare symbol used for pseudo jumps only.
def pseudo_jump_symbol : Operand<XLenVT> {
  let ParserMatchClass = PseudoJumpSymbol;
}

def TPRelAddSymbol : AsmOperandClass {
  let Name = "TPRelAddSymbol";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidTPRelAddSymbol";
  let ParserMethod = "parseOperandWithModifier";
}

// A bare symbol with the %tprel_add variant.
def tprel_add_symbol : Operand<XLenVT> {
  let ParserMatchClass = TPRelAddSymbol;
}

def CSRSystemRegister : AsmOperandClass {
  let Name = "CSRSystemRegister";
  let ParserMethod = "parseCSRSystemRegister";
  let DiagnosticType = "InvalidCSRSystemRegister";
}

def csr_sysreg : RISCVOp, TImmLeaf<XLenVT, "return isUInt<12>(Imm);"> {
  let ParserMatchClass = CSRSystemRegister;
  let PrintMethod = "printCSRSystemRegister";
  let DecoderMethod = "decodeUImmOperand<12>";
  let OperandType = "OPERAND_UIMM12";
}

// A parameterized register class alternative to i32imm/i64imm from Target.td.
def ixlenimm : Operand<XLenVT>;

// Condition code used by select and short forward branch pseudos.
def cond_code : RISCVOp {
  let OperandType = "OPERAND_COND_CODE";
}

def ixlenimm_li : Operand<XLenVT> {
  let ParserMatchClass = ImmXLenAsmOperand<"", "LI">;
}

// Accepts subset of LI operands, used by LAImm and LLAImm
def ixlenimm_li_restricted : Operand<XLenVT> {
  let ParserMatchClass = ImmXLenAsmOperand<"", "LI_Restricted">;
}

// Standalone (codegen-only) immleaf patterns.

// A 12-bit signed immediate plus one, i.e. an immediate in the range
// [-2047, 2048].
def simm12_plus1 : ImmLeaf<XLenVT,
  [{return (isInt<12>(Imm) && Imm != -2048) || Imm == 2048;}]>;

// A 6-bit constant greater than 32.
def uimm6gt32 : ImmLeaf<XLenVT, [{
  return isUInt<6>(Imm) && Imm > 32;
}]>;

// Addressing modes.
def AddrRegImm : ComplexPattern<iPTR, 2, "SelectAddrRegImm">;

// Return the negation of an immediate value.
def NegImm : SDNodeXForm<imm, [{
  return CurDAG->getSignedTargetConstant(-N->getSExtValue(), SDLoc(N),
                                         N->getValueType(0));
}]>;
def GINegImm : GICustomOperandRenderer<"renderNegImm">,
               GISDNodeXFormEquiv<NegImm>;

// Return an immediate value minus 32.
def ImmSub32 : SDNodeXForm<imm, [{
  return CurDAG->getSignedTargetConstant(N->getSExtValue() - 32, SDLoc(N),
                                         N->getValueType(0));
}]>;

// Return an immediate subtracted from XLen.
def ImmSubFromXLen : SDNodeXForm<imm, [{
  uint64_t XLen = Subtarget->getXLen();
  return CurDAG->getTargetConstant(XLen - N->getZExtValue(), SDLoc(N),
                                   N->getValueType(0));
}]>;
def GIImmSubFromXLen : GICustomOperandRenderer<"renderImmSubFromXLen">,
                       GISDNodeXFormEquiv<ImmSubFromXLen>;

// Return an immediate subtracted from 32.
def ImmSubFrom32 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N),
                                   N->getValueType(0));
}]>;
def GIImmSubFrom32 : GICustomOperandRenderer<"renderImmSubFrom32">,
                     GISDNodeXFormEquiv<ImmSubFrom32>;

// Check if (add r, imm) can be optimized to (ADDI (ADDI r, imm0), imm1),
// in which imm = imm0 + imm1 and both imm0 and imm1 are simm12. We make imm0
// as large as possible and imm1 as small as possible so that we might be able
// to use c.addi for the small immediate.
def AddiPair : ImmLeaf<XLenVT, [{
  // The immediate operand must be in range [-4096,-2049] or [2048,4094].
  return (-4096 <= Imm && Imm <= -2049) || (2048 <= Imm && Imm <= 4094);
}]>;

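// Illustrative example (not from the original comments): imm = 2060 splits as
// 2047 + 13, giving (ADDI (ADDI r, 2047), 13), where the second ADDI is small
// enough to become c.addi; imm = -3000 splits as -2048 + (-952).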
// Return imm - (imm < 0 ? -2048 : 2047).
def AddiPairImmSmall : SDNodeXForm<imm, [{
  int64_t Imm = N->getSExtValue();
  int64_t Adj = N->getSExtValue() < 0 ? -2048 : 2047;
  return CurDAG->getSignedTargetConstant(Imm - Adj, SDLoc(N),
                                         N->getValueType(0));
}]>;
def GIAddiPairImmSmall : GICustomOperandRenderer<"renderAddiPairImmSmall">,
                         GISDNodeXFormEquiv<AddiPairImmSmall>;

// Return -2048 if immediate is negative or 2047 if positive. These are the
// largest simm12 values.
def AddiPairImmLarge : SDNodeXForm<imm, [{
  int64_t Imm = N->getSExtValue() < 0 ? -2048 : 2047;
  return CurDAG->getSignedTargetConstant(Imm, SDLoc(N), N->getValueType(0));
}]>;
def GIAddiPairImmLarge : GICustomOperandRenderer<"renderAddiPairImmLarge">,
                         GISDNodeXFormEquiv<AddiPairImmLarge>;

def TrailingZeros : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(llvm::countr_zero(N->getZExtValue()),
                                   SDLoc(N), N->getValueType(0));
}]>;
def GITrailingZeros : GICustomOperandRenderer<"renderTrailingZeros">,
                      GISDNodeXFormEquiv<TrailingZeros>;

def XLenSubTrailingOnes : SDNodeXForm<imm, [{
  uint64_t XLen = Subtarget->getXLen();
  uint64_t TrailingOnes = llvm::countr_one(N->getZExtValue());
  return CurDAG->getTargetConstant(XLen - TrailingOnes, SDLoc(N),
                                   N->getValueType(0));
}]>;
def GIXLenSubTrailingOnes : GICustomOperandRenderer<"renderXLenSubTrailingOnes">,
                            GISDNodeXFormEquiv<XLenSubTrailingOnes>;

// Checks if this mask is a non-empty sequence of ones starting at the
// most/least significant bit with the remainder zero and exceeds simm32/simm12.
def LeadingOnesMask : ImmLeaf<XLenVT, [{
  return !isInt<32>(Imm) && isMask_64(~Imm);
}], TrailingZeros>;

def TrailingOnesMask : IntImmLeaf<XLenVT, [{
  return !isInt<12>(Imm.getSExtValue()) && isMask_64(Imm.getZExtValue());
}], XLenSubTrailingOnes>;

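// Illustrative example (not from the original comments, used by the AND
// patterns later in this file): a leading-ones mask such as
// 0xFFFFFFFFFFFF0000 has 16 trailing zeros, so (and x, mask) becomes SRLI by
// 16 followed by SLLI by 16; a trailing-ones mask such as 0xFFFFFFFFFF
// (40 ones, XLEN = 64) becomes SLLI by 24 followed by SRLI by 24.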
// Similar to LeadingOnesMask, but only consider leading ones in the lower 32
// bits.
def LeadingOnesWMask : ImmLeaf<XLenVT, [{
  // If the value is a uint32 but not an int32, it must have bit 31 set and
  // bits 63:32 cleared. After that we're looking for a shifted mask but not
  // an all ones mask.
  return !isInt<32>(Imm) && isUInt<32>(Imm) && isShiftedMask_64(Imm) &&
         Imm != UINT64_C(0xffffffff);
}], TrailingZeros>;

//===----------------------------------------------------------------------===//
// Instruction Formats
//===----------------------------------------------------------------------===//

include "RISCVInstrFormats.td"

//===----------------------------------------------------------------------===//
// Instruction Class Templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class BranchCC_rri<bits<3> funct3, string opcodestr>
    : RVInstB<funct3, OPC_BRANCH, (outs),
              (ins GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12),
              opcodestr, "$rs1, $rs2, $imm12">,
      Sched<[WriteJmp, ReadJmp, ReadJmp]> {
  let isBranch = 1;
  let isTerminator = 1;
}

let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
class Load_ri<bits<3> funct3, string opcodestr, DAGOperand rty = GPR>
    : RVInstI<funct3, OPC_LOAD, (outs rty:$rd), (ins GPRMem:$rs1, simm12:$imm12),
              opcodestr, "$rd, ${imm12}(${rs1})">;

class HLoad_r<bits<7> funct7, bits<5> funct5, string opcodestr>
    : RVInstR<funct7, 0b100, OPC_SYSTEM, (outs GPR:$rd),
              (ins GPRMemZeroOffset:$rs1), opcodestr, "$rd, $rs1"> {
  let rs2 = funct5;
}
}

// Operands for stores are in the order srcreg, base, offset rather than
// reflecting the order these fields are specified in the instruction
// encoding.
let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
class Store_rri<bits<3> funct3, string opcodestr, DAGOperand rty = GPR>
    : RVInstS<funct3, OPC_STORE, (outs),
              (ins rty:$rs2, GPRMem:$rs1, simm12:$imm12),
              opcodestr, "$rs2, ${imm12}(${rs1})">;

class HStore_rr<bits<7> funct7, string opcodestr>
    : RVInstR<funct7, 0b100, OPC_SYSTEM, (outs),
              (ins GPR:$rs2, GPRMemZeroOffset:$rs1),
              opcodestr, "$rs2, $rs1"> {
  let rd = 0;
}
}

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class ALU_ri<bits<3> funct3, string opcodestr>
    : RVInstI<funct3, OPC_OP_IMM, (outs GPR:$rd), (ins GPR:$rs1, simm12:$imm12),
              opcodestr, "$rd, $rs1, $imm12">,
      Sched<[WriteIALU, ReadIALU]>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class Shift_ri<bits<5> imm11_7, bits<3> funct3, string opcodestr>
    : RVInstIShift<imm11_7, funct3, OPC_OP_IMM, (outs GPR:$rd),
                   (ins GPR:$rs1, uimmlog2xlen:$shamt), opcodestr,
                   "$rd, $rs1, $shamt">;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class ALU_rr<bits<7> funct7, bits<3> funct3, string opcodestr,
             bit Commutable = 0>
    : RVInstR<funct7, funct3, OPC_OP, (outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
              opcodestr, "$rd, $rs1, $rs2"> {
  let isCommutable = Commutable;
}

let hasNoSchedulingInfo = 1,
    hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
class CSR_ir<bits<3> funct3, string opcodestr>
    : RVInstI<funct3, OPC_SYSTEM, (outs GPR:$rd), (ins csr_sysreg:$imm12, GPR:$rs1),
              opcodestr, "$rd, $imm12, $rs1">, Sched<[WriteCSR, ReadCSR]>;

let hasNoSchedulingInfo = 1,
    hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
class CSR_ii<bits<3> funct3, string opcodestr>
    : RVInstI<funct3, OPC_SYSTEM, (outs GPR:$rd),
              (ins csr_sysreg:$imm12, uimm5:$rs1),
              opcodestr, "$rd, $imm12, $rs1">, Sched<[WriteCSR]>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class ShiftW_ri<bits<7> imm11_5, bits<3> funct3, string opcodestr>
    : RVInstIShiftW<imm11_5, funct3, OPC_OP_IMM_32, (outs GPR:$rd),
                    (ins GPR:$rs1, uimm5:$shamt), opcodestr,
                    "$rd, $rs1, $shamt">;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class ALUW_rr<bits<7> funct7, bits<3> funct3, string opcodestr,
              bit Commutable = 0>
    : RVInstR<funct7, funct3, OPC_OP_32, (outs GPR:$rd),
              (ins GPR:$rs1, GPR:$rs2), opcodestr, "$rd, $rs1, $rs2"> {
  let isCommutable = Commutable;
}

let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
class Priv<string opcodestr, bits<7> funct7>
    : RVInstR<funct7, 0b000, OPC_SYSTEM, (outs), (ins), opcodestr, "">;

let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
class Priv_rr<string opcodestr, bits<7> funct7>
    : RVInstR<funct7, 0b000, OPC_SYSTEM, (outs), (ins GPR:$rs1, GPR:$rs2),
              opcodestr, "$rs1, $rs2"> {
  let rd = 0;
}

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class Unary_r<bits<12> imm12, bits<3> funct3, string opcodestr>
    : RVInstIUnary<imm12, funct3, OPC_OP_IMM, (outs GPR:$rd), (ins GPR:$rs1),
                   opcodestr, "$rd, $rs1">;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class UnaryW_r<bits<12> imm12, bits<3> funct3, string opcodestr>
    : RVInstIUnary<imm12, funct3, OPC_OP_IMM_32, (outs GPR:$rd), (ins GPR:$rs1),
                   opcodestr, "$rd, $rs1">;

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
let isReMaterializable = 1, isAsCheapAsAMove = 1,
    IsSignExtendingOpW = 1 in
def LUI : RVInstU<OPC_LUI, (outs GPR:$rd), (ins uimm20_lui:$imm20),
                  "lui", "$rd, $imm20">, Sched<[WriteIALU]>;

def AUIPC : RVInstU<OPC_AUIPC, (outs GPR:$rd), (ins uimm20_auipc:$imm20),
                    "auipc", "$rd, $imm20">, Sched<[WriteIALU]>;

def JAL : RVInstJ<OPC_JAL, (outs GPR:$rd), (ins simm21_lsb0_jal:$imm20),
                  "jal", "$rd, $imm20">, Sched<[WriteJal]>;

def JALR : RVInstI<0b000, OPC_JALR, (outs GPR:$rd),
                   (ins GPR:$rs1, simm12:$imm12),
                   "jalr", "$rd, ${imm12}(${rs1})">,
           Sched<[WriteJalr, ReadJalr]>;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

def BEQ : BranchCC_rri<0b000, "beq">;
def BNE : BranchCC_rri<0b001, "bne">;
def BLT : BranchCC_rri<0b100, "blt">;
def BGE : BranchCC_rri<0b101, "bge">;
def BLTU : BranchCC_rri<0b110, "bltu">;
def BGEU : BranchCC_rri<0b111, "bgeu">;

let IsSignExtendingOpW = 1 in {
def LB : Load_ri<0b000, "lb">, Sched<[WriteLDB, ReadMemBase]>;
def LH : Load_ri<0b001, "lh">, Sched<[WriteLDH, ReadMemBase]>;
def LW : Load_ri<0b010, "lw">, Sched<[WriteLDW, ReadMemBase]>;
def LBU : Load_ri<0b100, "lbu">, Sched<[WriteLDB, ReadMemBase]>;
def LHU : Load_ri<0b101, "lhu">, Sched<[WriteLDH, ReadMemBase]>;
}

def SB : Store_rri<0b000, "sb">, Sched<[WriteSTB, ReadStoreData, ReadMemBase]>;
def SH : Store_rri<0b001, "sh">, Sched<[WriteSTH, ReadStoreData, ReadMemBase]>;
def SW : Store_rri<0b010, "sw">, Sched<[WriteSTW, ReadStoreData, ReadMemBase]>;

// ADDI isn't always rematerializable, but isReMaterializable will be used as
// a hint which is verified in isReallyTriviallyReMaterializable.
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def ADDI : ALU_ri<0b000, "addi">;

let IsSignExtendingOpW = 1 in {
def SLTI : ALU_ri<0b010, "slti">;
def SLTIU : ALU_ri<0b011, "sltiu">;
}

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def XORI : ALU_ri<0b100, "xori">;
def ORI : ALU_ri<0b110, "ori">;
}

def ANDI : ALU_ri<0b111, "andi">;

def SLLI : Shift_ri<0b00000, 0b001, "slli">,
           Sched<[WriteShiftImm, ReadShiftImm]>;
def SRLI : Shift_ri<0b00000, 0b101, "srli">,
           Sched<[WriteShiftImm, ReadShiftImm]>;
def SRAI : Shift_ri<0b01000, 0b101, "srai">,
           Sched<[WriteShiftImm, ReadShiftImm]>;

def ADD : ALU_rr<0b0000000, 0b000, "add", Commutable=1>,
          Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def SUB : ALU_rr<0b0100000, 0b000, "sub">,
          Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def SLL : ALU_rr<0b0000000, 0b001, "sll">,
          Sched<[WriteShiftReg, ReadShiftReg, ReadShiftReg]>;
let IsSignExtendingOpW = 1 in {
def SLT : ALU_rr<0b0000000, 0b010, "slt">,
          Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def SLTU : ALU_rr<0b0000000, 0b011, "sltu">,
           Sched<[WriteIALU, ReadIALU, ReadIALU]>;
}
def XOR : ALU_rr<0b0000000, 0b100, "xor", Commutable=1>,
          Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def SRL : ALU_rr<0b0000000, 0b101, "srl">,
          Sched<[WriteShiftReg, ReadShiftReg, ReadShiftReg]>;
def SRA : ALU_rr<0b0100000, 0b101, "sra">,
          Sched<[WriteShiftReg, ReadShiftReg, ReadShiftReg]>;
def OR : ALU_rr<0b0000000, 0b110, "or", Commutable=1>,
         Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def AND : ALU_rr<0b0000000, 0b111, "and", Commutable=1>,
          Sched<[WriteIALU, ReadIALU, ReadIALU]>;

let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
def FENCE : RVInstI<0b000, OPC_MISC_MEM, (outs),
                    (ins fencearg:$pred, fencearg:$succ),
                    "fence", "$pred, $succ">, Sched<[]> {
  bits<4> pred;
  bits<4> succ;

  let rs1 = 0;
  let rd = 0;
  let imm12 = {0b0000,pred,succ};
}

def FENCE_TSO : RVInstI<0b000, OPC_MISC_MEM, (outs), (ins), "fence.tso", "">, Sched<[]> {
  let rs1 = 0;
  let rd = 0;
  let imm12 = {0b1000,0b0011,0b0011};
}

def FENCE_I : RVInstI<0b001, OPC_MISC_MEM, (outs), (ins), "fence.i", "">, Sched<[]> {
  let rs1 = 0;
  let rd = 0;
  let imm12 = 0;
}

def ECALL : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "ecall", "">, Sched<[WriteJmp]> {
  let rs1 = 0;
  let rd = 0;
  let imm12 = 0;
}

def EBREAK : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "ebreak", "">,
             Sched<[]> {
  let rs1 = 0;
  let rd = 0;
  let imm12 = 1;
}

// This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
// instruction (i.e., it should always trap, if your implementation has invalid
// instruction traps).
def UNIMP : RVInstI<0b001, OPC_SYSTEM, (outs), (ins), "unimp", "">,
            Sched<[]> {
  let rs1 = 0;
  let rd = 0;
  let imm12 = 0b110000000000;
}

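// Illustrative note (not from the original comments): with funct3 0b001,
// rs1 = rd = 0 and imm12 = 0b110000000000 (0xC00), the encoding above is
// 0xC0001073, i.e. the same bits as "csrrw x0, cycle, x0".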
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0

def CSRRW : CSR_ir<0b001, "csrrw">;
def CSRRS : CSR_ir<0b010, "csrrs">;
def CSRRC : CSR_ir<0b011, "csrrc">;

def CSRRWI : CSR_ii<0b101, "csrrwi">;
def CSRRSI : CSR_ii<0b110, "csrrsi">;
def CSRRCI : CSR_ii<0b111, "csrrci">;

/// RV64I instructions

let Predicates = [IsRV64] in {
def LWU : Load_ri<0b110, "lwu">, Sched<[WriteLDW, ReadMemBase]>;
def LD : Load_ri<0b011, "ld">, Sched<[WriteLDD, ReadMemBase]>;
def SD : Store_rri<0b011, "sd">, Sched<[WriteSTD, ReadStoreData, ReadMemBase]>;

let IsSignExtendingOpW = 1 in {
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
def ADDIW : RVInstI<0b000, OPC_OP_IMM_32, (outs GPR:$rd),
                    (ins GPR:$rs1, simm12:$imm12),
                    "addiw", "$rd, $rs1, $imm12">,
            Sched<[WriteIALU32, ReadIALU32]>;

def SLLIW : ShiftW_ri<0b0000000, 0b001, "slliw">,
            Sched<[WriteShiftImm32, ReadShiftImm32]>;
def SRLIW : ShiftW_ri<0b0000000, 0b101, "srliw">,
            Sched<[WriteShiftImm32, ReadShiftImm32]>;
def SRAIW : ShiftW_ri<0b0100000, 0b101, "sraiw">,
            Sched<[WriteShiftImm32, ReadShiftImm32]>;

def ADDW : ALUW_rr<0b0000000, 0b000, "addw", Commutable=1>,
           Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>;
def SUBW : ALUW_rr<0b0100000, 0b000, "subw">,
           Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>;
def SLLW : ALUW_rr<0b0000000, 0b001, "sllw">,
           Sched<[WriteShiftReg32, ReadShiftReg32, ReadShiftReg32]>;
def SRLW : ALUW_rr<0b0000000, 0b101, "srlw">,
           Sched<[WriteShiftReg32, ReadShiftReg32, ReadShiftReg32]>;
def SRAW : ALUW_rr<0b0100000, 0b101, "sraw">,
           Sched<[WriteShiftReg32, ReadShiftReg32, ReadShiftReg32]>;
} // IsSignExtendingOpW = 1
} // Predicates = [IsRV64]

//===----------------------------------------------------------------------===//
// Privileged instructions
//===----------------------------------------------------------------------===//

let isBarrier = 1, isReturn = 1, isTerminator = 1 in {
def SRET : Priv<"sret", 0b0001000>, Sched<[]> {
  let rd = 0;
  let rs1 = 0;
  let rs2 = 0b00010;
}

def MRET : Priv<"mret", 0b0011000>, Sched<[]> {
  let rd = 0;
  let rs1 = 0;
  let rs2 = 0b00010;
}
} // isBarrier = 1, isReturn = 1, isTerminator = 1

let Predicates = [HasStdExtSmrnmi] in {
def MNRET : Priv<"mnret", 0b0111000>, Sched<[]> {
  let rd = 0;
  let rs1 = 0;
  let rs2 = 0b00010;
}
}// Predicates = [HasStdExtSmrnmi]

def WFI : Priv<"wfi", 0b0001000>, Sched<[]> {
  let rd = 0;
  let rs1 = 0;
  let rs2 = 0b00101;
}

let Predicates = [HasStdExtSvinval] in {
def SFENCE_W_INVAL : Priv<"sfence.w.inval", 0b0001100>, Sched<[]> {
  let rd = 0;
  let rs1 = 0;
  let rs2 = 0;
}

def SFENCE_INVAL_IR : Priv<"sfence.inval.ir", 0b0001100>, Sched<[]> {
  let rd = 0;
  let rs1 = 0;
  let rs2 = 0b00001;
}
def SINVAL_VMA : Priv_rr<"sinval.vma", 0b0001011>, Sched<[]>;
def HINVAL_VVMA : Priv_rr<"hinval.vvma", 0b0010011>, Sched<[]>;
def HINVAL_GVMA : Priv_rr<"hinval.gvma", 0b0110011>, Sched<[]>;
} // Predicates = [HasStdExtSvinval]

def SFENCE_VMA : Priv_rr<"sfence.vma", 0b0001001>, Sched<[]>;

let Predicates = [HasStdExtH] in {
def HFENCE_VVMA : Priv_rr<"hfence.vvma", 0b0010001>, Sched<[]>;
def HFENCE_GVMA : Priv_rr<"hfence.gvma", 0b0110001>, Sched<[]>;

def HLV_B : HLoad_r<0b0110000, 0b00000, "hlv.b">, Sched<[]>;
def HLV_BU : HLoad_r<0b0110000, 0b00001, "hlv.bu">, Sched<[]>;
def HLV_H : HLoad_r<0b0110010, 0b00000, "hlv.h">, Sched<[]>;
def HLV_HU : HLoad_r<0b0110010, 0b00001, "hlv.hu">, Sched<[]>;
def HLVX_HU : HLoad_r<0b0110010, 0b00011, "hlvx.hu">, Sched<[]>;
def HLV_W : HLoad_r<0b0110100, 0b00000, "hlv.w">, Sched<[]>;
def HLVX_WU : HLoad_r<0b0110100, 0b00011, "hlvx.wu">, Sched<[]>;
def HSV_B : HStore_rr<0b0110001, "hsv.b">, Sched<[]>;
def HSV_H : HStore_rr<0b0110011, "hsv.h">, Sched<[]>;
def HSV_W : HStore_rr<0b0110101, "hsv.w">, Sched<[]>;
}
let Predicates = [IsRV64, HasStdExtH] in {
def HLV_WU : HLoad_r<0b0110100, 0b00001, "hlv.wu">, Sched<[]>;
def HLV_D : HLoad_r<0b0110110, 0b00000, "hlv.d">, Sched<[]>;
def HSV_D : HStore_rr<0b0110111, "hsv.d">, Sched<[]>;
}

let Predicates = [HasStdExtSmctrOrSsctr] in {
def SCTRCLR : Priv<"sctrclr", 0b0001000>, Sched<[]> {
  let rd = 0;
  let rs1 = 0;
  let rs2 = 0b00100;
}
}

//===----------------------------------------------------------------------===//
// Debug instructions
//===----------------------------------------------------------------------===//

let isBarrier = 1, isReturn = 1, isTerminator = 1 in {
def DRET : Priv<"dret", 0b0111101>, Sched<[]> {
  let rd = 0;
  let rs1 = 0;
  let rs2 = 0b10010;
}
} // isBarrier = 1, isReturn = 1, isTerminator = 1

//===----------------------------------------------------------------------===//
// Assembler Pseudo Instructions (User-Level ISA, Version 2.2, Chapter 20)
//===----------------------------------------------------------------------===//

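// Illustrative example (not from the original comments): on RV32,
// "li a0, 0x12345678" expands to "lui a0, 0x12345" followed by
// "addi a0, a0, 0x678"; larger 64-bit constants on RV64 may additionally need
// slli/addi steps, which is why the conservative Size below is 32 bytes.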
// Note that the size is 32 because up to 8 32-bit instructions are needed to
// generate an arbitrary 64-bit immediate. However, the size does not really
// matter since PseudoLI is currently only used in the AsmParser where it gets
// expanded to real instructions immediately.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 32,
    isCodeGenOnly = 0, isAsmParserOnly = 1 in
def PseudoLI : Pseudo<(outs GPR:$rd), (ins ixlenimm_li:$imm), [],
                      "li", "$rd, $imm">;

def PseudoLB : PseudoLoad<"lb">;
def PseudoLBU : PseudoLoad<"lbu">;
def PseudoLH : PseudoLoad<"lh">;
def PseudoLHU : PseudoLoad<"lhu">;
def PseudoLW : PseudoLoad<"lw">;

def PseudoSB : PseudoStore<"sb">;
def PseudoSH : PseudoStore<"sh">;
def PseudoSW : PseudoStore<"sw">;

let Predicates = [IsRV64] in {
def PseudoLWU : PseudoLoad<"lwu">;
def PseudoLD : PseudoLoad<"ld">;
def PseudoSD : PseudoStore<"sd">;
} // Predicates = [IsRV64]

def : InstAlias<"nop", (ADDI X0, X0, 0), 3>;
def : InstAlias<"li $rd, $imm", (ADDI GPR:$rd, X0, simm12:$imm), 2>;
def : InstAlias<"mv $rd, $rs", (ADDI GPR:$rd, GPR:$rs, 0)>;

def : InstAlias<"not $rd, $rs", (XORI GPR:$rd, GPR:$rs, -1)>;
def : InstAlias<"neg $rd, $rs", (SUB GPR:$rd, X0, GPR:$rs)>;

let Predicates = [IsRV64] in {
def : InstAlias<"negw $rd, $rs", (SUBW GPR:$rd, X0, GPR:$rs)>;
def : InstAlias<"sext.w $rd, $rs", (ADDIW GPR:$rd, GPR:$rs, 0)>;
} // Predicates = [IsRV64]

def : InstAlias<"seqz $rd, $rs", (SLTIU GPR:$rd, GPR:$rs, 1)>;
def : InstAlias<"snez $rd, $rs", (SLTU GPR:$rd, X0, GPR:$rs)>;
def : InstAlias<"sltz $rd, $rs", (SLT GPR:$rd, GPR:$rs, X0)>;
def : InstAlias<"sgtz $rd, $rs", (SLT GPR:$rd, X0, GPR:$rs)>;

// sgt/sgtu are recognised by the GNU assembler but the canonical slt/sltu
// form will always be printed. Therefore, set a zero weight.
def : InstAlias<"sgt $rd, $rs, $rt", (SLT GPR:$rd, GPR:$rt, GPR:$rs), 0>;
def : InstAlias<"sgtu $rd, $rs, $rt", (SLTU GPR:$rd, GPR:$rt, GPR:$rs), 0>;

def : InstAlias<"beqz $rs, $offset",
                (BEQ GPR:$rs, X0, simm13_lsb0:$offset)>;
def : InstAlias<"bnez $rs, $offset",
                (BNE GPR:$rs, X0, simm13_lsb0:$offset)>;
def : InstAlias<"blez $rs, $offset",
                (BGE X0, GPR:$rs, simm13_lsb0:$offset)>;
def : InstAlias<"bgez $rs, $offset",
                (BGE GPR:$rs, X0, simm13_lsb0:$offset)>;
def : InstAlias<"bltz $rs, $offset",
                (BLT GPR:$rs, X0, simm13_lsb0:$offset)>;
def : InstAlias<"bgtz $rs, $offset",
                (BLT X0, GPR:$rs, simm13_lsb0:$offset)>;

// Always output the canonical mnemonic for the pseudo branch instructions.
// The GNU tools emit the canonical mnemonic for the branch pseudo instructions
// as well (e.g. "bgt" will be recognised by the assembler but never printed by
// objdump). Match this behaviour by setting a zero weight.
def : InstAlias<"bgt $rs, $rt, $offset",
                (BLT GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
def : InstAlias<"ble $rs, $rt, $offset",
                (BGE GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
def : InstAlias<"bgtu $rs, $rt, $offset",
                (BLTU GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
def : InstAlias<"bleu $rs, $rt, $offset",
                (BGEU GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;

def : InstAlias<"j $offset", (JAL X0, simm21_lsb0_jal:$offset)>;
def : InstAlias<"jal $offset", (JAL X1, simm21_lsb0_jal:$offset)>;

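// Illustrative example (not from the original comments): "bgt a0, a1, label"
// is accepted by the parser and encoded as "blt a1, a0, label"; disassembly
// prints the canonical blt form, matching the GNU behaviour noted above.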
// Non-zero offset aliases of "jalr" are the lowest weight, followed by the
// two-register form, then the one-register forms and finally "ret".
def : InstAlias<"jr $rs", (JALR X0, GPR:$rs, 0), 3>;
def : InstAlias<"jr ${offset}(${rs})", (JALR X0, GPR:$rs, simm12:$offset)>;
def : InstAlias<"jalr $rs", (JALR X1, GPR:$rs, 0), 3>;
def : InstAlias<"jalr ${offset}(${rs})", (JALR X1, GPR:$rs, simm12:$offset)>;
def : InstAlias<"jalr $rd, $rs", (JALR GPR:$rd, GPR:$rs, 0), 2>;
def : InstAlias<"ret", (JALR X0, X1, 0), 4>;

// Non-canonical forms for jump targets also accepted by the assembler.
def : InstAlias<"jr $rs, $offset", (JALR X0, GPR:$rs, simm12:$offset), 0>;
def : InstAlias<"jalr $rs, $offset", (JALR X1, GPR:$rs, simm12:$offset), 0>;
def : InstAlias<"jalr $rd, $rs, $offset", (JALR GPR:$rd, GPR:$rs, simm12:$offset), 0>;
def : InstAlias<"jr (${rs})", (JALR X0, GPR:$rs, 0), 0>;
def : InstAlias<"jalr (${rs})", (JALR X1, GPR:$rs, 0), 0>;
def : InstAlias<"jalr $rd, (${rs})", (JALR GPR:$rd, GPR:$rs, 0), 0>;

def : InstAlias<"fence", (FENCE 0xF, 0xF)>; // 0xF == iorw

let Predicates = [HasStdExtZihintpause] in
def : InstAlias<"pause", (FENCE 0x1, 0x0)>; // 0x1 == w

def : InstAlias<"rdinstret $rd", (CSRRS GPR:$rd, INSTRET.Encoding, X0), 2>;
def : InstAlias<"rdcycle $rd", (CSRRS GPR:$rd, CYCLE.Encoding, X0), 2>;
def : InstAlias<"rdtime $rd", (CSRRS GPR:$rd, TIME.Encoding, X0), 2>;

let Predicates = [IsRV32] in {
def : InstAlias<"rdinstreth $rd", (CSRRS GPR:$rd, INSTRETH.Encoding, X0), 2>;
def : InstAlias<"rdcycleh $rd", (CSRRS GPR:$rd, CYCLEH.Encoding, X0), 2>;
def : InstAlias<"rdtimeh $rd", (CSRRS GPR:$rd, TIMEH.Encoding, X0), 2>;
} // Predicates = [IsRV32]

def : InstAlias<"csrr $rd, $csr", (CSRRS GPR:$rd, csr_sysreg:$csr, X0)>;
def : InstAlias<"csrw $csr, $rs", (CSRRW X0, csr_sysreg:$csr, GPR:$rs)>;
def : InstAlias<"csrs $csr, $rs", (CSRRS X0, csr_sysreg:$csr, GPR:$rs)>;
def : InstAlias<"csrc $csr, $rs", (CSRRC X0, csr_sysreg:$csr, GPR:$rs)>;

def : InstAlias<"csrwi $csr, $imm", (CSRRWI X0, csr_sysreg:$csr, uimm5:$imm)>;
def : InstAlias<"csrsi $csr, $imm", (CSRRSI X0, csr_sysreg:$csr, uimm5:$imm)>;
def : InstAlias<"csrci $csr, $imm", (CSRRCI X0, csr_sysreg:$csr, uimm5:$imm)>;

let EmitPriority = 0 in {
def : InstAlias<"csrw $csr, $imm", (CSRRWI X0, csr_sysreg:$csr, uimm5:$imm)>;
def : InstAlias<"csrs $csr, $imm", (CSRRSI X0, csr_sysreg:$csr, uimm5:$imm)>;
def : InstAlias<"csrc $csr, $imm", (CSRRCI X0, csr_sysreg:$csr, uimm5:$imm)>;

def : InstAlias<"csrrw $rd, $csr, $imm", (CSRRWI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
def : InstAlias<"csrrs $rd, $csr, $imm", (CSRRSI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
def : InstAlias<"csrrc $rd, $csr, $imm", (CSRRCI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
}

def : InstAlias<"sfence.vma", (SFENCE_VMA X0, X0), 2>;
def : InstAlias<"sfence.vma $rs", (SFENCE_VMA GPR:$rs, X0)>;

def : InstAlias<"hfence.gvma", (HFENCE_GVMA X0, X0), 2>;
def : InstAlias<"hfence.gvma $rs", (HFENCE_GVMA GPR:$rs, X0)>;

def : InstAlias<"hfence.vvma", (HFENCE_VVMA X0, X0), 2>;
def : InstAlias<"hfence.vvma $rs", (HFENCE_VVMA GPR:$rs, X0)>;

let Predicates = [HasStdExtZihintntl] in {
  def : InstAlias<"ntl.p1", (ADD X0, X0, X2)>;
  def : InstAlias<"ntl.pall", (ADD X0, X0, X3)>;
  def : InstAlias<"ntl.s1", (ADD X0, X0, X4)>;
  def : InstAlias<"ntl.all", (ADD X0, X0, X5)>;
} // Predicates = [HasStdExtZihintntl]

let EmitPriority = 0 in {
def : InstAlias<"lb $rd, (${rs1})",
                (LB GPR:$rd, GPR:$rs1, 0)>;
def : InstAlias<"lh $rd, (${rs1})",
                (LH GPR:$rd, GPR:$rs1, 0)>;
def : InstAlias<"lw $rd, (${rs1})",
                (LW GPR:$rd, GPR:$rs1, 0)>;
def : InstAlias<"lbu $rd, (${rs1})",
                (LBU GPR:$rd, GPR:$rs1, 0)>;
def : InstAlias<"lhu $rd, (${rs1})",
                (LHU GPR:$rd, GPR:$rs1, 0)>;

def : InstAlias<"sb $rs2, (${rs1})",
                (SB GPR:$rs2, GPR:$rs1, 0)>;
def : InstAlias<"sh $rs2, (${rs1})",
                (SH GPR:$rs2, GPR:$rs1, 0)>;
def : InstAlias<"sw $rs2, (${rs1})",
                (SW GPR:$rs2, GPR:$rs1, 0)>;

def : InstAlias<"add $rd, $rs1, $imm12",
                (ADDI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
def : InstAlias<"and $rd, $rs1, $imm12",
                (ANDI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
def : InstAlias<"xor $rd, $rs1, $imm12",
                (XORI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
def : InstAlias<"or $rd, $rs1, $imm12",
                (ORI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
def : InstAlias<"sll $rd, $rs1, $shamt",
                (SLLI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
def : InstAlias<"srl $rd, $rs1, $shamt",
                (SRLI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
def : InstAlias<"sra $rd, $rs1, $shamt",
                (SRAI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
let Predicates = [IsRV64] in {
def : InstAlias<"lwu $rd, (${rs1})",
                (LWU GPR:$rd, GPR:$rs1, 0)>;
def : InstAlias<"ld $rd, (${rs1})",
                (LD GPR:$rd, GPR:$rs1, 0)>;
def : InstAlias<"sd $rs2, (${rs1})",
                (SD GPR:$rs2, GPR:$rs1, 0)>;

def : InstAlias<"addw $rd, $rs1, $imm12",
                (ADDIW GPR:$rd, GPR:$rs1, simm12:$imm12)>;
def : InstAlias<"sllw $rd, $rs1, $shamt",
                (SLLIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
def : InstAlias<"srlw $rd, $rs1, $shamt",
                (SRLIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
def : InstAlias<"sraw $rd, $rs1, $shamt",
                (SRAIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
} // Predicates = [IsRV64]
def : InstAlias<"slt $rd, $rs1, $imm12",
                (SLTI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
def : InstAlias<"sltu $rd, $rs1, $imm12",
                (SLTIU GPR:$rd, GPR:$rs1, simm12:$imm12)>;
}

def : MnemonicAlias<"move", "mv">;

// The SCALL and SBREAK instructions were renamed to ECALL and EBREAK in
// version 2.1 of the user-level ISA. Like the GNU toolchain, we still accept
// the old name for backwards compatibility.
def : MnemonicAlias<"scall", "ecall">;
def : MnemonicAlias<"sbreak", "ebreak">;

// This alias was added to the spec in December 2020. Don't print it by default
// to allow assembly we print to be compatible with versions of GNU assembler
// that don't support this alias.
def : InstAlias<"zext.b $rd, $rs", (ANDI GPR:$rd, GPR:$rs, 0xFF), 0>;

let Predicates = [HasStdExtZicfilp] in {
def : InstAlias<"lpad $imm20", (AUIPC X0, uimm20:$imm20)>;
}

//===----------------------------------------------------------------------===//
// .insn directive instructions
//===----------------------------------------------------------------------===//

def AnyRegOperand : AsmOperandClass {
  let Name = "AnyRegOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isAnyReg";
}

def AnyReg : Operand<XLenVT> {
  let OperandType = "OPERAND_REGISTER";
  let ParserMatchClass = AnyRegOperand;
}

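// Illustrative example (not from the original comments): using the generic R
// format below, ".insn r 0x33, 0, 0, a0, a1, a2" emits the same encoding as
// "add a0, a1, a2" (opcode 0x33 is OP, funct3 = funct7 = 0).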
// isCodeGenOnly = 1 to hide them from the tablegened assembly parser.
let isCodeGenOnly = 1, hasSideEffects = 1, mayLoad = 1, mayStore = 1,
    hasNoSchedulingInfo = 1 in {
def InsnR : DirectiveInsnR<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, uimm3:$funct3,
                                                   uimm7:$funct7, AnyReg:$rs1,
                                                   AnyReg:$rs2),
                           "$opcode, $funct3, $funct7, $rd, $rs1, $rs2">;
def InsnR4 : DirectiveInsnR4<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode,
                                                     uimm3:$funct3,
                                                     uimm2:$funct2,
                                                     AnyReg:$rs1, AnyReg:$rs2,
                                                     AnyReg:$rs3),
                             "$opcode, $funct3, $funct2, $rd, $rs1, $rs2, $rs3">;
def InsnI : DirectiveInsnI<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, uimm3:$funct3,
                                                   AnyReg:$rs1, simm12:$imm12),
                           "$opcode, $funct3, $rd, $rs1, $imm12">;
def InsnI_Mem : DirectiveInsnI<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode,
                                                       uimm3:$funct3,
                                                       AnyReg:$rs1,
                                                       simm12:$imm12),
                               "$opcode, $funct3, $rd, ${imm12}(${rs1})">;
def InsnB : DirectiveInsnB<(outs), (ins uimm7_opcode:$opcode, uimm3:$funct3,
                                        AnyReg:$rs1, AnyReg:$rs2,
                                        simm13_lsb0:$imm12),
                           "$opcode, $funct3, $rs1, $rs2, $imm12">;
def InsnU : DirectiveInsnU<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode,
                                                   uimm20_lui:$imm20),
                           "$opcode, $rd, $imm20">;
def InsnJ : DirectiveInsnJ<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode,
                                                   simm21_lsb0_jal:$imm20),
                           "$opcode, $rd, $imm20">;
def InsnS : DirectiveInsnS<(outs), (ins uimm7_opcode:$opcode, uimm3:$funct3,
                                        AnyReg:$rs2, AnyReg:$rs1,
                                        simm12:$imm12),
                           "$opcode, $funct3, $rs2, ${imm12}(${rs1})">;
def Insn32 : RVInst<(outs), (ins uimm32:$value), "", "", [], InstFormatOther> {
  bits<32> value;

  let Inst{31-0} = value;
  let AsmString = ".insn 0x4, $value";
}
def Insn48 : RVInst48<(outs), (ins uimm48:$value), "", "", [], InstFormatOther> {
  bits<48> value;
  let Inst{47-0} = value;
  let AsmString = ".insn 0x6, $value";
}
def Insn64 : RVInst64<(outs), (ins uimm64:$value), "", "", [], InstFormatOther> {
  bits<64> value;
  let Inst{63-0} = value;
  let AsmString = ".insn 0x8, $value";
}
}

// Use InstAliases to match these so that we can combine the insn and format
// into a mnemonic to use as the key for the tablegened asm matcher table. The
// parser will take care of creating these fake mnemonics and will only do it
// for known formats.
let EmitPriority = 0 in {
def : InstAlias<".insn_r $opcode, $funct3, $funct7, $rd, $rs1, $rs2",
                (InsnR AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, uimm7:$funct7,
                 AnyReg:$rs1, AnyReg:$rs2)>;
// Accept 4 register form of ".insn r" as alias for ".insn r4".
def : InstAlias<".insn_r $opcode, $funct3, $funct2, $rd, $rs1, $rs2, $rs3",
                (InsnR4 AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, uimm2:$funct2,
                 AnyReg:$rs1, AnyReg:$rs2, AnyReg:$rs3)>;
def : InstAlias<".insn_r4 $opcode, $funct3, $funct2, $rd, $rs1, $rs2, $rs3",
                (InsnR4 AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, uimm2:$funct2,
                 AnyReg:$rs1, AnyReg:$rs2, AnyReg:$rs3)>;
def : InstAlias<".insn_i $opcode, $funct3, $rd, $rs1, $imm12",
                (InsnI AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs1,
                 simm12:$imm12)>;
def : InstAlias<".insn_i $opcode, $funct3, $rd, ${imm12}(${rs1})",
                (InsnI_Mem AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3,
                 AnyReg:$rs1, simm12:$imm12)>;
def : InstAlias<".insn_i $opcode, $funct3, $rd, (${rs1})",
                (InsnI_Mem AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3,
                 AnyReg:$rs1, 0)>;
def : InstAlias<".insn_b $opcode, $funct3, $rs1, $rs2, $imm12",
                (InsnB uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs1,
                 AnyReg:$rs2, simm13_lsb0:$imm12)>;
// Accept sb as an alias for b.
def : InstAlias<".insn_sb $opcode, $funct3, $rs1, $rs2, $imm12",
                (InsnB uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs1,
                 AnyReg:$rs2, simm13_lsb0:$imm12)>;
def : InstAlias<".insn_u $opcode, $rd, $imm20",
                (InsnU AnyReg:$rd, uimm7_opcode:$opcode, uimm20_lui:$imm20)>;
def : InstAlias<".insn_j $opcode, $rd, $imm20",
                (InsnJ AnyReg:$rd, uimm7_opcode:$opcode, simm21_lsb0_jal:$imm20)>;
// Accept uj as an alias for j.
def : InstAlias<".insn_uj $opcode, $rd, $imm20",
                (InsnJ AnyReg:$rd, uimm7_opcode:$opcode, simm21_lsb0_jal:$imm20)>;
def : InstAlias<".insn_s $opcode, $funct3, $rs2, ${imm12}(${rs1})",
                (InsnS uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs2,
                 AnyReg:$rs1, simm12:$imm12)>;
def : InstAlias<".insn_s $opcode, $funct3, $rs2, (${rs1})",
                (InsnS uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs2,
                 AnyReg:$rs1, 0)>;
}

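// Illustrative example (not from the original comments): the parser turns
// ".insn i 0x13, 0, a0, a1, 13" into the fake mnemonic ".insn_i" matched by
// the aliases above, producing the same encoding as "addi a0, a1, 13".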
//===----------------------------------------------------------------------===//
// Pseudo-instructions and codegen patterns
//
// Naming convention: For 'generic' pattern classes, we use the naming
// convention PatTy1Ty2. For pattern classes which offer a more complex
// expansion, prefix the class name, e.g. BccPat.
//===----------------------------------------------------------------------===//

/// Generic pattern classes

class PatGpr<SDPatternOperator OpNode, RVInst Inst, ValueType vt = XLenVT>
    : Pat<(vt (OpNode (vt GPR:$rs1))), (Inst GPR:$rs1)>;
class PatGprGpr<SDPatternOperator OpNode, RVInst Inst, ValueType vt1 = XLenVT,
                ValueType vt2 = XLenVT>
    : Pat<(vt1 (OpNode (vt1 GPR:$rs1), (vt2 GPR:$rs2))), (Inst GPR:$rs1, GPR:$rs2)>;

class PatGprImm<SDPatternOperator OpNode, RVInst Inst, ImmLeaf ImmType,
                ValueType vt = XLenVT>
    : Pat<(vt (OpNode (vt GPR:$rs1), ImmType:$imm)),
          (Inst GPR:$rs1, ImmType:$imm)>;
class PatGprSimm12<SDPatternOperator OpNode, RVInstI Inst>
    : PatGprImm<OpNode, Inst, simm12>;
class PatGprUimmLog2XLen<SDPatternOperator OpNode, RVInstIShift Inst>
    : PatGprImm<OpNode, Inst, uimmlog2xlen>;

/// Predicates

def assertsexti32 : PatFrag<(ops node:$src), (assertsext node:$src), [{
  return cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32);
}]>;
def sexti16 : ComplexPattern<XLenVT, 1, "selectSExtBits<16>">;

def sexti32 : ComplexPattern<i64, 1, "selectSExtBits<32>">;
def gi_sexti32 : GIComplexOperandMatcher<s64, "selectSExtBits<32>">,
                 GIComplexPatternEquiv<sexti32>;

def assertzexti32 : PatFrag<(ops node:$src), (assertzext node:$src), [{
  return cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32);
}]>;

def zexti32 : ComplexPattern<i64, 1, "selectZExtBits<32>">;
def gi_zexti32 : GIComplexOperandMatcher<s64, "selectZExtBits<32>">,
                 GIComplexPatternEquiv<zexti32>;

def zexti16 : ComplexPattern<XLenVT, 1, "selectZExtBits<16>">;
def gi_zexti16 : GIComplexOperandMatcher<s32, "selectZExtBits<16>">,
                 GIComplexPatternEquiv<zexti16>;

def zexti8 : ComplexPattern<XLenVT, 1, "selectZExtBits<8>">;
def gi_zexti8 : GIComplexOperandMatcher<s32, "selectZExtBits<8>">,
                GIComplexPatternEquiv<zexti8>;

def ext : PatFrags<(ops node:$A), [(sext node:$A), (zext node:$A)]>;

class binop_oneuse<SDPatternOperator operator>
    : PatFrag<(ops node:$A, node:$B),
              (operator node:$A, node:$B), [{
  return N->hasOneUse();
}]>;

def and_oneuse : binop_oneuse<and>;
def mul_oneuse : binop_oneuse<mul>;

def mul_const_oneuse : PatFrag<(ops node:$A, node:$B),
                               (mul node:$A, node:$B), [{
  if (auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return N1C->hasOneUse();
  return false;
}]>;

class unop_oneuse<SDPatternOperator operator>
    : PatFrag<(ops node:$A),
              (operator node:$A), [{
  return N->hasOneUse();
}]>;

def sext_oneuse : unop_oneuse<sext>;
def zext_oneuse : unop_oneuse<zext>;
def anyext_oneuse : unop_oneuse<anyext>;
def ext_oneuse : unop_oneuse<ext>;
def fpext_oneuse : unop_oneuse<any_fpextend>;

def 33signbits_node : PatLeaf<(i64 GPR:$src), [{
  return CurDAG->ComputeNumSignBits(SDValue(N, 0)) > 32;
}]>;

class immop_oneuse<ImmLeaf leaf> : PatLeaf<(leaf), [{
  return N->hasOneUse();
}]> {
  let GISelPredicateCode = [{
    return MRI.hasOneNonDBGUse(MI.getOperand(0).getReg());
  }];
}

/// Simple arithmetic operations

def : PatGprGpr<add, ADD>;
def : PatGprSimm12<add, ADDI>;
def : PatGprGpr<sub, SUB>;
def : PatGprGpr<or, OR>;
def : PatGprSimm12<or, ORI>;
def : PatGprGpr<and, AND>;
def : PatGprSimm12<and, ANDI>;
def : PatGprGpr<xor, XOR>;
def : PatGprSimm12<xor, XORI>;
def : PatGprUimmLog2XLen<shl, SLLI>;
def : PatGprUimmLog2XLen<srl, SRLI>;
def : PatGprUimmLog2XLen<sra, SRAI>;

// Select 'or' as ADDI if the immediate bits are known to be 0 in $rs1. This
// can improve compressibility.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (N->getFlags().hasDisjoint())
    return true;
  KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0);
  KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0);
  return KnownBits::haveNoCommonBitsSet(Known0, Known1);
}]>;
def : PatGprSimm12<or_is_add, ADDI>;

def add_like : PatFrags<(ops node:$lhs, node:$rhs),
                        [(or_is_add node:$lhs, node:$rhs),
                         (add node:$lhs, node:$rhs)]>;

// Negating the low bit can be done via two (compressible) shifts. The negate
// itself is never compressible since rs1 and rd can't be the same register.
def : Pat<(i32 (sub 0, (and_oneuse GPR:$rs, 1))),
          (SRAI (i32 (SLLI $rs, 31)), 31)>, Requires<[IsRV32]>;
def : Pat<(i64 (sub 0, (and_oneuse GPR:$rs, 1))),
          (SRAI (i64 (SLLI $rs, 63)), 63)>, Requires<[IsRV64]>;

// AND with leading/trailing ones mask exceeding simm32/simm12.
def : Pat<(i64 (and GPR:$rs, immop_oneuse<LeadingOnesMask>:$mask)),
          (SLLI (i64 (SRLI $rs, (TrailingZeros imm:$mask))),
                (TrailingZeros imm:$mask))>;
def : Pat<(XLenVT (and GPR:$rs, immop_oneuse<TrailingOnesMask>:$mask)),
          (SRLI (XLenVT (SLLI $rs, (XLenSubTrailingOnes imm:$mask))),
                (XLenSubTrailingOnes imm:$mask))>;

// Match both a plain shift and one where the shift amount is masked (this is
// typically introduced when the legalizer promotes the shift amount and
// zero-extends it). For RISC-V, the mask is unnecessary as shifts in the base
// ISA only read the least significant 5 bits (RV32I) or 6 bits (RV64I).
def shiftMaskXLen : ComplexPattern<XLenVT, 1, "selectShiftMaskXLen", [], [], 0>;
def shiftMask32 : ComplexPattern<i64, 1, "selectShiftMask32", [], [], 0>;
// FIXME: This is labelled as handling 's32', however the ComplexPattern it
// refers to handles both i32 and i64 based on the HwMode. Currently this LLT
// parameter appears to be ignored so this pattern works for both, however we
// should add a LowLevelTypeByHwMode, and use that to define our XLenLLT instead
// here.
def GIShiftMaskXLen :
    GIComplexOperandMatcher<s32, "selectShiftMaskXLen">,
    GIComplexPatternEquiv<shiftMaskXLen>;
def GIShiftMask32 :
    GIComplexOperandMatcher<s64, "selectShiftMask32">,
    GIComplexPatternEquiv<shiftMask32>;

class shiftop<SDPatternOperator operator>
    : PatFrag<(ops node:$val, node:$count),
              (operator node:$val, (XLenVT (shiftMaskXLen node:$count)))>;
class shiftopw<SDPatternOperator operator>
    : PatFrag<(ops node:$val, node:$count),
              (operator node:$val, (i64 (shiftMask32 node:$count)))>;

def : PatGprGpr<shiftop<shl>, SLL>;
def : PatGprGpr<shiftop<srl>, SRL>;
def : PatGprGpr<shiftop<sra>, SRA>;

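// Illustrative example (not from the original comments): on RV64,
// (srl x, (and y, 63)) selects to a plain SRL of x by y via shiftMaskXLen,
// since SRL already ignores all but the low 6 bits of the shift amount.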
// This is a special case of the ADD instruction used to facilitate the use of a
// fourth operand to emit a relocation on a symbol relating to this instruction.
// The relocation does not affect any bits of the instruction itself but is used
// as a hint to the linker.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0 in
def PseudoAddTPRel : Pseudo<(outs GPR:$rd),
                            (ins GPR:$rs1, GPR:$rs2, tprel_add_symbol:$src), [],
                            "add", "$rd, $rs1, $rs2, $src">;

/// FrameIndex calculations

// Transforms frameindex -> tframeindex.
def to_tframeindex : SDNodeXForm<frameindex, [{
  return CurDAG->getTargetFrameIndex(N->getIndex(), N->getValueType(0));
}]>;

def : GICustomOperandRenderer<"renderFrameIndex">,
      GISDNodeXFormEquiv<to_tframeindex>;

def : Pat<(frameindex:$fi), (ADDI (iPTR (to_tframeindex $fi)), 0)>;

def : Pat<(add_like frameindex:$fi, simm12:$offset),
          (ADDI (iPTR (to_tframeindex $fi)), simm12:$offset)>;

def GIAddrRegImm :
    GIComplexOperandMatcher<s32, "selectAddrRegImm">,
    GIComplexPatternEquiv<AddrRegImm>;

/// Stack probing

let hasSideEffects = 1, mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in {
// Probed stack allocation of a constant size, used in function prologues when
// stack-clash protection is enabled.
def PROBED_STACKALLOC : Pseudo<(outs GPR:$sp),
                               (ins GPR:$scratch),
                               []>,
                        Sched<[]>;
def PROBED_STACKALLOC_RVV : Pseudo<(outs GPR:$sp),
                                   (ins GPR:$scratch),
                                   []>,
                            Sched<[]>;
let usesCustomInserter = 1 in
def PROBED_STACKALLOC_DYN : Pseudo<(outs GPR:$rd),
                                   (ins GPR:$scratch),
                                   [(set GPR:$rd, (riscv_probed_alloca GPR:$scratch))]>,
                            Sched<[]>;
}

/// HI and ADD_LO address nodes.

// Pseudo for a rematerializable LUI+ADDI sequence for loading an address.
// It will be expanded after register allocation.
// FIXME: The scheduling information does not reflect the multiple instructions.
let Size = 8, isReMaterializable = 1 in
def PseudoMovAddr : Pseudo<(outs GPR:$dst), (ins uimm20_lui:$hi, simm12:$lo), []>,
                    Sched<[WriteIALU]>;

def riscv_hi_oneuse : unop_oneuse<riscv_hi>;
def addr_hi_lo : PatFrag<(ops node:$hi, node:$lo),
                         (riscv_add_lo (riscv_hi_oneuse node:$hi), node:$lo)>;

def : Pat<(addr_hi_lo tglobaladdr:$hi, tglobaladdr:$lo),
          (PseudoMovAddr tglobaladdr:$hi, tglobaladdr:$lo)>;
def : Pat<(addr_hi_lo tblockaddress:$hi, tblockaddress:$lo),
          (PseudoMovAddr tblockaddress:$hi, tblockaddress:$lo)>;
def : Pat<(addr_hi_lo tjumptable:$hi, tjumptable:$lo),
          (PseudoMovAddr tjumptable:$hi, tjumptable:$lo)>;
def : Pat<(addr_hi_lo tconstpool:$hi, tconstpool:$lo),
          (PseudoMovAddr tconstpool:$hi, tconstpool:$lo)>;

def : Pat<(riscv_hi tglobaladdr:$in), (LUI tglobaladdr:$in)>;
def : Pat<(riscv_hi tblockaddress:$in), (LUI tblockaddress:$in)>;
def : Pat<(riscv_hi tjumptable:$in), (LUI tjumptable:$in)>;
def : Pat<(riscv_hi tconstpool:$in), (LUI tconstpool:$in)>;

def : Pat<(riscv_add_lo GPR:$hi, tglobaladdr:$lo),
          (ADDI GPR:$hi, tglobaladdr:$lo)>;
def : Pat<(riscv_add_lo GPR:$hi, tblockaddress:$lo),
          (ADDI GPR:$hi, tblockaddress:$lo)>;
def : Pat<(riscv_add_lo GPR:$hi, tjumptable:$lo),
          (ADDI GPR:$hi, tjumptable:$lo)>;
def : Pat<(riscv_add_lo GPR:$hi, tconstpool:$lo),
          (ADDI GPR:$hi, tconstpool:$lo)>;

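// Illustrative example (not from the original comments): with the medlow code
// model a global address is typically materialized as "lui rd, %hi(sym)"
// followed by "addi rd, rd, %lo(sym)"; PseudoMovAddr keeps that pair as one
// rematerializable unit until after register allocation.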

/// TLS address nodes.

def : Pat<(riscv_hi tglobaltlsaddr:$in), (LUI tglobaltlsaddr:$in)>;
def : Pat<(riscv_add_tprel GPR:$rs1, GPR:$rs2, tglobaltlsaddr:$src),
          (PseudoAddTPRel GPR:$rs1, GPR:$rs2, tglobaltlsaddr:$src)>;
def : Pat<(riscv_add_lo GPR:$src, tglobaltlsaddr:$lo),
          (ADDI GPR:$src, tglobaltlsaddr:$lo)>;

/// Setcc

def : PatGprGpr<setlt, SLT>;
def : PatGprSimm12<setlt, SLTI>;
def : PatGprGpr<setult, SLTU>;
def : PatGprSimm12<setult, SLTIU>;

// RISC-V doesn't have general instructions for integer setne/seteq, but we can
// check for equality with 0. These ComplexPatterns rewrite the setne/seteq into
// something that can be compared with 0.
// These ComplexPatterns must be used in pairs.
def riscv_setne : ComplexPattern<XLenVT, 1, "selectSETNE", [setcc]>;
def riscv_seteq : ComplexPattern<XLenVT, 1, "selectSETEQ", [setcc]>;

// Define pattern expansions for setcc operations that aren't directly
// handled by a RISC-V instruction.
def : Pat<(riscv_seteq (XLenVT GPR:$rs1)), (SLTIU GPR:$rs1, 1)>;
def : Pat<(riscv_setne (XLenVT GPR:$rs1)), (SLTU (XLenVT X0), GPR:$rs1)>;
def : Pat<(XLenVT (setne (XLenVT GPR:$rs1), -1)), (SLTIU GPR:$rs1, -1)>;

def IntCCtoRISCVCC : SDNodeXForm<riscv_selectcc, [{
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  RISCVCC::CondCode BrCC = getRISCVCCForIntCC(CC);
  return CurDAG->getTargetConstant(BrCC, SDLoc(N), Subtarget->getXLenVT());
}]>;

def riscv_selectcc_frag : PatFrag<(ops node:$lhs, node:$rhs, node:$cc,
                                       node:$truev, node:$falsev),
                                  (riscv_selectcc node:$lhs, node:$rhs,
                                                  node:$cc, node:$truev,
                                                  node:$falsev), [{}],
                                  IntCCtoRISCVCC>;

multiclass SelectCC_GPR_rrirr<DAGOperand valty, ValueType vt> {
  let usesCustomInserter = 1 in
  def _Using_CC_GPR : Pseudo<(outs valty:$dst),
                             (ins GPR:$lhs, GPR:$rhs, cond_code:$cc,
                              valty:$truev, valty:$falsev),
                             [(set valty:$dst,
                               (riscv_selectcc_frag:$cc (XLenVT GPR:$lhs), GPR:$rhs, cond,
                                                        (vt valty:$truev), valty:$falsev))]>;
  // Explicitly select 0 in the condition to X0. The register coalescer doesn't
  // always do it.
  def : Pat<(riscv_selectcc_frag:$cc (XLenVT GPR:$lhs), 0, cond, (vt valty:$truev),
                                     valty:$falsev),
            (!cast<Instruction>(NAME#"_Using_CC_GPR") GPR:$lhs, (XLenVT X0),
             (IntCCtoRISCVCC $cc), valty:$truev, valty:$falsev)>;
}

let Predicates = [NoConditionalMoveFusion] in
defm Select_GPR : SelectCC_GPR_rrirr<GPR, XLenVT>;

class SelectCompressOpt<CondCode Cond>
  : Pat<(riscv_selectcc_frag:$select (XLenVT GPR:$lhs), simm12_no6:$Constant, Cond,
                                     (XLenVT GPR:$truev), GPR:$falsev),
        (Select_GPR_Using_CC_GPR (XLenVT (ADDI GPR:$lhs, (NegImm simm12:$Constant))),
                                 (XLenVT X0),
                                 (IntCCtoRISCVCC $select), GPR:$truev, GPR:$falsev)>;

def OptForMinSize : Predicate<"MF ? MF->getFunction().hasMinSize() : false">;

let Predicates = [HasStdExtCOrZca, OptForMinSize] in {
  def : SelectCompressOpt<SETEQ>;
  def : SelectCompressOpt<SETNE>;
}
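
// Illustrative example of SelectCompressOpt: when optimizing for minimum size,
// a select on (a == 1234) folds the constant into an add so the comparison is
// against zero,
//   addi a1, a0, -1234   ; a1 == 0 iff a0 == 1234
// and the select pseudo then compares a1 with x0, which later allows the
// compressed beqz/bnez forms (register names are illustrative).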

/// Branches and jumps

// Match `riscv_brcc` and lower to the appropriate RISC-V branch instruction.
multiclass BccPat<CondCode Cond, RVInstB Inst> {
  def : Pat<(riscv_brcc (XLenVT GPR:$rs1), GPR:$rs2, Cond, bb:$imm12),
            (Inst GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12)>;
  // Explicitly select 0 to X0. The register coalescer doesn't always do it.
  def : Pat<(riscv_brcc (XLenVT GPR:$rs1), 0, Cond, bb:$imm12),
            (Inst GPR:$rs1, (XLenVT X0), simm13_lsb0:$imm12)>;
}

class BrccCompressOpt<CondCode Cond, RVInstB Inst>
  : Pat<(riscv_brcc GPR:$lhs, simm12_no6:$Constant, Cond, bb:$place),
        (Inst (XLenVT (ADDI GPR:$lhs, (NegImm simm12:$Constant))),
              (XLenVT X0), bb:$place)>;

defm : BccPat<SETEQ, BEQ>;
defm : BccPat<SETNE, BNE>;
defm : BccPat<SETLT, BLT>;
defm : BccPat<SETGE, BGE>;
defm : BccPat<SETULT, BLTU>;
defm : BccPat<SETUGE, BGEU>;

let Predicates = [HasStdExtCOrZca, OptForMinSize] in {
  def : BrccCompressOpt<SETEQ, BEQ>;
  def : BrccCompressOpt<SETNE, BNE>;
}

class LongBccPseudo : Pseudo<(outs),
                             (ins GPR:$rs1, GPR:$rs2, simm21_lsb0_jal:$imm20),
                             []> {
  let Size = 8;
  let isBarrier = 1;
  let isBranch = 1;
  let hasSideEffects = 0;
  let mayStore = 0;
  let mayLoad = 0;
  let isAsmParserOnly = 1;
  let hasNoSchedulingInfo = 1;
}

def PseudoLongBEQ : LongBccPseudo;
def PseudoLongBNE : LongBccPseudo;
def PseudoLongBLT : LongBccPseudo;
def PseudoLongBGE : LongBccPseudo;
def PseudoLongBLTU : LongBccPseudo;
def PseudoLongBGEU : LongBccPseudo;

let isBarrier = 1, isBranch = 1, isTerminator = 1 in
def PseudoBR : Pseudo<(outs), (ins simm21_lsb0_jal:$imm20), [(br bb:$imm20)]>,
               PseudoInstExpansion<(JAL X0, simm21_lsb0_jal:$imm20)>;

let Predicates = [NoStdExtZicfilp],
    isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in
def PseudoBRIND : Pseudo<(outs), (ins GPRJALR:$rs1, simm12:$imm12), []>,
                  PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12:$imm12)>;

let Predicates = [HasStdExtZicfilp],
    isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in {
def PseudoBRINDNonX7 : Pseudo<(outs), (ins GPRJALRNonX7:$rs1, simm12:$imm12), []>,
                       PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12:$imm12)>;
def PseudoBRINDX7 : Pseudo<(outs), (ins GPRX7:$rs1, simm12:$imm12), []>,
                    PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12:$imm12)>;
}

// With Zicfilp, indirect branches that need a landing pad must avoid using
// X7/T2.
let Predicates = [HasStdExtZicfilp] in {
def : Pat<(brind GPRJALRNonX7:$rs1), (PseudoBRINDNonX7 GPRJALRNonX7:$rs1, 0)>;
def : Pat<(brind (add GPRJALRNonX7:$rs1, simm12:$imm12)),
          (PseudoBRINDNonX7 GPRJALRNonX7:$rs1, simm12:$imm12)>;

def : Pat<(riscv_sw_guarded_brind GPRX7:$rs1), (PseudoBRINDX7 GPRX7:$rs1, 0)>;
def : Pat<(riscv_sw_guarded_brind (add GPRX7:$rs1, simm12:$imm12)),
          (PseudoBRINDX7 GPRX7:$rs1, simm12:$imm12)>;
}

let Predicates = [NoStdExtZicfilp] in {
def : Pat<(brind GPRJALR:$rs1), (PseudoBRIND GPRJALR:$rs1, 0)>;
def : Pat<(brind (add GPRJALR:$rs1, simm12:$imm12)),
          (PseudoBRIND GPRJALR:$rs1, simm12:$imm12)>;
}
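
// Illustrative example: a computed goto, i.e. (brind GPR:$rs1), selects one of
// the PseudoBRIND* pseudos above and is emitted as `jr rs1` (JALR with rd = x0).
// With Zicfilp, the GPRJALRNonX7 class keeps ordinary indirect branches off
// x7/t2, which is used only for the software-guarded form matched by
// riscv_sw_guarded_brind.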

// PseudoCALLReg is a generic pseudo instruction for calls which will eventually
// expand to auipc and jalr while encoding, with any given register used as the
// destination.
// Define AsmString to print "call" when compiling with the -S flag.
// Define isCodeGenOnly = 0 to support parsing the assembly "call" instruction.
let isCall = 1, isBarrier = 1, isCodeGenOnly = 0, Size = 8, hasSideEffects = 0,
    mayStore = 0, mayLoad = 0 in
def PseudoCALLReg : Pseudo<(outs GPR:$rd), (ins call_symbol:$func), [],
                           "call", "$rd, $func">,
                    Sched<[WriteIALU, WriteJalr, ReadJalr]>;

// PseudoCALL is a pseudo instruction which will eventually expand to auipc
// and jalr while encoding. This is desirable, as an auipc+jalr pair with
// R_RISCV_CALL and R_RISCV_RELAX relocations can be relaxed by the linker
// if the offset fits in a signed 21-bit immediate.
// Define AsmString to print "call" when compiling with the -S flag.
// Define isCodeGenOnly = 0 to support parsing the assembly "call" instruction.
let isCall = 1, Defs = [X1], isCodeGenOnly = 0, Size = 8 in
def PseudoCALL : Pseudo<(outs), (ins call_symbol:$func), [],
                        "call", "$func">,
                 Sched<[WriteIALU, WriteJalr, ReadJalr]>;

def : Pat<(riscv_call tglobaladdr:$func), (PseudoCALL tglobaladdr:$func)>;
def : Pat<(riscv_call texternalsym:$func), (PseudoCALL texternalsym:$func)>;

def : Pat<(riscv_sret_glue), (SRET)>;
def : Pat<(riscv_mret_glue), (MRET)>;

let isCall = 1, Defs = [X1] in {
let Predicates = [NoStdExtZicfilp] in
def PseudoCALLIndirect : Pseudo<(outs), (ins GPRJALR:$rs1),
                                [(riscv_call GPRJALR:$rs1)]>,
                         PseudoInstExpansion<(JALR X1, GPR:$rs1, 0)>;
let Predicates = [HasStdExtZicfilp] in {
def PseudoCALLIndirectNonX7 : Pseudo<(outs), (ins GPRJALRNonX7:$rs1),
                                     [(riscv_call GPRJALRNonX7:$rs1)]>,
                              PseudoInstExpansion<(JALR X1, GPR:$rs1, 0)>;
// For the large code model, non-indirect calls could be software-guarded.
def PseudoCALLIndirectX7 : Pseudo<(outs), (ins GPRX7:$rs1),
                                  [(riscv_sw_guarded_call GPRX7:$rs1)]>,
                           PseudoInstExpansion<(JALR X1, GPR:$rs1, 0)>;
}
}

let isBarrier = 1, isReturn = 1, isTerminator = 1 in
def PseudoRET : Pseudo<(outs), (ins), [(riscv_ret_glue)]>,
                PseudoInstExpansion<(JALR X0, X1, 0)>;
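
// Illustrative expansion of `call foo` via PseudoCALL (conceptually):
//   auipc ra, %pcrel_hi(foo)
//   jalr  ra, %pcrel_lo(foo)(ra)
// As noted above, the accompanying relocations let the linker relax this pair
// to a single `jal foo` when the offset fits in a signed 21-bit immediate.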

// PseudoTAIL is a pseudo instruction similar to PseudoCALL and will eventually
// expand to auipc and jalr while encoding.
// Define AsmString to print "tail" when compiling with the -S flag.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [X2],
    Size = 8, isCodeGenOnly = 0 in
def PseudoTAIL : Pseudo<(outs), (ins call_symbol:$dst), [],
                        "tail", "$dst">,
                 Sched<[WriteIALU, WriteJalr, ReadJalr]>;

let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [X2] in {
let Predicates = [NoStdExtZicfilp] in
def PseudoTAILIndirect : Pseudo<(outs), (ins GPRTC:$rs1),
                                [(riscv_tail GPRTC:$rs1)]>,
                         PseudoInstExpansion<(JALR X0, GPR:$rs1, 0)>;
let Predicates = [HasStdExtZicfilp] in {
def PseudoTAILIndirectNonX7 : Pseudo<(outs), (ins GPRTCNonX7:$rs1),
                                     [(riscv_tail GPRTCNonX7:$rs1)]>,
                              PseudoInstExpansion<(JALR X0, GPR:$rs1, 0)>;
// For the large code model, non-indirect calls could be software-guarded.
def PseudoTAILIndirectX7 : Pseudo<(outs), (ins GPRX7:$rs1),
                                  [(riscv_sw_guarded_tail GPRX7:$rs1)]>,
                           PseudoInstExpansion<(JALR X0, GPR:$rs1, 0)>;
}
}

def : Pat<(riscv_tail (iPTR tglobaladdr:$dst)),
          (PseudoTAIL tglobaladdr:$dst)>;
def : Pat<(riscv_tail (iPTR texternalsym:$dst)),
          (PseudoTAIL texternalsym:$dst)>;

let isCall = 0, isBarrier = 1, isBranch = 1, isTerminator = 1, Size = 8,
    isCodeGenOnly = 0, hasSideEffects = 0, mayStore = 0, mayLoad = 0 in
def PseudoJump : Pseudo<(outs GPR:$rd), (ins pseudo_jump_symbol:$target), [],
                        "jump", "$target, $rd">,
                 Sched<[WriteIALU, WriteJalr, ReadJalr]>;
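
// Illustrative expansion of `tail foo` via PseudoTAIL (conceptually):
//   auipc t1, %pcrel_hi(foo)
//   jr    %pcrel_lo(foo)(t1)
// i.e. the same auipc+jalr pair as a call but with rd = x0, so no return
// address is written; Uses = [X2] keeps the stack pointer live across the
// tail call. The t1 scratch register is the conventional choice and is
// illustrative here.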

// Pseudo for a rematerializable constant materialization sequence.
// This is an experimental feature enabled by
// -riscv-use-rematerializable-movimm in RISCVISelDAGToDAG.cpp.
// It will be expanded after register allocation.
// FIXME: The scheduling information does not reflect the multiple instructions.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8,
    isReMaterializable = 1 in
def PseudoMovImm : Pseudo<(outs GPR:$dst), (ins i32imm:$imm), []>,
                   Sched<[WriteIALU]>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                       "lla", "$dst, $src">;

// Refer to comment on PseudoLI for explanation of Size=32.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 32, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLLAImm : Pseudo<(outs GPR:$dst), (ins ixlenimm_li_restricted:$imm), [],
                          "lla", "$dst, $imm">;
def : Pat<(riscv_lla tglobaladdr:$in), (PseudoLLA tglobaladdr:$in)>;
def : Pat<(riscv_lla tblockaddress:$in), (PseudoLLA tblockaddress:$in)>;
def : Pat<(riscv_lla tjumptable:$in), (PseudoLLA tjumptable:$in)>;
def : Pat<(riscv_lla tconstpool:$in), (PseudoLLA tconstpool:$in)>;

let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLGA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                       "lga", "$dst, $src">;

let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                      "la", "$dst, $src">;

// Refer to comment on PseudoLI for explanation of Size=32.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 32,
    isCodeGenOnly = 0, isAsmParserOnly = 1 in
def PseudoLAImm : Pseudo<(outs GPR:$rd), (ins ixlenimm_li_restricted:$imm), [],
                         "la", "$rd, $imm">;

let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLA_TLS_IE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                             "la.tls.ie", "$dst, $src">;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLA_TLS_GD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                             "la.tls.gd", "$dst, $src">;

let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 32, isCodeGenOnly = 0 in
def PseudoLA_TLSDESC : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                              "la.tlsdesc", "$dst, $src">;

def TLSDESCCallSymbol : AsmOperandClass {
  let Name = "TLSDESCCallSymbol";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidTLSDESCCallSymbol";
  let ParserMethod = "parseOperandWithModifier";
}

// A bare symbol with the %tlsdesc_call variant.
def tlsdesc_call_symbol : Operand<XLenVT> {
  let ParserMatchClass = TLSDESCCallSymbol;
}
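
// Illustrative distinction between the pseudos above: `lla rd, sym` always
// expands to a PC-relative auipc/addi pair (a local address), whereas
// `la rd, sym` may instead load the address from the GOT when generating
// position-independent code, which is why PseudoLA and PseudoLGA are marked
// mayLoad = 1 while PseudoLLA is not.
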
// This is a special case of the JALR instruction used to facilitate the use of
// a fourth operand to emit a relocation on a symbol relating to this
// instruction. The relocation does not affect any bits of the instruction
// itself but is used as a hint to the linker.
let isCall = 1, isBarrier = 1, isCodeGenOnly = 0, Size = 8, hasSideEffects = 0,
    mayStore = 0, mayLoad = 0 in
def PseudoTLSDESCCall : Pseudo<(outs GPR:$rd),
                               (ins GPR:$rs1, simm12:$imm12, tlsdesc_call_symbol:$src), [],
                               "jalr", "$rd, ${imm12}(${rs1}), $src">,
                        Sched<[WriteJalr, ReadJalr]> {
  let Defs = [X10];
  let Uses = [X10];
}


/// Sign/Zero Extends

// There are single-instruction versions of these in Zbb, so disable these
// Pseudos if that extension is present.
let hasSideEffects = 0, mayLoad = 0,
    mayStore = 0, isCodeGenOnly = 0, isAsmParserOnly = 1 in {
def PseudoSEXT_B : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "sext.b", "$rd, $rs">;
def PseudoSEXT_H : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "sext.h", "$rd, $rs">;
// rv64's sext.w is defined above, using InstAlias<"sext.w ...
// zext.b is defined above, using InstAlias<"zext.b ...
def PseudoZEXT_H : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "zext.h", "$rd, $rs">;
} // hasSideEffects = 0, ...

let Predicates = [IsRV64], hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    isCodeGenOnly = 0, isAsmParserOnly = 1 in {
def PseudoZEXT_W : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "zext.w", "$rd, $rs">;
} // Predicates = [IsRV64], ...

/// Loads

class LdPat<PatFrag LoadOp, RVInst Inst, ValueType vt = XLenVT>
  : Pat<(vt (LoadOp (AddrRegImm (XLenVT GPR:$rs1), simm12:$imm12))),
        (Inst GPR:$rs1, simm12:$imm12)>;

def : LdPat<sextloadi8, LB>;
def : LdPat<extloadi8, LBU>; // Prefer unsigned due to no c.lb in Zcb.
def : LdPat<sextloadi16, LH>;
def : LdPat<extloadi16, LH>;
def : LdPat<load, LW, i32>;
def : LdPat<zextloadi8, LBU>;
def : LdPat<zextloadi16, LHU>;

/// Stores

class StPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
            ValueType vt>
  : Pat<(StoreOp (vt StTy:$rs2), (AddrRegImm (XLenVT GPR:$rs1),
                                             simm12:$imm12)),
        (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;

def : StPat<truncstorei8, SB, GPR, XLenVT>;
def : StPat<truncstorei16, SH, GPR, XLenVT>;
def : StPat<store, SW, GPR, i32>;

/// Fences

// Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
// Manual: Volume I.

// fence acquire -> fence r, rw
def : Pat<(atomic_fence (XLenVT 4), (timm)), (FENCE 0b10, 0b11)>;
// fence release -> fence rw, w
def : Pat<(atomic_fence (XLenVT 5), (timm)), (FENCE 0b11, 0b1)>;
// fence acq_rel -> fence.tso
def : Pat<(atomic_fence (XLenVT 6), (timm)), (FENCE_TSO)>;
// fence seq_cst -> fence rw, rw
def : Pat<(atomic_fence (XLenVT 7), (timm)), (FENCE 0b11, 0b11)>;

// Lowering for atomic load and store is defined in RISCVInstrInfoA.td.
// Although these are lowered to fence+load/store instructions defined in the
// base RV32I/RV64I ISA, this lowering is only used when the A extension is
// present. This is necessary as it isn't valid to mix __atomic_* libcalls
// with inline atomic operations for the same object.
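
// Illustrative example: an acquire fence such as
// __atomic_thread_fence(__ATOMIC_ACQUIRE) reaches the DAG as
// (atomic_fence 4, ...) and, per the table referenced above, is emitted as
// `fence r, rw`; a seq_cst fence (ordering 7) becomes `fence rw, rw`.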

/// Access to system registers

// Helpers for defining specific operations. They are defined for each system
// register separately. Side effect is not used because dependencies are
// expressed via use-def properties.

class ReadSysReg<SysReg SR, list<Register> Regs>
  : Pseudo<(outs GPR:$rd), (ins),
           [(set GPR:$rd, (XLenVT (riscv_read_csr (XLenVT SR.Encoding))))]>,
    PseudoInstExpansion<(CSRRS GPR:$rd, SR.Encoding, X0)> {
  let hasSideEffects = 0;
  let Uses = Regs;
}

class WriteSysReg<SysReg SR, list<Register> Regs>
  : Pseudo<(outs), (ins GPR:$val),
           [(riscv_write_csr (XLenVT SR.Encoding), (XLenVT GPR:$val))]>,
    PseudoInstExpansion<(CSRRW X0, SR.Encoding, GPR:$val)> {
  let hasSideEffects = 0;
  let Defs = Regs;
}

class WriteSysRegImm<SysReg SR, list<Register> Regs>
  : Pseudo<(outs), (ins uimm5:$val),
           [(riscv_write_csr (XLenVT SR.Encoding), uimm5:$val)]>,
    PseudoInstExpansion<(CSRRWI X0, SR.Encoding, uimm5:$val)> {
  let hasSideEffects = 0;
  let Defs = Regs;
}

class SwapSysReg<SysReg SR, list<Register> Regs>
  : Pseudo<(outs GPR:$rd), (ins GPR:$val),
           [(set GPR:$rd, (riscv_swap_csr (XLenVT SR.Encoding), (XLenVT GPR:$val)))]>,
    PseudoInstExpansion<(CSRRW GPR:$rd, SR.Encoding, GPR:$val)> {
  let hasSideEffects = 0;
  let Uses = Regs;
  let Defs = Regs;
}

class SwapSysRegImm<SysReg SR, list<Register> Regs>
  : Pseudo<(outs GPR:$rd), (ins uimm5:$val),
           [(set GPR:$rd, (XLenVT (riscv_swap_csr (XLenVT SR.Encoding), uimm5:$val)))]>,
    PseudoInstExpansion<(CSRRWI GPR:$rd, SR.Encoding, uimm5:$val)> {
  let hasSideEffects = 0;
  let Uses = Regs;
  let Defs = Regs;
}

def ReadFRM : ReadSysReg<SysRegFRM, [FRM]>;
def WriteFRM : WriteSysReg<SysRegFRM, [FRM]>;
def WriteFRMImm : WriteSysRegImm<SysRegFRM, [FRM]>;
def SwapFRMImm : SwapSysRegImm<SysRegFRM, [FRM]>;

def WriteVXRMImm : WriteSysRegImm<SysRegVXRM, [VXRM]>;

let hasSideEffects = true in {
def ReadFFLAGS : ReadSysReg<SysRegFFLAGS, [FFLAGS]>;
def WriteFFLAGS : WriteSysReg<SysRegFFLAGS, [FFLAGS]>;
}

/// Other pseudo-instructions

// Pessimistically assume the stack pointer will be clobbered
let Defs = [X2], Uses = [X2] in {
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(callseq_start timm:$amt1, timm:$amt2)]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                            [(callseq_end timm:$amt1, timm:$amt2)]>;
} // Defs = [X2], Uses = [X2]

/// RV64 patterns

let Predicates = [IsRV64, NotHasStdExtZba] in {
def : Pat<(i64 (and GPR:$rs1, 0xffffffff)), (SRLI (i64 (SLLI GPR:$rs1, 32)), 32)>;

// If we're shifting a 32-bit zero extended value left by 0-31 bits, use 2
// shifts instead of 3. This can occur when unsigned is used to index an array.
def : Pat<(i64 (shl (and GPR:$rs1, 0xffffffff), uimm5:$shamt)),
          (SRLI (i64 (SLLI GPR:$rs1, 32)), (ImmSubFrom32 uimm5:$shamt))>;
}
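
// Illustrative example for the NotHasStdExtZba block above: zero-extending a
// 32-bit value, (i64 (and GPR:$rs1, 0xffffffff)), becomes two shifts,
//   slli rd, rs1, 32
//   srli rd, rd, 32
// and when the zero-extended value feeds a left shift by c in [0, 31], the
// three shifts fold into two: slli by 32 followed by srli by (32 - c).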

class binop_allhusers<SDPatternOperator operator>
  : PatFrag<(ops node:$lhs, node:$rhs),
            (XLenVT (operator node:$lhs, node:$rhs)), [{
  return hasAllHUsers(Node);
}]>;

// PatFrag to allow ADDW/SUBW/MULW/SLLW to be selected from i64 add/sub/mul/shl
// if only the lower 32 bits of their result are used.
class binop_allwusers<SDPatternOperator operator>
  : PatFrag<(ops node:$lhs, node:$rhs),
            (i64 (operator node:$lhs, node:$rhs)), [{
  return hasAllWUsers(Node);
}]>;

def sexti32_allwusers : PatFrag<(ops node:$src),
                                (sext_inreg node:$src, i32), [{
  return hasAllWUsers(Node);
}]>;

def ImmSExt32 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(SignExtend64<32>(N->getSExtValue()),
                                   SDLoc(N), N->getValueType(0));
}]>;
// Look for constants where the upper 32 bits are 0, but sign extending bit 31
// would be a simm12.
def u32simm12 : ImmLeaf<XLenVT, [{
  return isUInt<32>(Imm) && isInt<12>(SignExtend64<32>(Imm));
}], ImmSExt32>;

let Predicates = [IsRV64] in {

def : Pat<(i64 (and GPR:$rs, immop_oneuse<LeadingOnesWMask>:$mask)),
          (SLLI (i64 (SRLIW $rs, (TrailingZeros imm:$mask))),
                (TrailingZeros imm:$mask))>;

/// sext and zext

// Sign extend is not needed if all users are W instructions.
def : Pat<(sexti32_allwusers GPR:$rs1), (XLenVT GPR:$rs1)>;

def : Pat<(sext_inreg GPR:$rs1, i32), (ADDIW GPR:$rs1, 0)>;

/// ALU operations

def : Pat<(i64 (srl (and GPR:$rs1, 0xffffffff), uimm5:$shamt)),
          (SRLIW GPR:$rs1, uimm5:$shamt)>;
def : Pat<(i64 (srl (shl GPR:$rs1, (i64 32)), uimm6gt32:$shamt)),
          (SRLIW GPR:$rs1, (ImmSub32 uimm6gt32:$shamt))>;
def : Pat<(sra (sext_inreg GPR:$rs1, i32), uimm5:$shamt),
          (SRAIW GPR:$rs1, uimm5:$shamt)>;
def : Pat<(i64 (sra (shl GPR:$rs1, (i64 32)), uimm6gt32:$shamt)),
          (SRAIW GPR:$rs1, (ImmSub32 uimm6gt32:$shamt))>;

def : PatGprGpr<shiftopw<riscv_sllw>, SLLW>;
def : PatGprGpr<shiftopw<riscv_srlw>, SRLW>;
def : PatGprGpr<shiftopw<riscv_sraw>, SRAW>;

// Select W instructions if only the lower 32 bits of the result are used.
def : PatGprGpr<binop_allwusers<add>, ADDW>;
def : PatGprSimm12<binop_allwusers<add>, ADDIW>;
def : PatGprImm<binop_allwusers<add>, ADDIW, u32simm12>;
def : PatGprGpr<binop_allwusers<sub>, SUBW>;
def : PatGprImm<binop_allwusers<shl>, SLLIW, uimm5>;

// If this is a shr of a value sign extended from i32, and all the users only
// use the lower 32 bits, we can use an sraiw to remove the sext_inreg. This
// occurs because SimplifyDemandedBits prefers srl over sra.
def : Pat<(binop_allwusers<srl> (sext_inreg GPR:$rs1, i32), uimm5:$shamt),
          (SRAIW GPR:$rs1, uimm5:$shamt)>;

// Use binop_allwusers to recover immediates that may have been broken by
// SimplifyDemandedBits.
def : Pat<(binop_allwusers<and> GPR:$rs1, 0xffffffff),
          (COPY GPR:$rs1)>;
def : PatGprImm<binop_allwusers<and>, ANDI, u32simm12>;
def : PatGprImm<binop_allwusers<or>, ORI, u32simm12>;
def : PatGprImm<binop_allwusers<xor>, XORI, u32simm12>;

/// Loads

def : LdPat<sextloadi32, LW, i64>;
def : LdPat<extloadi32, LW, i64>;
def : LdPat<zextloadi32, LWU, i64>;
def : LdPat<load, LD, i64>;

/// Stores

def : StPat<truncstorei32, SW, GPR, i64>;
def : StPat<store, SD, GPR, i64>;
} // Predicates = [IsRV64]
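
// Illustrative example for the binop_allwusers patterns above: in
//   %s = add i64 %a, %b
//   %t = trunc i64 %s to i32
// every user of %s only consumes the low 32 bits, so hasAllWUsers holds and
// the add selects to `addw`; since W instructions sign-extend their 32-bit
// result, a later sext.w (addiw rd, rs, 0) becomes unnecessary.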

// On RV64, we can directly read these 64-bit counter CSRs.
let Predicates = [IsRV64] in {
/// readcyclecounter
def : Pat<(i64 (readcyclecounter)), (CSRRS CYCLE.Encoding, (XLenVT X0))>;
/// readsteadycounter
def : Pat<(i64 (readsteadycounter)), (CSRRS TIME.Encoding, (XLenVT X0))>;
}

// On RV32, ReadCounterWide will be expanded to the suggested loop reading both
// halves of 64-bit counter CSRs.
let Predicates = [IsRV32], usesCustomInserter = 1, hasNoSchedulingInfo = 1 in
def ReadCounterWide : Pseudo<(outs GPR:$lo, GPR:$hi),
                             (ins i32imm:$csr_lo, i32imm:$csr_hi),
                             [(set GPR:$lo, GPR:$hi,
                               (riscv_read_counter_wide csr_sysreg:$csr_lo, csr_sysreg:$csr_hi))],
                             "", "">;

/// traps

// We lower `trap` to `unimp`, as this causes a hard exception on nearly all
// systems.
def : Pat<(trap), (UNIMP)>;

// We lower `debugtrap` to `ebreak`, as this will get the attention of the
// debugger if possible.
def : Pat<(debugtrap), (EBREAK)>;

let Predicates = [IsRV64], Uses = [X5],
    Defs = [X1, X6, X7, X28, X29, X30, X31], Size = 8 in
def HWASAN_CHECK_MEMACCESS_SHORTGRANULES
  : Pseudo<(outs), (ins GPRJALR:$ptr, i32imm:$accessinfo),
           [(int_hwasan_check_memaccess_shortgranules (i64 X5), GPRJALR:$ptr,
                                                      (i32 timm:$accessinfo))]>;

// This gets lowered into a 20-byte instruction sequence (at most)
let hasSideEffects = 0, mayLoad = 1, mayStore = 0,
    Defs = [ X6, X7, X28, X29, X30, X31 ], Size = 20 in {
def KCFI_CHECK
  : Pseudo<(outs), (ins GPRJALR:$ptr, i32imm:$type), []>, Sched<[]>;
}

/// Simple optimization
def : Pat<(XLenVT (add GPR:$rs1, immop_oneuse<AddiPair>:$rs2)),
          (ADDI (XLenVT (ADDI GPR:$rs1, (AddiPairImmLarge imm:$rs2))),
                (AddiPairImmSmall imm:$rs2))>;
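
// Illustrative example of the AddiPair folding above: adding 3000 (just
// outside the simm12 range) to a register becomes two adds,
//   addi a0, a0, 2047
//   addi a0, a0, 953
// assuming, as the operand names suggest, that AddiPairImmLarge contributes
// the saturated +2047/-2048 part and AddiPairImmSmall the remainder.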

let Predicates = [IsRV64] in {
// Select W instructions if only the lower 32 bits of the result are used.
def : Pat<(binop_allwusers<add> GPR:$rs1, immop_oneuse<AddiPair>:$rs2),
          (ADDIW (i64 (ADDIW GPR:$rs1, (AddiPairImmLarge imm:$rs2))),
                 (AddiPairImmSmall imm:$rs2))>;
}

//===----------------------------------------------------------------------===//
// Standard extensions
//===----------------------------------------------------------------------===//

// Multiply and Division
include "RISCVInstrInfoM.td"

// Atomic
include "RISCVInstrInfoA.td"
include "RISCVInstrInfoZa.td"
include "RISCVInstrInfoZalasr.td"

// Scalar FP
include "RISCVInstrInfoF.td"
include "RISCVInstrInfoD.td"
include "RISCVInstrInfoZfh.td"
include "RISCVInstrInfoZfbfmin.td"
include "RISCVInstrInfoZfa.td"

// Scalar bitmanip and cryptography
include "RISCVInstrInfoZb.td"
include "RISCVInstrInfoZk.td"

// Vector
include "RISCVInstrInfoV.td"
include "RISCVInstrInfoZvk.td"

// Compressed
include "RISCVInstrInfoC.td"
include "RISCVInstrInfoZc.td"
include "RISCVInstrInfoZcmop.td"

// Integer
include "RISCVInstrInfoZimop.td"
include "RISCVInstrInfoZicbo.td"
include "RISCVInstrInfoZicond.td"
include "RISCVInstrInfoZicfiss.td"

//===----------------------------------------------------------------------===//
// Vendor extensions
//===----------------------------------------------------------------------===//

include "RISCVInstrInfoXVentana.td"
include "RISCVInstrInfoXTHead.td"
include "RISCVInstrInfoXSf.td"
include "RISCVInstrInfoSFB.td"
include "RISCVInstrInfoXCV.td"
include "RISCVInstrInfoXwch.td"
include "RISCVInstrInfoXqci.td"
include "RISCVInstrInfoXMips.td"

//===----------------------------------------------------------------------===//
// Global ISel
//===----------------------------------------------------------------------===//

include "RISCVInstrGISel.td"