//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "Utils/RISCVMatInt.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  if (Subtarget.isRV32E())
    report_fatal_error("Codegen not yet implemented for RV32E");

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  switch (ABI) {
  default:
    report_fatal_error("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
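  // There is no native dynamic stack allocation instruction; expanding
  // DYNAMIC_STACKALLOC lets the generic legaliser implement it with
  // stack-pointer arithmetic.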
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::SELECT, XLenVT, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
    setOperationAction(ISD::SHL, MVT::i32, Custom);
    setOperationAction(ISD::SRA, MVT::i32, Custom);
    setOperationAction(ISD::SRL, MVT::i32, Custom);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, XLenVT, Expand);
    setOperationAction(ISD::MULHS, XLenVT, Expand);
    setOperationAction(ISD::MULHU, XLenVT, Expand);
    setOperationAction(ISD::SDIV, XLenVT, Expand);
    setOperationAction(ISD::UDIV, XLenVT, Expand);
    setOperationAction(ISD::SREM, XLenVT, Expand);
    setOperationAction(ISD::UREM, XLenVT, Expand);
  }

  if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, MVT::i32, Custom);
    setOperationAction(ISD::SDIV, MVT::i32, Custom);
    setOperationAction(ISD::UDIV, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
  }

  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);

  setOperationAction(ISD::ROTL, XLenVT, Expand);
  setOperationAction(ISD::ROTR, XLenVT, Expand);
  setOperationAction(ISD::BSWAP, XLenVT, Expand);
  setOperationAction(ISD::CTTZ, XLenVT, Expand);
  setOperationAction(ISD::CTLZ, XLenVT, Expand);
  setOperationAction(ISD::CTPOP, XLenVT, Expand);

  ISD::CondCode FPCCToExtend[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE,  ISD::SETNE};

  ISD::NodeType FPOpToExtend[] = {
      ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM,
      ISD::FP16_TO_FP, ISD::FP_TO_FP16};

  if (Subtarget.hasStdExtF()) {
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    for (auto CC : FPCCToExtend)
      setCondCodeAction(CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    for (auto Op : FPOpToExtend)
      setOperationAction(Op, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtD()) {
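    // With the D extension, f64 mirrors the f32 configuration above.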
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    for (auto CC : FPCCToExtend)
      setCondCodeAction(CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    for (auto Op : FPOpToExtend)
      setOperationAction(Op, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  }

  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);
  setOperationAction(ISD::ConstantPool, XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  if (Subtarget.hasStdExtA()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
    setMinCmpXchgSizeInBits(32);
  } else {
    setMaxAtomicSizeInBitsSupported(0);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  // Function alignments.
  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
  setMinFunctionAlignment(FunctionAlignment);
  setPrefFunctionAlignment(FunctionAlignment);

  // Effectively disable jump table generation.
  setMinimumJumpTableEntries(INT_MAX);
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                            EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  return VT.changeVectorElementTypeToInteger();
}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32: {
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  }
  }
}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Require a 12-bit signed offset.
  if (!isInt<12>(AM.BaseOffs))
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}

bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
      !SrcVT.isInteger() || !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Zexts are free if they can be combined with a load.
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  return TargetLowering::isZExtFree(Val, VT2);
}

bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}

bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  return (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
         (VT == MVT::f64 && Subtarget.hasStdExtD());
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly in the RISC-V
// ISA.
static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

// Return the RISC-V branch opcode that matches the given DAG integer
// condition code. The CondCode must be one of those supported by the RISC-V
// ISA (see normaliseSetCC).
static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported CondCode");
  case ISD::SETEQ:
    return RISCV::BEQ;
  case ISD::SETNE:
    return RISCV::BNE;
  case ISD::SETLT:
    return RISCV::BLT;
  case ISD::SETGE:
    return RISCV::BGE;
  case ISD::SETULT:
    return RISCV::BLTU;
  case ISD::SETUGE:
    return RISCV::BGEU;
  }
}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented operand");
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(Op, DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::SHL_PARTS:
    return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, false);
  case ISD::BITCAST: {
    assert(Subtarget.is64Bit() && Subtarget.hasStdExtF() &&
           "Unexpected custom legalisation");
    SDLoc DL(Op);
    SDValue Op0 = Op.getOperand(0);
    if (Op.getValueType() != MVT::f32 || Op0.getValueType() != MVT::i32)
      return SDValue();
    SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
    SDValue FPConv = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
    return FPConv;
  }
  }
}

static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
}

static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
                                   Flags);
}

static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(),
                                   N->getOffset(), Flags);
}

template <class NodeTy>
SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                     bool IsLocal) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());

  if (isPositionIndependent()) {
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    if (IsLocal)
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);

    // Use PC-relative addressing to access the GOT for this symbol, then load
    // the address from the GOT. This generates the pattern (PseudoLA sym),
    // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
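    // (On RV32, the load in this expansion is lw rather than ld.)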
    return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
  }

  switch (getTargetMachine().getCodeModel()) {
  default:
    report_fatal_error("Unsupported code model for lowering");
  case CodeModel::Small: {
    // Generate a sequence for accessing addresses within the first 2 GiB of
    // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
    SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
    SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
    SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
    return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
  }
  case CodeModel::Medium: {
    // Generate a sequence for accessing addresses within any 2GiB range within
    // the address space. This generates the pattern (PseudoLLA sym), which
    // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
  }
  }
}

SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();
  MVT XLenVT = Subtarget.getXLenVT();

  const GlobalValue *GV = N->getGlobal();
  bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
  SDValue Addr = getAddr(N, DAG, IsLocal);

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, XLenVT));
  return Addr;
}

SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
                                               SelectionDAG &DAG) const {
  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
                                              SelectionDAG &DAG,
                                              bool UseGOT) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  const GlobalValue *GV = N->getGlobal();
  MVT XLenVT = Subtarget.getXLenVT();

  if (UseGOT) {
    // Use PC-relative addressing to access the GOT for this TLS symbol, then
    // load the address from the GOT and add the thread pointer. This generates
    // the pattern (PseudoLA_TLS_IE sym), which expands to
    // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
    SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
    SDValue Load =
        SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);

    // Add the thread pointer.
    SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
    return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
  }

  // Generate a sequence for accessing the address relative to the thread
  // pointer, with the appropriate adjustment for the thread pointer offset.
  // This generates the pattern
  // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
  SDValue AddrHi =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
  SDValue AddrAdd =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
  SDValue AddrLo =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);

  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
  SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
  SDValue MNAdd = SDValue(
      DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
      0);
  return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
}

SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
                                               SelectionDAG &DAG) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
  const GlobalValue *GV = N->getGlobal();

  // Use a PC-relative addressing mode to access the global dynamic GOT
  // address. This generates the pattern (PseudoLA_TLS_GD sym), which expands
  // to (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
  SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
  SDValue Load =
      SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);

  // Prepare argument list to generate call.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Load;
  Entry.Ty = CallTy;
  Args.push_back(Entry);

  // Setup call to __tls_get_addr.
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL)
      .setChain(DAG.getEntryNode())
      .setLibCallee(CallingConv::C, CallTy,
                    DAG.getExternalSymbol("__tls_get_addr", Ty),
                    std::move(Args));

  return LowerCallTo(CLI).first;
}

SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();
  MVT XLenVT = Subtarget.getXLenVT();

  // Non-PIC TLS lowering should always use the LocalExec model.
  TLSModel::Model Model = isPositionIndependent()
                              ? getTargetMachine().getTLSModel(N->getGlobal())
                              : TLSModel::LocalExec;

  SDValue Addr;
  switch (Model) {
  case TLSModel::LocalExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
    break;
  case TLSModel::InitialExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
    break;
  case TLSModel::LocalDynamic:
  case TLSModel::GeneralDynamic:
    Addr = getDynamicTLSAddr(N, DAG);
    break;
  }

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, XLenVT));
  return Addr;
}

SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue CondV = Op.getOperand(0);
  SDValue TrueV = Op.getOperand(1);
  SDValue FalseV = Op.getOperand(2);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();

  // If the result type is XLenVT and CondV is the output of a SETCC node
  // which also operated on XLenVT inputs, then merge the SETCC node into the
  // lowered RISCVISD::SELECT_CC to take advantage of the integer
  // compare+branch instructions. i.e.:
  // (select (setcc lhs, rhs, cc), truev, falsev)
  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
  if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
      CondV.getOperand(0).getSimpleValueType() == XLenVT) {
    SDValue LHS = CondV.getOperand(0);
    SDValue RHS = CondV.getOperand(1);
    auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
    ISD::CondCode CCVal = CC->get();

    normaliseSetCC(LHS, RHS, CCVal);

    SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
    SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
    return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
  }

  // Otherwise:
  // (select condv, truev, falsev)
  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
  SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};

  return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
}

SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();

  SDLoc DL(Op);
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 getPointerTy(MF.getDataLayout()));

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
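  // Operand 0 is the chain, operand 1 the va_list pointer, and operand 2 the
  // source Value used for alias information.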
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);
  Register FrameReg = RI.getFrameRegister(MF);
  int XLenInBytes = Subtarget.getXLen() / 8;

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  while (Depth--) {
    int Offset = -(XLenInBytes * 2);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, DL));
    FrameAddr =
        DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
  }
  return FrameAddr;
}

SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
                                             SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);
  MVT XLenVT = Subtarget.getXLenVT();
  int XLenInBytes = Subtarget.getXLen() / 8;

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    int Off = -XLenInBytes;
    SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(Off, DL, VT);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return the value of the return address register, marking it an implicit
  // live-in.
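  // On RISC-V the return address register is x1 (ra).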
  Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
}

SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // if Shamt-XLEN < 0: // Shamt < XLEN
  //   Lo = Lo << Shamt
  //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
  // else:
  //   Lo = 0
  //   Hi = Lo << (Shamt-XLEN)

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
  SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
  SDValue ShiftRightLo =
      DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
  SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
  SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
                                                  bool IsSRA) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // SRA expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //     Hi = Hi >>s Shamt
  //   else:
  //     Lo = Hi >>s (Shamt-XLEN);
  //     Hi = Hi >>s (XLEN-1)
  //
  // SRL expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //     Hi = Hi >>u Shamt
  //   else:
  //     Lo = Hi >>u (Shamt-XLEN);
  //     Hi = 0;

  unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
  SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
  SDValue ShiftLeftHi =
      DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
  SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
  SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
  SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
  SDValue HiFalse =
      IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

// Returns the opcode of the target-specific SDNode that implements the 32-bit
// form of the given Opcode.
static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode");
  case ISD::SHL:
    return RISCVISD::SLLW;
  case ISD::SRA:
    return RISCVISD::SRAW;
  case ISD::SRL:
    return RISCVISD::SRLW;
  case ISD::SDIV:
    return RISCVISD::DIVW;
  case ISD::UDIV:
    return RISCVISD::DIVUW;
  case ISD::UREM:
    return RISCVISD::REMUW;
  }
}

// Converts the given 32-bit operation to a target-specific SelectionDAG node.
// Because i32 isn't a legal type for RV64, these operations would otherwise
// be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
// instructions later on because the fact that the operation was originally of
// type i32 is lost.
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
  // ReplaceNodeResults requires we maintain the same type for the return
  // value.
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}

// Converts the given 32-bit operation to an i64 operation with sign-extension
// semantics, reducing the number of sign-extension instructions required.
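// For example, (i32 (add x, y)) becomes
// (trunc (sext_inreg (add (anyext x), (anyext y)), i32)), which can later be
// selected as addw.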
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
  SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
                               DAG.getValueType(MVT::i32));
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}

void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  SDLoc DL(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom type legalize this operation!");
  case ISD::READCYCLECOUNTER: {
    assert(!Subtarget.is64Bit() &&
           "READCYCLECOUNTER only has custom type legalization on riscv32");

    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RCW =
        DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));

    Results.push_back(RCW);
    Results.push_back(RCW.getValue(1));
    Results.push_back(RCW.getValue(2));
    break;
  }
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::UREM:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           Subtarget.hasStdExtM() && "Unexpected custom legalisation");
    if (N->getOperand(0).getOpcode() == ISD::Constant ||
        N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::BITCAST: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           Subtarget.hasStdExtF() && "Unexpected custom legalisation");
    SDLoc DL(N);
    SDValue Op0 = N->getOperand(0);
    if (Op0.getValueType() != MVT::f32)
      return;
    SDValue FPConv =
        DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
    break;
  }
  }
}

SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  switch (N->getOpcode()) {
  default:
    break;
  case RISCVISD::SplitF64: {
    SDValue Op0 = N->getOperand(0);
    // If the input to SplitF64 is just BuildPairF64 then the operation is
    // redundant. Instead, use BuildPairF64's operands directly.
    if (Op0->getOpcode() == RISCVISD::BuildPairF64)
      return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));

    SDLoc DL(N);

    // It's cheaper to materialise two 32-bit integers than to load a double
    // from the constant pool and transfer it to integer registers through the
    // stack.
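    // For example, the f64 constant -0.0 (bit pattern 0x8000000000000000)
    // becomes the pair Lo = 0x00000000, Hi = 0x80000000.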
    if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
      APInt V = C->getValueAPF().bitcastToAPInt();
      SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
      SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
      return DCI.CombineTo(N, Lo, Hi);
    }

    // This is a target-specific version of a DAGCombine performed in
    // DAGCombiner::visitBITCAST. It performs the equivalent of:
    // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
    // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
    if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
        !Op0.getNode()->hasOneUse())
      break;
    SDValue NewSplitF64 =
        DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
                    Op0.getOperand(0));
    SDValue Lo = NewSplitF64.getValue(0);
    SDValue Hi = NewSplitF64.getValue(1);
    APInt SignBit = APInt::getSignMask(32);
    if (Op0.getOpcode() == ISD::FNEG) {
      SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
                                  DAG.getConstant(SignBit, DL, MVT::i32));
      return DCI.CombineTo(N, Lo, NewHi);
    }
    assert(Op0.getOpcode() == ISD::FABS);
    SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
                                DAG.getConstant(~SignBit, DL, MVT::i32));
    return DCI.CombineTo(N, Lo, NewHi);
  }
  case RISCVISD::SLLW:
  case RISCVISD::SRAW:
  case RISCVISD::SRLW: {
    // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);
    APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
    APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
    if ((SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)))
      return SDValue();
    break;
  }
  case RISCVISD::FMV_X_ANYEXTW_RV64: {
    SDLoc DL(N);
    SDValue Op0 = N->getOperand(0);
    // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
    // conversion is unnecessary and can be replaced with an ANY_EXTEND
    // of the FMV_W_X_RV64 operand.
    if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
      SDValue AExtOp =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0.getOperand(0));
      return DCI.CombineTo(N, AExtOp);
    }

    // This is a target-specific version of a DAGCombine performed in
    // DAGCombiner::visitBITCAST. It performs the equivalent of:
    // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
    // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
    if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
        !Op0.getNode()->hasOneUse())
      break;
    SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
                                 Op0.getOperand(0));
    APInt SignBit = APInt::getSignMask(32).sext(64);
    if (Op0.getOpcode() == ISD::FNEG) {
      return DCI.CombineTo(N,
                           DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
                                       DAG.getConstant(SignBit, DL, MVT::i64)));
    }
    assert(Op0.getOpcode() == ISD::FABS);
    return DCI.CombineTo(N,
                         DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
                                     DAG.getConstant(~SignBit, DL, MVT::i64)));
  }
  }

  return SDValue();
}

bool RISCVTargetLowering::isDesirableToCommuteWithShift(
    const SDNode *N, CombineLevel Level) const {
  // The following folds are only desirable if `(OP _, c1 << c2)` can be
  // materialised in fewer instructions than `(OP _, c1)`:
  //
  //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
  SDValue N0 = N->getOperand(0);
  EVT Ty = N0.getValueType();
  if (Ty.isScalarInteger() &&
      (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
    auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (C1 && C2) {
      APInt C1Int = C1->getAPIntValue();
      APInt ShiftedC1Int = C1Int << C2->getAPIntValue();

      // We can materialise `c1 << c2` into an add immediate, so it's "free",
      // and the combine should happen, to potentially allow further combines
      // later.
      if (ShiftedC1Int.getMinSignedBits() <= 64 &&
          isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
        return true;

      // We can materialise `c1` in an add immediate, so it's "free", and the
      // combine should be prevented.
      if (C1Int.getMinSignedBits() <= 64 &&
          isLegalAddImmediate(C1Int.getSExtValue()))
        return false;

      // Neither constant will fit into an immediate, so find materialisation
      // costs.
      int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
                                              Subtarget.is64Bit());
      int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
          ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());

      // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
      // combine should be prevented.
      if (C1Cost < ShiftedC1Cost)
        return false;
    }
  }
  return true;
}

unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  switch (Op.getOpcode()) {
  default:
    break;
  case RISCVISD::SLLW:
  case RISCVISD::SRAW:
  case RISCVISD::SRLW:
  case RISCVISD::DIVW:
  case RISCVISD::DIVUW:
  case RISCVISD::REMUW:
    // TODO: As the result is sign-extended, this is conservatively correct. A
    // more precise answer could be calculated for SRAW depending on known
    // bits in the shift amount.
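    // The i64 result is sign-extended from bit 31, so bits 63..31 are all
    // copies of bit 31, giving at least 33 sign bits.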
    return 33;
  }

  return 1;
}

static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
                                                  MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");

  // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
  // Should the count have wrapped while it was being read, we need to try
  // again.
  // ...
  // read:
  //   rdcycleh x3 # load high word of cycle
  //   rdcycle  x2 # load low word of cycle
  //   rdcycleh x4 # load high word of cycle
  //   bne x3, x4, read # check if high word reads match, otherwise try again
  // ...

  MachineFunction &MF = *BB->getParent();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MF.insert(It, LoopMBB);

  MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MF.insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(LoopMBB);

  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
  Register LoReg = MI.getOperand(0).getReg();
  Register HiReg = MI.getOperand(1).getReg();
  DebugLoc DL = MI.getDebugLoc();

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
      .addReg(RISCV::X0);
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
      .addReg(RISCV::X0);
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
      .addReg(RISCV::X0);

  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(HiReg)
      .addReg(ReadAgainReg)
      .addMBB(LoopMBB);

  LoopMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();

  return DoneMBB;
}

static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
                                             MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");

  MachineFunction &MF = *BB->getParent();
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
  Register LoReg = MI.getOperand(0).getReg();
  Register HiReg = MI.getOperand(1).getReg();
  Register SrcReg = MI.getOperand(2).getReg();
  const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();

  TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
                          RI);
  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                              MachineMemOperand::MOLoad, 8, 8);
  BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
      .addFrameIndex(FI)
      .addImm(0)
      .addMemOperand(MMO);
  BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
      .addFrameIndex(FI)
      .addImm(4)
      .addMemOperand(MMO);
  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
                                                 MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
         "Unexpected instruction");

  MachineFunction &MF = *BB->getParent();
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
  Register DstReg = MI.getOperand(0).getReg();
  Register LoReg = MI.getOperand(1).getReg();
  Register HiReg = MI.getOperand(2).getReg();
  const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                              MachineMemOperand::MOStore, 8, 8);
  BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
      .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
      .addFrameIndex(FI)
      .addImm(0)
      .addMemOperand(MMO);
  BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
      .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
      .addFrameIndex(FI)
      .addImm(4)
      .addMemOperand(MMO);
  TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

static bool isSelectPseudo(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case RISCV::Select_GPR_Using_CC_GPR:
  case RISCV::Select_FPR32_Using_CC_GPR:
  case RISCV::Select_FPR64_Using_CC_GPR:
    return true;
  }
}

static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
                                           MachineBasicBlock *BB) {
  // To "insert" Select_* instructions, we actually have to insert the triangle
  // control-flow pattern. The incoming instructions know the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and the condcode to use to select the appropriate branch.
  //
  // We produce the following control flow:
  //     HeadMBB
  //     |  \
  //     |  IfFalseMBB
  //     | /
  //    TailMBB
  //
  // When we find a sequence of selects we attempt to optimize their emission
  // by sharing the control flow. Currently we only handle cases where we have
  // multiple selects with the exact same condition (same LHS, RHS and CC).
  // The selects may be interleaved with other instructions if the other
  // instructions meet some requirements we deem safe:
  // - They are debug instructions. Otherwise,
  // - They do not have side-effects, do not access memory and their inputs do
  //   not depend on the results of the select pseudo-instructions.
  // The TrueV/FalseV operands of the selects cannot depend on the result of
  // previous selects in the sequence.
  // These conditions could be further relaxed. See the X86 target for a
  // related approach and more information.
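  // Pseudo operand layout: (dst, lhs, rhs, cc, truev, falsev); the PHI
  // creation below reads operands 4 and 5 accordingly.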
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());

  SmallVector<MachineInstr *, 4> SelectDebugValues;
  SmallSet<Register, 4> SelectDests;
  SelectDests.insert(MI.getOperand(0).getReg());

  MachineInstr *LastSelectPseudo = &MI;

  for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
       SequenceMBBI != E; ++SequenceMBBI) {
    if (SequenceMBBI->isDebugInstr())
      continue;
    else if (isSelectPseudo(*SequenceMBBI)) {
      if (SequenceMBBI->getOperand(1).getReg() != LHS ||
          SequenceMBBI->getOperand(2).getReg() != RHS ||
          SequenceMBBI->getOperand(3).getImm() != CC ||
          SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
          SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
        break;
      LastSelectPseudo = &*SequenceMBBI;
      SequenceMBBI->collectDebugValues(SelectDebugValues);
      SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
    } else {
      if (SequenceMBBI->hasUnmodeledSideEffects() ||
          SequenceMBBI->mayLoadOrStore())
        break;
      if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
            return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
          }))
        break;
    }
  }

  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction::iterator I = ++BB->getIterator();

  MachineBasicBlock *HeadMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, IfFalseMBB);
  F->insert(I, TailMBB);

  // Transfer debug instructions associated with the selects to TailMBB.
  for (MachineInstr *DebugInstr : SelectDebugValues) {
    TailMBB->push_back(DebugInstr->removeFromParent());
  }

  // Move all instructions after the sequence to TailMBB.
  TailMBB->splice(TailMBB->end(), HeadMBB,
                  std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi nodes for the selects.
  TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
  // Set the successors for HeadMBB.
  HeadMBB->addSuccessor(IfFalseMBB);
  HeadMBB->addSuccessor(TailMBB);

  // Insert appropriate branch.
  unsigned Opcode = getBranchOpcodeForIntCondCode(CC);

  BuildMI(HeadMBB, DL, TII.get(Opcode))
      .addReg(LHS)
      .addReg(RHS)
      .addMBB(TailMBB);

  // IfFalseMBB just falls through to TailMBB.
  IfFalseMBB->addSuccessor(TailMBB);

  // Create PHIs for all of the select pseudo-instructions.
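  // Each select in the recognised sequence becomes one PHI in TailMBB, all
  // guarded by the single conditional branch inserted above.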
  auto SelectMBBI = MI.getIterator();
  auto SelectEnd = std::next(LastSelectPseudo->getIterator());
  auto InsertionPoint = TailMBB->begin();
  while (SelectMBBI != SelectEnd) {
    auto Next = std::next(SelectMBBI);
    if (isSelectPseudo(*SelectMBBI)) {
      // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
      BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
              TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
          .addReg(SelectMBBI->getOperand(4).getReg())
          .addMBB(HeadMBB)
          .addReg(SelectMBBI->getOperand(5).getReg())
          .addMBB(IfFalseMBB);
      SelectMBBI->eraseFromParent();
    }
    SelectMBBI = Next;
  }

  F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
  return TailMBB;
}

MachineBasicBlock *
RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case RISCV::ReadCycleWide:
    assert(!Subtarget.is64Bit() &&
           "ReadCycleWide is only to be used on riscv32");
    return emitReadCycleWidePseudo(MI, BB);
  case RISCV::Select_GPR_Using_CC_GPR:
  case RISCV::Select_FPR32_Using_CC_GPR:
  case RISCV::Select_FPR64_Using_CC_GPR:
    return emitSelectPseudo(MI, BB);
  case RISCV::BuildPairF64Pseudo:
    return emitBuildPairF64Pseudo(MI, BB);
  case RISCV::SplitF64Pseudo:
    return emitSplitF64Pseudo(MI, BB);
  }
}

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
//   passed in a pair of registers (fp+fp, int+fp), and both registers are
//   available, then pass as two separate arguments. If either the GPRs or FPRs
//   are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
//   slot (as it is larger than 2*XLEN and the floating point rules don't
//   apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
//   word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
//   not based on its size and fields. If it will be returned by reference, the
//   frontend must modify the prototype so a pointer with the sret annotation is
//   passed as the first argument. This is not necessary for large scalar
//   returns.
// * Struct return values and varargs should be coerced to structs containing
//   register-size fields in the same situations they would be for fixed
//   arguments.
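// The argument registers below correspond to the psABI names a0-a7 (x10-x17)
// and fa0-fa7 (f10-f17).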
static const MCPhysReg ArgGPRs[] = {
  RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
  RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
};
static const MCPhysReg ArgFPR32s[] = {
  RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
  RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
};
static const MCPhysReg ArgFPR64s[] = {
  RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
  RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
};

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2) {
  unsigned XLenInBytes = XLen / 8;
  if (Register Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
        CCValAssign::Full));
    return false;
  }

  if (Register Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
        CCValAssign::Full));
  }

  return false;
}

// Implements the RISC-V calling convention. Returns true upon failure.
static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
                     MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
                     ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
                     bool IsRet, Type *OrigTy) {
  unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
  assert(XLen == 32 || XLen == 64);
  MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;

  // Any return value split into more than two values can't be returned
  // directly.
  if (IsRet && ValNo > 1)
    return true;

  // UseGPRForF32 if targeting one of the soft-float ABIs, if passing a
  // variadic argument, or if no F32 argument registers are available.
  bool UseGPRForF32 = true;
  // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
  // variadic argument, or if no F64 argument registers are available.
  bool UseGPRForF64 = true;

  switch (ABI) {
  default:
    llvm_unreachable("Unexpected ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    break;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    UseGPRForF32 = !IsFixed;
    break;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    UseGPRForF32 = !IsFixed;
    UseGPRForF64 = !IsFixed;
    break;
  }

  if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s))
    UseGPRForF32 = true;
  if (State.getFirstUnallocated(ArgFPR64s) == array_lengthof(ArgFPR64s))
    UseGPRForF64 = true;

  // From this point on, rely on UseGPRForF32, UseGPRForF64 and similar local
  // variables rather than directly checking against the target ABI.

  if (UseGPRForF32 && ValVT == MVT::f32) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::BCvt;
  } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
    LocVT = MVT::i64;
    LocInfo = CCValAssign::BCvt;
  }

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
    if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Handle passing f64 on RV32D with a soft float ABI or when floating point
  // registers are exhausted.
  if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
    assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
           "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
    // GPRs, split between a GPR and the stack, or passed completely on the
    // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
    // cases.
    Register Reg = State.AllocateReg(ArgGPRs);
    LocVT = MVT::i32;
    if (!Reg) {
      unsigned StackOffset = State.AllocateStack(8, 8);
      State.addLoc(
          CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
      return false;
    }
    if (!State.AllocateReg(ArgGPRs))
      State.AllocateStack(4, 4);
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  // Split arguments might be passed indirectly, so keep track of the pending
  // values.
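  // For example (an illustrative case): an i128 argument on RV32 is split by
  // legalisation into four i32 pieces. Since the original type is larger than
  // 2*XLEN it is passed indirectly, so each piece below is marked Indirect
  // and only the address of the value ends up in a register or stack slot.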
  if (ArgFlags.isSplit() || !PendingLocs.empty()) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::Indirect;
    PendingLocs.push_back(
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
    PendingArgFlags.push_back(ArgFlags);
    if (!ArgFlags.isSplitEnd()) {
      return false;
    }
  }

  // If the split argument only had two elements, it should be passed directly
  // in registers or on the stack.
  if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
    assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
    // Apply the normal calling convention rules to the first half of the
    // split argument.
    CCValAssign VA = PendingLocs[0];
    ISD::ArgFlagsTy AF = PendingArgFlags[0];
    PendingLocs.clear();
    PendingArgFlags.clear();
    return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
                               ArgFlags);
  }

  // Allocate to a register if possible, or else a stack slot.
  Register Reg;
  if (ValVT == MVT::f32 && !UseGPRForF32)
    Reg = State.AllocateReg(ArgFPR32s, ArgFPR64s);
  else if (ValVT == MVT::f64 && !UseGPRForF64)
    Reg = State.AllocateReg(ArgFPR64s, ArgFPR32s);
  else
    Reg = State.AllocateReg(ArgGPRs);
  unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8);

  // If we reach this point and PendingLocs is non-empty, we must be at the
  // end of a split argument that must be passed indirectly.
  if (!PendingLocs.empty()) {
    assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");

    for (auto &It : PendingLocs) {
      if (Reg)
        It.convertToReg(Reg);
      else
        It.convertToMem(StackOffset);
      State.addLoc(It);
    }
    PendingLocs.clear();
    PendingArgFlags.clear();
    return false;
  }

  assert((!UseGPRForF32 || !UseGPRForF64 || LocVT == XLenVT) &&
         "Expected an XLenVT at this stage");

  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  // When an f32 or f64 is passed on the stack, no bit-conversion is needed.
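  // (The stack slot simply holds the raw IEEE-754 bits, so unlike the GPR
  // case above there is no need to bit-cast through an integer LocVT.)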
  if (ValVT == MVT::f32 || ValVT == MVT::f64) {
    LocVT = ValVT;
    LocInfo = CCValAssign::Full;
  }
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
  return false;
}

void RISCVTargetLowering::analyzeInputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
  unsigned NumArgs = Ins.size();
  FunctionType *FType = MF.getFunction().getFunctionType();

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Ins[i].VT;
    ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;

    Type *ArgTy = nullptr;
    if (IsRet)
      ArgTy = FType->getReturnType();
    else if (Ins[i].isOrigArg())
      ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());

    RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
    if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
                 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
      LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
                        << EVT(ArgVT).getEVTString() << '\n');
      llvm_unreachable(nullptr);
    }
  }
}

void RISCVTargetLowering::analyzeOutputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
    CallLoweringInfo *CLI) const {
  unsigned NumArgs = Outs.size();

  for (unsigned i = 0; i != NumArgs; i++) {
    MVT ArgVT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;

    RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
    if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
                 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
      LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
                        << EVT(ArgVT).getEVTString() << "\n");
      llvm_unreachable(nullptr);
    }
  }
}

// Convert Val to the ValVT of VA. Should not be called for
// CCValAssign::Indirect values.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
                                   const CCValAssign &VA, const SDLoc &DL) {
  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
    break;
  case CCValAssign::BCvt:
    if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
      Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
      break;
    }
    Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
    break;
  }
  return Val;
}

// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
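// For a CCValAssign::Indirect location, unpackFromRegLoc below returns the
// pointer itself (an XLenVT value in a GPR); LowerFormalArguments then emits
// the loads of the actual parts, as in the i128-on-RV32 case noted earlier.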
static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  EVT LocVT = VA.getLocVT();
  SDValue Val;
  const TargetRegisterClass *RC;

  switch (LocVT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("Unexpected register type");
  case MVT::i32:
  case MVT::i64:
    RC = &RISCV::GPRRegClass;
    break;
  case MVT::f32:
    RC = &RISCV::FPR32RegClass;
    break;
  case MVT::f64:
    RC = &RISCV::FPR64RegClass;
    break;
  }

  Register VReg = RegInfo.createVirtualRegister(RC);
  RegInfo.addLiveIn(VA.getLocReg(), VReg);
  Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);

  if (VA.getLocInfo() == CCValAssign::Indirect)
    return Val;

  return convertLocVTToValVT(DAG, Val, VA, DL);
}

static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
                                   const CCValAssign &VA, const SDLoc &DL) {
  EVT LocVT = VA.getLocVT();

  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
    break;
  case CCValAssign::BCvt:
    if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
      Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
      break;
    }
    Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
    break;
  }
  return Val;
}

// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  EVT LocVT = VA.getLocVT();
  EVT ValVT = VA.getValVT();
  EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
  int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
                                 VA.getLocMemOffset(), /*Immutable=*/true);
  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
  SDValue Val;

  ISD::LoadExtType ExtType;
  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
  case CCValAssign::Indirect:
  case CCValAssign::BCvt:
    ExtType = ISD::NON_EXTLOAD;
    break;
  }
  Val = DAG.getExtLoad(
      ExtType, DL, LocVT, Chain, FIN,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
  return Val;
}

static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
                                       const CCValAssign &VA, const SDLoc &DL) {
  assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
         "Unexpected VA");
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  if (VA.isMemLoc()) {
    // f64 is passed on the stack.
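    // (Both halves live in a single 8-byte slot in this case, so one f64
    // load suffices; the split register/stack cases are handled below.)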
    int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    return DAG.getLoad(MVT::f64, DL, Chain, FIN,
                       MachinePointerInfo::getFixedStack(MF, FI));
  }

  assert(VA.isRegLoc() && "Expected register VA assignment");

  Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
  RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
  SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
  SDValue Hi;
  if (VA.getLocReg() == RISCV::X17) {
    // Second half of f64 is passed on the stack.
    int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
                     MachinePointerInfo::getFixedStack(MF, FI));
  } else {
    // Second half of f64 is passed in another GPR.
    Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
    RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
    Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
  }
  return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
}

// FastCC shows less than a 1% performance improvement on some benchmarks,
// but it may theoretically benefit certain cases.
static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {

  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
    // X5 and X6 might be used for the save-restore libcalls.
    static const MCPhysReg GPRList[] = {
        RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
        RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
        RISCV::X29, RISCV::X30, RISCV::X31};
    if (unsigned Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32) {
    static const MCPhysReg FPR32List[] = {
        RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
        RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
        RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
        RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
    if (unsigned Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64) {
    static const MCPhysReg FPR64List[] = {
        RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
        RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
        RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
        RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
    if (unsigned Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
    unsigned Offset4 = State.AllocateStack(4, 4);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
    return false;
  }

  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
    unsigned Offset5 = State.AllocateStack(8, 8);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
    return false;
  }

  return true; // CC didn't match.
}

// Transform physical registers into virtual registers.
SDValue RISCVTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    break;
  }

  MachineFunction &MF = DAG.getMachineFunction();

  const Function &Func = MF.getFunction();
  if (Func.hasFnAttribute("interrupt")) {
    if (!Func.arg_empty())
      report_fatal_error(
          "Functions with the interrupt attribute cannot have arguments!");

    StringRef Kind =
        MF.getFunction().getFnAttribute("interrupt").getValueAsString();

    if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
      report_fatal_error(
          "Function interrupt attribute argument not supported!");
  }

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  MVT XLenVT = Subtarget.getXLenVT();
  unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
  std::vector<SDValue> OutChains;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (CallConv == CallingConv::Fast)
    CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
  else
    analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgValue;
    // Passing f64 on RV32D with a soft float ABI must be handled as a special
    // case.
    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
      ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
    else if (VA.isRegLoc())
      ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
    else
      ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // If the original argument was split and passed by reference (e.g. i128
      // on RV32), we need to load all parts of it here (using the same
      // address).
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      unsigned ArgIndex = Ins[i].OrigArgIndex;
      assert(Ins[i].PartOffset == 0);
      while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[i + 1];
        unsigned PartOffset = Ins[i + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++i;
      }
      continue;
    }
    InVals.push_back(ArgValue);
  }

  if (IsVarArg) {
    ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
    unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
    const TargetRegisterClass *RC = &RISCV::GPRRegClass;
    MachineFrameInfo &MFI = MF.getFrameInfo();
    MachineRegisterInfo &RegInfo = MF.getRegInfo();
    RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

    // Offset of the first variable argument from stack pointer, and size of
    // the vararg save area.
    // For now, the varargs save area is either zero or large enough to hold
    // a0-a7.
    int VaArgOffset, VarArgsSaveSize;

    // If all registers are allocated, then all varargs must be passed on the
    // stack and we don't need to save any argregs.
    if (ArgRegs.size() == Idx) {
      VaArgOffset = CCInfo.getNextStackOffset();
      VarArgsSaveSize = 0;
    } else {
      VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
      VaArgOffset = -VarArgsSaveSize;
    }

    // Record the frame index of the first variable argument, which is needed
    // by VASTART.
    int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
    RVFI->setVarArgsFrameIndex(FI);

    // If saving an odd number of registers, then create an extra stack slot
    // to ensure that the frame pointer is 2*XLEN-aligned, which in turn
    // ensures offsets to even-numbered registers remain 2*XLEN-aligned.
    if (Idx % 2) {
      MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
      VarArgsSaveSize += XLenInBytes;
    }

    // Copy the integer registers that may have been used for passing varargs
    // to the vararg save area.
    for (unsigned I = Idx; I < ArgRegs.size();
         ++I, VaArgOffset += XLenInBytes) {
      const Register Reg = RegInfo.createVirtualRegister(RC);
      RegInfo.addLiveIn(ArgRegs[I], Reg);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
      FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
      SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
                                   MachinePointerInfo::getFixedStack(MF, FI));
      cast<StoreSDNode>(Store.getNode())
          ->getMemOperand()
          ->setValue((Value *)nullptr);
      OutChains.push_back(Store);
    }
    RVFI->setVarArgsSaveSize(VarArgsSaveSize);
  }

  // All stores are grouped in one node to allow the matching between
  // the size of Ins and InVals. This only happens for vararg functions.
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
  }

  return Chain;
}

/// isEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization.
/// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
bool RISCVTargetLowering::isEligibleForTailCallOptimization(
    CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
    const SmallVector<CCValAssign, 16> &ArgLocs) const {

  auto &Callee = CLI.Callee;
  auto CalleeCC = CLI.CallConv;
  auto &Outs = CLI.Outs;
  auto &Caller = MF.getFunction();
  auto CallerCC = Caller.getCallingConv();

  // Do not tail call opt functions with "disable-tail-calls" attribute.
  if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  // Exception-handling functions need a special set of instructions to
  // indicate a return to the hardware. Tail-calling another function would
  // probably break this.
  // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
  // should be expanded as new function attributes are introduced.
  if (Caller.hasFnAttribute("interrupt"))
    return false;

  // Do not tail call opt if the stack is used to pass parameters.
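  // (A tail call reuses the caller's stack frame, so there is no outgoing
  // argument area for the callee to read from.)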
  if (CCInfo.getNextStackOffset() != 0)
    return false;

  // Do not tail call opt if any parameters need to be passed indirectly.
  // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
  // passed indirectly, so the address of the value is passed in a register
  // or, if no register is available, placed on the stack. Passing indirectly
  // often requires allocating stack space to hold the value itself, so the
  // CCInfo.getNextStackOffset() != 0 check above is not sufficient; we also
  // need to check whether any CCValAssign in ArgLocs is marked
  // CCValAssign::Indirect.
  for (auto &VA : ArgLocs)
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;

  // Do not tail call opt if either caller or callee uses struct return
  // semantics.
  auto IsCallerStructRet = Caller.hasStructRetAttr();
  auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
  if (IsCallerStructRet || IsCalleeStructRet)
    return false;

  // Externally-defined functions with weak linkage should not be
  // tail-called. The behaviour of branch instructions in this situation (as
  // used for tail calls) is implementation-defined, so we cannot rely on the
  // linker replacing the tail call with a return.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    if (GV->hasExternalWeakLinkage())
      return false;
  }

  // The callee has to preserve all registers the caller needs to preserve.
  const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (CalleeCC != CallerCC) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call. Working around this *is* possible
  // but less efficient and uglier in LowerCall.
  for (auto &Arg : Outs)
    if (Arg.Flags.isByVal())
      return false;

  return true;
}

// Lower a call to a callseq_start + CALL + callseq_end chain, and add input
// and output parameter nodes.
SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                       SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  MVT XLenVT = Subtarget.getXLenVT();

  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (CallConv == CallingConv::Fast)
    ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC);
  else
    analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);

  // Check if it's really possible to do a tail call.
  if (IsTailCall)
    IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);

  if (IsTailCall)
    ++NumTailCalls;
  else if (CLI.CS && CLI.CS.isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Create local copies for byval args.
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    unsigned Align = Flags.getByValAlign();

    int FI = MF.getFrameInfo().CreateStackObject(Size, Align, /*isSS=*/false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);

    Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Align,
                          /*IsVolatile=*/false,
                          /*AlwaysInline=*/false,
                          IsTailCall, MachinePointerInfo(),
                          MachinePointerInfo());
    ByValArgs.push_back(FIPtr);
  }

  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgValue = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    // Handle passing f64 on RV32D with a soft float ABI as a special case.
    bool IsF64OnRV32DSoftABI =
        VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
    if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
      SDValue SplitF64 = DAG.getNode(
          RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
      SDValue Lo = SplitF64.getValue(0);
      SDValue Hi = SplitF64.getValue(1);

      Register RegLo = VA.getLocReg();
      RegsToPass.push_back(std::make_pair(RegLo, Lo));

      if (RegLo == RISCV::X17) {
        // Second half of f64 is passed on the stack.
        // Work out the address of the stack slot.
        if (!StackPtr.getNode())
          StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
        // Emit the store.
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
      } else {
        // Second half of f64 is passed in another GPR.
        assert(RegLo < RISCV::X31 && "Invalid register pair");
        Register RegHigh = RegLo + 1;
        RegsToPass.push_back(std::make_pair(RegHigh, Hi));
      }
      continue;
    }

    // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
    // as any other MemLoc.

    // Promote the value if needed.
    // For now, only handle fully promoted and indirect arguments.
    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. i128), we need
      // to store all parts of it here (and pass just one address).
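      // For example, an i128 on RV32 contributes four i32 parts that share
      // one OrigArgIndex; the loop below stores the remaining parts at byte
      // offsets 4, 8 and 12 of the same spill slot.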
      unsigned ArgIndex = Outs[i].OrigArgIndex;
      assert(Outs[i].PartOffset == 0);
      while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[i + 1];
        unsigned PartOffset = Outs[i + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        ++i;
      }
      ArgValue = SpillSlot;
    } else {
      ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
    }

    // Use local copy if it is a byval arg.
    if (Flags.isByVal())
      ArgValue = ByValArgs[j++];

    if (VA.isRegLoc()) {
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");
      assert(!IsTailCall && "Tail call not allowed if stack is used "
                            "for passing parameters");

      // Work out the address of the stack slot.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
      SDValue Address =
          DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                      DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  SDValue Glue;

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
    Glue = Chain.getValue(1);
  }

  // Validate that none of the argument registers have been marked as
  // reserved; if so, report an error. Do the same for the return address if
  // this is not a tail call.
  validateCCReservedRegs(RegsToPass, MF);
  if (!IsTailCall &&
      MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
    MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
        MF.getFunction(),
        "Return address register required, but has been reserved."});

  // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
  // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
  // split it and then direct call can be matched by PseudoCALL.
  if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = S->getGlobal();

    unsigned OpFlags = RISCVII::MO_CALL;
    if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
      OpFlags = RISCVII::MO_PLT;

    Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    unsigned OpFlags = RISCVII::MO_CALL;

    if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
                                                 nullptr))
      OpFlags = RISCVII::MO_PLT;

    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
  }

  // The first call operand is the chain and the second is the target address.
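  // The full operand list built below is: chain, callee, the argument
  // registers (so they are known live into the call), the call-preserved
  // register mask (for non-tail calls), and finally the glue, if any.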
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  if (!IsTailCall) {
    // Add a register mask operand representing the call-preserved registers.
    const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
    const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
    assert(Mask && "Missing call preserved mask for calling convention");
    Ops.push_back(DAG.getRegisterMask(Mask));
  }

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  if (IsTailCall) {
    MF.getFrameInfo().setHasTailCall();
    return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
  }

  Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);

  // Copy all of the result registers out of their specified physregs.
  for (auto &VA : RVLocs) {
    // Copy the value out.
    SDValue RetValue =
        DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
    // Glue the RetValue to the end of the call sequence.
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
      assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
      SDValue RetValue2 =
          DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
      Chain = RetValue2.getValue(1);
      Glue = RetValue2.getValue(2);
      RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
                             RetValue2);
    }

    RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL);

    InVals.push_back(RetValue);
  }

  return Chain;
}

bool RISCVTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
    if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
                 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
      return false;
  }
  return true;
}

SDValue
RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  const MachineFunction &MF = DAG.getMachineFunction();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();

  // Stores the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
                    nullptr);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
    SDValue Val = OutVals[i];
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
      // Handle returning f64 on RV32D with a soft float ABI.
      assert(VA.isRegLoc() && "Expected return via registers");
      SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
                                     DAG.getVTList(MVT::i32, MVT::i32), Val);
      SDValue Lo = SplitF64.getValue(0);
      SDValue Hi = SplitF64.getValue(1);
      Register RegLo = VA.getLocReg();
      assert(RegLo < RISCV::X31 && "Invalid register pair");
      Register RegHi = RegLo + 1;

      if (STI.isRegisterReservedByUser(RegLo) ||
          STI.isRegisterReservedByUser(RegHi))
        MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
            MF.getFunction(),
            "Return value register required, but has been reserved."});

      Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
      Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
    } else {
      // Handle a 'normal' return.
      Val = convertValVTToLocVT(DAG, Val, VA, DL);
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);

      if (STI.isRegisterReservedByUser(VA.getLocReg()))
        MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
            MF.getFunction(),
            "Return value register required, but has been reserved."});

      // Guarantee that all emitted copies are stuck together.
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
    }
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue node if we have it.
  if (Glue.getNode()) {
    RetOps.push_back(Glue);
  }

  // Interrupt service routines use different return instructions.
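  // For example (the clang spelling is an assumption here, not something this
  // file defines), __attribute__((interrupt("machine"))) on a C function
  // becomes the "interrupt"="machine" function attribute, which selects the
  // MRET-based return below.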
  const Function &Func = DAG.getMachineFunction().getFunction();
  if (Func.hasFnAttribute("interrupt")) {
    if (!Func.getReturnType()->isVoidTy())
      report_fatal_error(
          "Functions with the interrupt attribute must have void return type!");

    MachineFunction &MF = DAG.getMachineFunction();
    StringRef Kind =
        MF.getFunction().getFnAttribute("interrupt").getValueAsString();

    unsigned RetOpc;
    if (Kind == "user")
      RetOpc = RISCVISD::URET_FLAG;
    else if (Kind == "supervisor")
      RetOpc = RISCVISD::SRET_FLAG;
    else
      RetOpc = RISCVISD::MRET_FLAG;

    return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
  }

  return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
}

void RISCVTargetLowering::validateCCReservedRegs(
    const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
    MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();

  if (std::any_of(std::begin(Regs), std::end(Regs), [&STI](auto Reg) {
        return STI.isRegisterReservedByUser(Reg.first);
      }))
    F.getContext().diagnose(DiagnosticInfoUnsupported{
        F, "Argument register required, but has been reserved."});
}

const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((RISCVISD::NodeType)Opcode) {
  case RISCVISD::FIRST_NUMBER:
    break;
  case RISCVISD::RET_FLAG:
    return "RISCVISD::RET_FLAG";
  case RISCVISD::URET_FLAG:
    return "RISCVISD::URET_FLAG";
  case RISCVISD::SRET_FLAG:
    return "RISCVISD::SRET_FLAG";
  case RISCVISD::MRET_FLAG:
    return "RISCVISD::MRET_FLAG";
  case RISCVISD::CALL:
    return "RISCVISD::CALL";
  case RISCVISD::SELECT_CC:
    return "RISCVISD::SELECT_CC";
  case RISCVISD::BuildPairF64:
    return "RISCVISD::BuildPairF64";
  case RISCVISD::SplitF64:
    return "RISCVISD::SplitF64";
  case RISCVISD::TAIL:
    return "RISCVISD::TAIL";
  case RISCVISD::SLLW:
    return "RISCVISD::SLLW";
  case RISCVISD::SRAW:
    return "RISCVISD::SRAW";
  case RISCVISD::SRLW:
    return "RISCVISD::SRLW";
  case RISCVISD::DIVW:
    return "RISCVISD::DIVW";
  case RISCVISD::DIVUW:
    return "RISCVISD::DIVUW";
  case RISCVISD::REMUW:
    return "RISCVISD::REMUW";
  case RISCVISD::FMV_W_X_RV64:
    return "RISCVISD::FMV_W_X_RV64";
  case RISCVISD::FMV_X_ANYEXTW_RV64:
    return "RISCVISD::FMV_X_ANYEXTW_RV64";
  case RISCVISD::READ_CYCLE_WIDE:
    return "RISCVISD::READ_CYCLE_WIDE";
  }
  return nullptr;
}

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
RISCVTargetLowering::ConstraintType
RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'f':
      return C_RegisterClass;
    case 'I':
    case 'J':
    case 'K':
      return C_Immediate;
    case 'A':
      return C_Memory;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass *>
RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // RISCV register class.
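  // For example (illustrative only):
  //   asm volatile("add %0, %1, %2" : "=r"(res) : "r"(a), "r"(b));
  // uses the 'r' constraint to select GPRs, while "=f" selects an FPR when
  // the F or D extension is available.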
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return std::make_pair(0U, &RISCV::GPRRegClass);
    case 'f':
      if (Subtarget.hasStdExtF() && VT == MVT::f32)
        return std::make_pair(0U, &RISCV::FPR32RegClass);
      if (Subtarget.hasStdExtD() && VT == MVT::f64)
        return std::make_pair(0U, &RISCV::FPR64RegClass);
      break;
    default:
      break;
    }
  }

  // Clang will correctly decode the usage of register name aliases into their
  // official names. However, other frontends like `rustc` do not. This allows
  // users of these frontends to use the ABI names for registers in LLVM-style
  // register constraints.
  Register XRegFromAlias = StringSwitch<Register>(Constraint.lower())
                               .Case("{zero}", RISCV::X0)
                               .Case("{ra}", RISCV::X1)
                               .Case("{sp}", RISCV::X2)
                               .Case("{gp}", RISCV::X3)
                               .Case("{tp}", RISCV::X4)
                               .Case("{t0}", RISCV::X5)
                               .Case("{t1}", RISCV::X6)
                               .Case("{t2}", RISCV::X7)
                               .Cases("{s0}", "{fp}", RISCV::X8)
                               .Case("{s1}", RISCV::X9)
                               .Case("{a0}", RISCV::X10)
                               .Case("{a1}", RISCV::X11)
                               .Case("{a2}", RISCV::X12)
                               .Case("{a3}", RISCV::X13)
                               .Case("{a4}", RISCV::X14)
                               .Case("{a5}", RISCV::X15)
                               .Case("{a6}", RISCV::X16)
                               .Case("{a7}", RISCV::X17)
                               .Case("{s2}", RISCV::X18)
                               .Case("{s3}", RISCV::X19)
                               .Case("{s4}", RISCV::X20)
                               .Case("{s5}", RISCV::X21)
                               .Case("{s6}", RISCV::X22)
                               .Case("{s7}", RISCV::X23)
                               .Case("{s8}", RISCV::X24)
                               .Case("{s9}", RISCV::X25)
                               .Case("{s10}", RISCV::X26)
                               .Case("{s11}", RISCV::X27)
                               .Case("{t3}", RISCV::X28)
                               .Case("{t4}", RISCV::X29)
                               .Case("{t5}", RISCV::X30)
                               .Case("{t6}", RISCV::X31)
                               .Default(RISCV::NoRegister);
  if (XRegFromAlias != RISCV::NoRegister)
    return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);

  // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
  // TableGen record rather than the AsmName to choose registers for InlineAsm
  // constraints, and because we want to match those names to the widest
  // floating point register type available, we manually select floating point
  // registers here.
  //
  // The second case in each entry is the ABI name of the register, so that
  // frontends can also use the ABI names in register constraint lists.
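  //
  // For example, both "{f10}" (the TableGen name) and "{fa0}" (the ABI name)
  // resolve to F10_F or F10_D below, with the FPR64 variant preferred when
  // the D extension is present.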
  if (Subtarget.hasStdExtF() || Subtarget.hasStdExtD()) {
    std::pair<Register, Register> FReg =
        StringSwitch<std::pair<Register, Register>>(Constraint.lower())
            .Cases("{f0}", "{ft0}", {RISCV::F0_F, RISCV::F0_D})
            .Cases("{f1}", "{ft1}", {RISCV::F1_F, RISCV::F1_D})
            .Cases("{f2}", "{ft2}", {RISCV::F2_F, RISCV::F2_D})
            .Cases("{f3}", "{ft3}", {RISCV::F3_F, RISCV::F3_D})
            .Cases("{f4}", "{ft4}", {RISCV::F4_F, RISCV::F4_D})
            .Cases("{f5}", "{ft5}", {RISCV::F5_F, RISCV::F5_D})
            .Cases("{f6}", "{ft6}", {RISCV::F6_F, RISCV::F6_D})
            .Cases("{f7}", "{ft7}", {RISCV::F7_F, RISCV::F7_D})
            .Cases("{f8}", "{fs0}", {RISCV::F8_F, RISCV::F8_D})
            .Cases("{f9}", "{fs1}", {RISCV::F9_F, RISCV::F9_D})
            .Cases("{f10}", "{fa0}", {RISCV::F10_F, RISCV::F10_D})
            .Cases("{f11}", "{fa1}", {RISCV::F11_F, RISCV::F11_D})
            .Cases("{f12}", "{fa2}", {RISCV::F12_F, RISCV::F12_D})
            .Cases("{f13}", "{fa3}", {RISCV::F13_F, RISCV::F13_D})
            .Cases("{f14}", "{fa4}", {RISCV::F14_F, RISCV::F14_D})
            .Cases("{f15}", "{fa5}", {RISCV::F15_F, RISCV::F15_D})
            .Cases("{f16}", "{fa6}", {RISCV::F16_F, RISCV::F16_D})
            .Cases("{f17}", "{fa7}", {RISCV::F17_F, RISCV::F17_D})
            .Cases("{f18}", "{fs2}", {RISCV::F18_F, RISCV::F18_D})
            .Cases("{f19}", "{fs3}", {RISCV::F19_F, RISCV::F19_D})
            .Cases("{f20}", "{fs4}", {RISCV::F20_F, RISCV::F20_D})
            .Cases("{f21}", "{fs5}", {RISCV::F21_F, RISCV::F21_D})
            .Cases("{f22}", "{fs6}", {RISCV::F22_F, RISCV::F22_D})
            .Cases("{f23}", "{fs7}", {RISCV::F23_F, RISCV::F23_D})
            .Cases("{f24}", "{fs8}", {RISCV::F24_F, RISCV::F24_D})
            .Cases("{f25}", "{fs9}", {RISCV::F25_F, RISCV::F25_D})
            .Cases("{f26}", "{fs10}", {RISCV::F26_F, RISCV::F26_D})
            .Cases("{f27}", "{fs11}", {RISCV::F27_F, RISCV::F27_D})
            .Cases("{f28}", "{ft8}", {RISCV::F28_F, RISCV::F28_D})
            .Cases("{f29}", "{ft9}", {RISCV::F29_F, RISCV::F29_D})
            .Cases("{f30}", "{ft10}", {RISCV::F30_F, RISCV::F30_D})
            .Cases("{f31}", "{ft11}", {RISCV::F31_F, RISCV::F31_D})
            .Default({RISCV::NoRegister, RISCV::NoRegister});
    if (FReg.first != RISCV::NoRegister)
      return Subtarget.hasStdExtD()
                 ? std::make_pair(FReg.second, &RISCV::FPR64RegClass)
                 : std::make_pair(FReg.first, &RISCV::FPR32RegClass);
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

unsigned
RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // Currently only support length 1 constraints.
  if (ConstraintCode.size() == 1) {
    switch (ConstraintCode[0]) {
    case 'A':
      return InlineAsm::Constraint_A;
    default:
      break;
    }
  }

  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
}

void RISCVTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  // Currently only support length 1 constraints.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I':
      // Validate & create a 12-bit signed immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getSExtValue();
        if (isInt<12>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    case 'J':
      // Validate & create an integer zero operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0)
          Ops.push_back(
              DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
      return;
    case 'K':
      // Validate & create a 5-bit unsigned immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getZExtValue();
        if (isUInt<5>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    default:
      break;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                   Instruction *Inst,
                                                   AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
    return Builder.CreateFence(Ord);
  if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Release);
  return nullptr;
}

Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                    Instruction *Inst,
                                                    AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Acquire);
  return nullptr;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as
  // floating point operations can't be used in an lr/sc sequence without
  // breaking the forward-progress guarantee.
  if (AI->isFloatingPointOperation())
    return AtomicExpansionKind::CmpXChg;

  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

static Intrinsic::ID
getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
  if (XLen == 32) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i32;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i32;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i32;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i32;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i32;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i32;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i32;
    }
  }

  if (XLen == 64) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i64;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i64;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i64;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i64;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i64;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i64;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i64;
    }
  }

  llvm_unreachable("Unexpected XLen");
}

Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering =
      Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
  Type *Tys[] = {AlignedAddr->getType()};
  Function *LrwOpScwLoop = Intrinsic::getDeclaration(
      AI->getModule(),
      getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);

  if (XLen == 64) {
    Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
  }

  Value *Result;

  // Must pass the shift amount needed to sign extend the loaded value prior
  // to performing a signed comparison for min/max. ShiftAmt is the number of
  // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
  // is the number of bits to left+right shift the value in order to
  // sign-extend.
  if (AI->getOperation() == AtomicRMWInst::Min ||
      AI->getOperation() == AtomicRMWInst::Max) {
    const DataLayout &DL = AI->getModule()->getDataLayout();
    unsigned ValWidth =
        DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
    Value *SextShamt =
        Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
    Result = Builder.CreateCall(LrwOpScwLoop,
                                {AlignedAddr, Incr, Mask, SextShamt, Ordering});
  } else {
    Result =
        Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
  }

  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *CI) const {
  unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
  if (XLen == 64) {
    CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
    NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
  }
  Type *Tys[] = {AlignedAddr->getType()};
  Function *MaskedCmpXchg =
      Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
  Value *Result = Builder.CreateCall(
      MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

unsigned RISCVTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X10;
}

unsigned
RISCVTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X11;
}

bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress unnecessary extensions when a libcall argument
  // or return value is of f32 type for the LP64 ABI.
  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
    return false;

  return true;
}
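// For example (an illustrative case: __addsf3 is the usual compiler-rt
// soft-float helper, assumed here rather than referenced in this file), an
// f32 addition lowered to a libcall on LP64 keeps its float argument and
// return values unextended rather than widening them to i64.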