//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// RISC-V.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVRegisterBankInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "riscv-isel"

using namespace llvm;
using namespace MIPatternMatch;

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

namespace {

class RISCVInstructionSelector : public InstructionSelector {
public:
  RISCVInstructionSelector(const RISCVTargetMachine &TM,
                           const RISCVSubtarget &STI,
                           const RISCVRegisterBankInfo &RBI);

  bool select(MachineInstr &MI) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  const TargetRegisterClass *
  getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;

  bool isRegInGprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool isRegInFprb(Register Reg, MachineRegisterInfo &MRI) const;

  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // A lowering phase that runs before any selection attempts. It rewrites
  // the instruction in place where needed.
  void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB,
                    MachineRegisterInfo &MRI);

  bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI);

  // Custom selection methods
  bool selectCopy(MachineInstr &MI, MachineRegisterInfo &MRI) const;
  bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI) const;
  bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB,
                  MachineRegisterInfo &MRI, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  bool selectSExtInreg(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB,
                    MachineRegisterInfo &MRI) const;
  bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB,
                       MachineRegisterInfo &MRI) const;
  bool selectIntrinsicWithSideEffects(MachineInstr &MI, MachineIRBuilder &MIB,
                                      MachineRegisterInfo &MRI) const;
  void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
                 MachineIRBuilder &MIB) const;
  bool selectMergeValues(MachineInstr &MI, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB,
                           MachineRegisterInfo &MRI) const;

  ComplexRendererFns selectShiftMask(MachineOperand &Root) const;
  ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;

  ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }

  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                       unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }

  // Custom renderers for tablegen
  void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                    int OpIdx) const;
  void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
                            int OpIdx) const;
  void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
                          int OpIdx) const;
  void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
                      int OpIdx) const;
  void renderImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                 int OpIdx) const;

  void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
                           int OpIdx) const;

  const RISCVSubtarget &STI;
  const RISCVInstrInfo &TII;
  const RISCVRegisterInfo &TRI;
  const RISCVRegisterBankInfo &RBI;
  const RISCVTargetMachine &TM;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. We need to unify the name
  // of the Subtarget variable.
  const RISCVSubtarget *Subtarget = &STI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      TM(TM),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root) const {
  // TODO: Also check if we are seeing the result of an AND operation which
  // could be bypassed since shifts only read the lower log2(xlen) bits of the
  // shift amount.
  return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  MachineFunction &MF = *Root.getParent()->getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  const unsigned XLen = STI.getXLen();
  APInt Mask, C2;
  Register RegY;
  std::optional<bool> LeftShift;
  // (and (shl y, c2), mask)
  if (mi_match(RootReg, MRI,
               m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = true;
  // (and (lshr y, c2), mask)
  else if (mi_match(RootReg, MRI,
                    m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = false;

  if (LeftShift.has_value()) {
    if (*LeftShift)
      Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
    else
      Mask &= maskTrailingOnes<uint64_t>(XLen - C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // Given (and (shl y, c2), mask), where mask has no leading zeros and
      // c3 trailing zeros, we can use an SRLI by c3 - c2 followed by a
      // SHXADD.
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg =
            MRI.createGenericVirtualRegister(MRI.getType(RootReg));
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Trailing - C2.getLimitedValue());
          MIB.addReg(DstReg);
        }}};
      }

      // Given (and (lshr y, c2), mask), where mask has c2 leading zeros and
      // c3 trailing zeros, we can use an SRLI by c2 + c3 followed by a
      // SHXADD.
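      // For example (a sketch, with XLen = 64 and ShAmt = 2, i.e. SH2ADD):
      // (and (lshr y, 8), 0x00fffffffffffffc) has c2 = 8 leading zeros and
      // c3 = 2 trailing zeros in the mask, so it equals ((y >> 10) << 2) and
      // can be selected as (SRLI y, 10) feeding SH2ADD.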
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg =
            MRI.createGenericVirtualRegister(MRI.getType(RootReg));
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  LeftShift.reset();

  // (shl (and y, mask), c2)
  if (mi_match(RootReg, MRI,
               m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                      m_ICst(C2))))
    LeftShift = true;
  // (lshr (and y, mask), c2)
  else if (mi_match(RootReg, MRI,
                    m_GLShr(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                            m_ICst(C2))))
    LeftShift = false;

  if (LeftShift.has_value() && Mask.isShiftedMask()) {
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();

    // Given (shl (and y, mask), c2), where mask has 32 leading zeros and c3
    // trailing zeros: if c2 + c3 == ShAmt, we can emit SRLIW + SHXADD.
    bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
                (Trailing + C2.getLimitedValue()) == ShAmt;
    if (!Cond)
      // Given (lshr (and y, mask), c2), where mask has 32 leading zeros and
      // c3 trailing zeros: if c3 - c2 == ShAmt, we can emit SRLIW + SHXADD.
      Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
             (Trailing - C2.getLimitedValue()) == ShAmt;

    if (Cond) {
      Register DstReg = MRI.createGenericVirtualRegister(MRI.getType(RootReg));
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
            .addImm(Trailing);
        MIB.addReg(DstReg);
      }}};
    }
  }

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  MachineFunction &MF = *Root.getParent()->getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  // Given (and (shl x, c2), mask), where mask is a shifted mask with
  // 32 - ShAmt leading zeros and c2 trailing zeros, we can use an SLLI by
  // c2 - ShAmt followed by a SHXADD_UW with ShAmt.
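  // For example (a sketch, with ShAmt = 2, i.e. SH2ADD_UW):
  // (and (shl x, 4), 0x3fffffff0) keeps x[29:0] at bit positions [33:4],
  // which is exactly what SH2ADD_UW computes from (SLLI x, 2): it
  // zero-extends the low 32 bits of its first operand, then shifts left by 2.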
  APInt Mask, C2;
  Register RegX;
  if (mi_match(
          RootReg, MRI,
          m_OneNonDBGUse(m_GAnd(m_OneNonDBGUse(m_GShl(m_Reg(RegX), m_ICst(C2))),
                                m_ICst(Mask))))) {
    Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg =
            MRI.createGenericVirtualRegister(MRI.getType(RootReg));
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
              .addImm(C2.getLimitedValue() - ShAmt);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  MachineFunction &MF = *Root.getParent()->getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!Root.isReg())
    return std::nullopt;

  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());

    int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
    if (isInt<12>(RHSC)) {
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    }
  }

  // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
  // the combiner?
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}

/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate
/// CC. CC must be an ICmp predicate.
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
  switch (CC) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
    return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:
    return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT:
    return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT:
    return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE:
    return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE:
    return RISCVCC::COND_GE;
  }
}

static void getOperandsForBranch(Register CondReg, MachineRegisterInfo &MRI,
                                 RISCVCC::CondCode &CC, Register &LHS,
                                 Register &RHS) {
  // Try to fold an ICmp. If that fails, use a NE compare with X0.
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
    LHS = CondReg;
    RHS = RISCV::X0;
    CC = RISCVCC::COND_NE;
    return;
  }

  // We found an ICmp, do some canonicalizations.
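  // For example, (icmp sgt X, -1) becomes (bge X, x0) and (icmp slt X, 1)
  // becomes (bge x0, X), avoiding materializing the constant in a register.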

  // Adjust comparisons to use comparison with 0 if possible.
  if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
    switch (Pred) {
    case CmpInst::Predicate::ICMP_SGT:
      // Convert X > -1 to X >= 0
      if (*Constant == -1) {
        CC = RISCVCC::COND_GE;
        RHS = RISCV::X0;
        return;
      }
      break;
    case CmpInst::Predicate::ICMP_SLT:
      // Convert X < 1 to 0 >= X
      if (*Constant == 1) {
        CC = RISCVCC::COND_GE;
        RHS = LHS;
        LHS = RISCV::X0;
        return;
      }
      break;
    default:
      break;
    }
  }

  switch (Pred) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
  case CmpInst::Predicate::ICMP_NE:
  case CmpInst::Predicate::ICMP_ULT:
  case CmpInst::Predicate::ICMP_SLT:
  case CmpInst::Predicate::ICMP_UGE:
  case CmpInst::Predicate::ICMP_SGE:
    // These CCs are supported directly by RISC-V branches.
    break;
  case CmpInst::Predicate::ICMP_SGT:
  case CmpInst::Predicate::ICMP_SLE:
  case CmpInst::Predicate::ICMP_UGT:
  case CmpInst::Predicate::ICMP_ULE:
    // These CCs are not supported directly by RISC-V branches, but they
    // become supported by swapping LHS and RHS and reversing the direction
    // of the comparison.
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    break;
  }

  CC = getRISCVCCFromICmp(Pred);
  return;
}

bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB, MRI);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI.getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI.getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
          RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
      if (!DefRC) {
        if (!DefTy.isValid()) {
          LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
          return false;
        }

        const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC) {
          LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
          return false;
        }
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
    }

    // Certain non-generic instructions also need some special handling.
    if (MI.isCopy())
      return selectCopy(MI, MRI);

    return true;
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
    return selectCopy(MI, MRI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use a constant pool for complex constants.
    // TODO: Optimize +0.0 to use fcvt.d.w for s64 on rv32.
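    // Materialize the bit pattern of the FP immediate in a GPR (two GPRs for
    // a 64-bit value on RV32), then transfer it to the FP register.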
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    APInt Imm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(DstReg).getSizeInBits();
    if (Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      Register GPRReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
        return false;

      unsigned Opcode = Size == 64 ? RISCV::FMV_D_X : RISCV::FMV_W_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
    } else {
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");
      // Split into two pieces and build through the stack.
      Register GPRRegHigh = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      MachineInstrBuilder PairF64 = MIB.buildInstr(
          RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, MRI, GV->isDSOLocal(),
                      GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI, MIB, MRI);
  case TargetOpcode::G_BRCOND: {
    Register LHS, RHS;
    RISCVCC::CondCode CC;
    getOperandsForBranch(MI.getOperand(0).getReg(), MRI, CC, LHS, RHS);

    auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
                   .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
  }
  case TargetOpcode::G_BRJT: {
    // FIXME: Move to legalization?
    const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
    unsigned EntrySize = MJTI->getEntrySize(MF.getDataLayout());
    assert((EntrySize == 4 || (Subtarget->is64Bit() && EntrySize == 8)) &&
           "Unsupported jump-table entry size");
    assert(
        (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32 ||
         MJTI->getEntryKind() == MachineJumpTableInfo::EK_Custom32 ||
         MJTI->getEntryKind() == MachineJumpTableInfo::EK_BlockAddress) &&
        "Unexpected jump-table entry kind");

    auto SLL =
        MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {MI.getOperand(2)})
            .addImm(Log2_32(EntrySize));
    if (!SLL.constrainAllUses(TII, TRI, RBI))
      return false;

    // TODO: Use SHXADD. Moving to legalization would fix this automatically.
    auto ADD = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
                              {MI.getOperand(0), SLL.getReg(0)});
    if (!ADD.constrainAllUses(TII, TRI, RBI))
      return false;

    unsigned LdOpc = EntrySize == 8 ? RISCV::LD : RISCV::LW;
    auto Dest =
        MIB.buildInstr(LdOpc, {&RISCV::GPRRegClass}, {ADD.getReg(0)})
            .addImm(0)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo::getJumpTable(MF), MachineMemOperand::MOLoad,
                EntrySize, Align(MJTI->getEntryAlignment(MF.getDataLayout()))));
    if (!Dest.constrainAllUses(TII, TRI, RBI))
      return false;

    // If the Kind is EK_LabelDifference32, the table stores an offset from
    // the location of the table. Add the table address to get an absolute
    // address.
    if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32) {
      Dest = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
                            {Dest.getReg(0), MI.getOperand(0)});
      if (!Dest.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    auto Branch =
        MIB.buildInstr(RISCV::PseudoBRIND, {}, {Dest.getReg(0)}).addImm(0);
    if (!Branch.constrainAllUses(TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  case TargetOpcode::G_SEXT_INREG:
    return selectSExtInreg(MI, MIB);
  case TargetOpcode::G_FRAME_INDEX: {
    // TODO: We may want to replace this code with the SelectionDAG patterns,
    // which fail to get imported because they use FrameAddrRegImm, which is a
    // ComplexPattern.
    MI.setDesc(TII.get(RISCV::ADDI));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB, MRI);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB, MRI);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWithSideEffects(MI, MIB, MRI);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB, MRI);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(MI, MIB, MRI);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB, MRI);
  default:
    return false;
  }
}

bool RISCVInstructionSelector::selectMergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_MERGE_VALUES);

  // Build an f64 pair from two s32 GPR operands.
  if (MI.getNumOperands() != 3)
    return false;
  Register Dst = MI.getOperand(0).getReg();
  Register Lo = MI.getOperand(1).getReg();
  Register Hi = MI.getOperand(2).getReg();
  if (!isRegInFprb(Dst, MRI) || !isRegInGprb(Lo, MRI) || !isRegInGprb(Hi, MRI))
    return false;
  MI.setDesc(TII.get(RISCV::BuildPairF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  // Split an f64 Src into two s32 parts.
  if (MI.getNumOperands() != 3)
    return false;
  Register Src = MI.getOperand(2).getReg();
  Register Lo = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  if (!isRegInFprb(Src, MRI) || !isRegInGprb(Lo, MRI) || !isRegInGprb(Hi, MRI))
    return false;
  MI.setDesc(TII.get(RISCV::SplitF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB,
                                                 MachineRegisterInfo &MRI) {
  Register PtrReg = Op.getReg();
  assert(MRI.getType(PtrReg).isPointer() && "Operand is not a pointer!");

  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  MRI.setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}

void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB,
                                            MachineRegisterInfo &MRI) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());

    replacePtrWithInt(MI.getOperand(1), MIB, MRI);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI.setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());
    replacePtrWithInt(MI.getOperand(1), MIB, MRI);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI.setType(DstReg, sXLen);
  }
  }
}

void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}

void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}

void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}

void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}

void RISCVInstructionSelector::renderImm(MachineInstrBuilder &MIB,
                                         const MachineInstr &MI,
                                         int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal);
}

void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(llvm::countr_zero(C));
}

const TargetRegisterClass *
RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  // TODO: Non-GPR register classes.
  return nullptr;
}

bool RISCVInstructionSelector::isRegInGprb(Register Reg,
                                           MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg,
                                           MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}

bool RISCVInstructionSelector::selectCopy(MachineInstr &MI,
                                          MachineRegisterInfo &MRI) const {
  Register DstReg = MI.getOperand(0).getReg();

  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI.getType(DstReg), *RBI.getRegBank(DstReg, MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
    return false;
  }

  MI.setDesc(TII.get(RISCV::COPY));
  return true;
}

bool RISCVInstructionSelector::selectImplicitDef(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI.getType(DstReg), *RBI.getRegBank(DstReg, MRI, TRI));

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
  }
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  MachineRegisterInfo &MRI = *MIB.getMRI();

  if (Imm == 0) {
    MIB.buildCopy(DstReg, Register(RISCV::X0));
    RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, MRI);
    return true;
  }

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI.createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    MachineInstr *Result;

    switch (I.getOpndKind()) {
    case RISCVMatInt::Imm:
      // clang-format off
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
                   .addImm(I.getImm());
      // clang-format on
      break;
    case RISCVMatInt::RegX0:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
                              {SrcReg, Register(RISCV::X0)});
      break;
    case RISCVMatInt::RegReg:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
      break;
    case RISCVMatInt::RegImm:
      Result =
          MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
      break;
    }

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    SrcReg = TmpReg;
  }

  return true;
}

bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB,
                                          MachineRegisterInfo &MRI,
                                          bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI.getType(DefReg);

  // When HWASAN is used and tagging of global variables is enabled, they
  // should be accessed via the GOT, since the tagged address of a global
  // is incompatible with existing code models. This also applies to non-pic
  // mode.
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use PC-relative addressing to access the GOT for this symbol, then
    // load the address from the GOT. This generates the pattern (PseudoLGA
    // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
    // %pcrel_lo(auipc))).
    MachineFunction &MF = *MI.getParent()->getParent();
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF),
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        DefTy, Align(DefTy.getSizeInBits() / 8));

    auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                      .addDisp(DispMO, 0)
                      .addMemOperand(MemOp);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }

  switch (TM.getCodeModel()) {
  default: {
    reportGISelFailure(const_cast<MachineFunction &>(*MF), *TPC, *MORE,
                       getName(), "Unsupported code model for lowering", MI);
    return false;
  }
  case CodeModel::Small: {
    // Must lie within a single 2 GiB address range and must lie between
    // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
    // (lui %hi(sym)) %lo(sym)).
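    // In assembly this is the classic absolute-address pair, e.g.:
    //   lui  rd, %hi(sym)
    //   addi rd, rd, %lo(sym)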
    Register AddrHiDest = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
                               .addDisp(DispMO, 0, RISCVII::MO_HI);

    if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
      return false;

    auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
                      .addDisp(DispMO, 0, RISCVII::MO_LO);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case CodeModel::Medium:
    // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
    // relocation needs to reference a label that points to the auipc
    // instruction itself, not the global. This cannot be done inside the
    // instruction selector.
    if (IsExternWeak) {
      // An extern weak symbol may be undefined, i.e. have value 0, which may
      // not be within 2GiB of PC, so use GOT-indirect addressing to access the
      // symbol. This generates the pattern (PseudoLGA sym), which expands to
      // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
      MachineFunction &MF = *MI.getParent()->getParent();
      MachineMemOperand *MemOp = MF.getMachineMemOperand(
          MachinePointerInfo::getGOT(MF),
          MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
              MachineMemOperand::MOInvariant,
          DefTy, Align(DefTy.getSizeInBits() / 8));

      auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                        .addDisp(DispMO, 0)
                        .addMemOperand(MemOp);

      if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
        return false;

      MI.eraseFromParent();
      return true;
    }

    // Generate a sequence for accessing addresses within any 2GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    MI.setDesc(TII.get(RISCV::PseudoLLA));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }

  return false;
}

bool RISCVInstructionSelector::selectSExtInreg(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  if (!STI.isRV64())
    return false;

  const MachineOperand &Size = MI.getOperand(2);
  // Only Size == 32 (i.e. shift by 32 bits) is acceptable at this point.
  if (!Size.isImm() || Size.getImm() != 32)
    return false;

  const MachineOperand &Src = MI.getOperand(1);
  const MachineOperand &Dst = MI.getOperand(0);
  // addiw rd, rs, 0 (i.e. sext.w rd, rs)
  MachineInstr *NewMI =
      MIB.buildInstr(RISCV::ADDIW, {Dst.getReg()}, {Src.getReg()}).addImm(0U);

  if (!constrainSelectedInstRegOperands(*NewMI, TII, TRI, RBI))
    return false;

  MI.eraseFromParent();
  return true;
}

bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB,
                                            MachineRegisterInfo &MRI) const {
  auto &SelectMI = cast<GSelect>(MI);

  Register LHS, RHS;
  RISCVCC::CondCode CC;
  getOperandsForBranch(SelectMI.getCondReg(), MRI, CC, LHS, RHS);

  Register DstReg = SelectMI.getReg(0);

  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI.getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result = MIB.buildInstr(Opc)
                             .addDef(DstReg)
                             .addReg(LHS)
                             .addReg(RHS)
                             .addImm(CC)
                             .addReg(SelectMI.getTrueReg())
                             .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
}

// Convert an FCMP predicate to one of the supported F or D instructions.
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
  assert((Size == 32 || Size == 64) && "Unsupported size");
  switch (Pred) {
  default:
    llvm_unreachable("Unsupported predicate");
  case CmpInst::FCMP_OLT:
    return Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
  case CmpInst::FCMP_OLE:
    return Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
  case CmpInst::FCMP_OEQ:
    return Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
  }
}

// Try legalizing an FCMP by swapping or inverting the predicate to one that
// is supported.
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
                                  CmpInst::Predicate &Pred, bool &NeedInvert) {
  auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
           Pred == CmpInst::FCMP_OEQ;
  };

  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  InvPred = CmpInst::getInversePredicate(Pred);
  NeedInvert = true;
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    return true;
  }
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  return false;
}

// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
// the result in DstReg.
// FIXME: Maybe we should expand this earlier.
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
                                               MachineIRBuilder &MIB,
                                               MachineRegisterInfo &MRI) const {
  auto &CmpMI = cast<GFCmp>(MI);
  CmpInst::Predicate Pred = CmpMI.getCond();

  Register DstReg = CmpMI.getReg(0);
  Register LHS = CmpMI.getLHSReg();
  Register RHS = CmpMI.getRHSReg();

  unsigned Size = MRI.getType(LHS).getSizeInBits();
  assert((Size == 32 || Size == 64) && "Unexpected size");

  Register TmpReg = DstReg;

  bool NeedInvert = false;
  // First try swapping operands or inverting.
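  // For example (f32 shown; f64 uses the .d forms): (fcmp ogt x, y) selects
  // to (flt.s y, x) by swapping the operands, while (fcmp uge x, y) inverts
  // to OLT and selects to (xori (flt.s x, y), 1).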
  if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
    if (NeedInvert)
      TmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
    if (!Cmp.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
    // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
    NeedInvert = Pred == CmpInst::FCMP_UEQ;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {LHS, RHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {RHS, LHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    auto Or =
        MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!Or.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
    // FIXME: If LHS and RHS are the same we can use a single FEQ.
    NeedInvert = Pred == CmpInst::FCMP_UNO;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {LHS, LHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {RHS, RHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    auto And =
        MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!And.constrainAllUses(TII, TRI, RBI))
      return false;
  } else
    llvm_unreachable("Unhandled predicate");

  // Emit an XORI to invert the result if needed.
  if (NeedInvert) {
    auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
    if (!Xor.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
         "Unexpected opcode");
  // Find the intrinsic ID.
  unsigned IntrinID = cast<GIntrinsic>(MI).getIntrinsicID();

  // Select the instruction.
  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::trap:
    MIB.buildInstr(RISCV::UNIMP, {}, {});
    break;
  case Intrinsic::debugtrap:
    MIB.buildInstr(RISCV::EBREAK, {}, {});
    break;
  }

  MI.eraseFromParent();
  return true;
}

void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    // The only fence that needs an instruction is a sequentially-consistent
    // cross-thread fence.
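    // Under Ztso, memory accesses already execute with total store ordering,
    // so every weaker fence reduces to a compiler-only barrier.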
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // fence rw, rw
      MIB.buildInstr(RISCV::FENCE, {}, {})
          .addImm(RISCVFenceField::R | RISCVFenceField::W)
          .addImm(RISCVFenceField::R | RISCVFenceField::W);
      return;
    }

    // MEMBARRIER is a compiler barrier; it codegens to a no-op.
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // singlethread fences only synchronize with signal handlers on the same
  // thread and thus only need to preserve instruction order, not actually
  // enforce memory ordering.
  if (FenceSSID == SyncScope::SingleThread) {
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
  // Manual: Volume I.
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  default:
    llvm_unreachable("Unexpected ordering");
  case AtomicOrdering::AcquireRelease:
    // fence acq_rel -> fence.tso
    MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
    return;
  case AtomicOrdering::Acquire:
    // fence acquire -> fence r, rw
    Pred = RISCVFenceField::R;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  case AtomicOrdering::Release:
    // fence release -> fence rw, w
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::W;
    break;
  case AtomicOrdering::SequentiallyConsistent:
    // fence seq_cst -> fence rw, rw
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  }
  MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
}

namespace llvm {
InstructionSelector *
createRISCVInstructionSelector(const RISCVTargetMachine &TM,
                               RISCVSubtarget &Subtarget,
                               RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm