//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// RISC-V.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVRegisterBankInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "riscv-isel"

using namespace llvm;
using namespace MIPatternMatch;

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

namespace {

class RISCVInstructionSelector : public InstructionSelector {
public:
  RISCVInstructionSelector(const RISCVTargetMachine &TM,
                           const RISCVSubtarget &STI,
                           const RISCVRegisterBankInfo &RBI);

  bool select(MachineInstr &MI) override;

  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override {
    InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
    MRI = &MF.getRegInfo();
  }

  static const char *getName() { return DEBUG_TYPE; }

private:
  const TargetRegisterClass *
  getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;

  bool isRegInGprb(Register Reg) const;
  bool isRegInFprb(Register Reg) const;

  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // A lowering phase that runs before any selection attempts. It may rewrite
  // the instruction in place.
  void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);

  bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);

  // Custom selection methods
  bool selectCopy(MachineInstr &MI) const;
  bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  bool selectSExtInreg(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB) const;
  void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
                 MachineIRBuilder &MIB) const;
  bool selectMergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;

  ComplexRendererFns selectShiftMask(MachineOperand &Root) const;
  ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;

  ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }

  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                       unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }

  ComplexRendererFns renderVLOp(MachineOperand &Root) const;

  // Custom renderers for tablegen
  void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                    int OpIdx) const;
  void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
                            int OpIdx) const;
  void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
                          int OpIdx) const;
  void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
                      int OpIdx) const;
  void renderImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                 int OpIdx) const;

  void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
                           int OpIdx) const;

  const RISCVSubtarget &STI;
  const RISCVInstrInfo &TII;
  const RISCVRegisterInfo &TRI;
  const RISCVRegisterBankInfo &RBI;
  const RISCVTargetMachine &TM;

  MachineRegisterInfo *MRI = nullptr;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. We need to unify the name of
  // the Subtarget variable.
  const RISCVSubtarget *Subtarget = &STI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      TM(TM),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root) const {
  if (!Root.isReg())
    return std::nullopt;

  using namespace llvm::MIPatternMatch;

  Register RootReg = Root.getReg();
  Register ShAmtReg = RootReg;
  const LLT ShiftLLT = MRI->getType(RootReg);
  unsigned ShiftWidth = ShiftLLT.getSizeInBits();
  assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
  // Peek through zext.
  Register ZExtSrcReg;
  if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg)))) {
    ShAmtReg = ZExtSrcReg;
  }

  APInt AndMask;
  Register AndSrcReg;
  // Try to combine the following pattern (applicable to other shift
  // instructions as well as 32-bit ones):
  //
  // %4:gprb(s64) = G_AND %3, %2
  // %5:gprb(s64) = G_LSHR %1, %4(s64)
  //
  // According to RISC-V's ISA manual, SLL, SRL, and SRA ignore other bits than
  // the lowest log2(XLEN) bits of register rs2. As for the above pattern, if
  // the lowest log2(XLEN) bits of register rd and rs2 of G_AND are the same,
  // then it can be eliminated. Given register rs1 or rs2 holding a constant
  // (the and mask), there are two cases G_AND can be erased:
  //
  // 1. the lowest log2(XLEN) bits of the and mask are all set
  // 2. the bits of the register being masked are already unset (zero set)
  if (mi_match(ShAmtReg, *MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmtReg = AndSrcReg;
    } else {
      // SimplifyDemandedBits may have optimized the mask so try restoring any
      // bits that are known zero.
      KnownBits Known = KB->getKnownBits(AndSrcReg);
      if (ShMask.isSubsetOf(AndMask | Known.Zero))
        ShAmtReg = AndSrcReg;
    }
  }
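  // Illustrative example (hypothetical GMIR, based on the comment above): on
  // RV64 the shift amount of SLL/SRL/SRA is rs2[5:0], so for
  //   %m:gprb(s64) = G_AND %y, 63
  //   %r:gprb(s64) = G_LSHR %x, %m(s64)
  // the mask already covers every demanded bit and %m can be replaced by %y,
  // letting the shift select to a plain SRL with no AND.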
  APInt Imm;
  Register Reg;
  if (mi_match(ShAmtReg, *MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
      // If we are shifting by X+N where N == 0 mod Size, then just shift by X
      // to avoid the ADD.
      ShAmtReg = Reg;
  } else if (mi_match(ShAmtReg, *MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
      // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
      // to generate a NEG instead of a SUB of a constant.
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
        MIB.addReg(ShAmtReg);
      }}};
    }
    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
      // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
      // to generate a NOT instead of a SUB of a constant.
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
            .addImm(-1);
        MIB.addReg(ShAmtReg);
      }}};
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  const unsigned XLen = STI.getXLen();
  APInt Mask, C2;
  Register RegY;
  std::optional<bool> LeftShift;
  // (and (shl y, c2), mask)
  if (mi_match(RootReg, *MRI,
               m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = true;
  // (and (lshr y, c2), mask)
  else if (mi_match(RootReg, *MRI,
                    m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = false;

  if (LeftShift.has_value()) {
    if (*LeftShift)
      Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
    else
      Mask &= maskTrailingOnes<uint64_t>(XLen - C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // Given (and (shl y, c2), mask) in which mask has no leading zeros and
      // c3 trailing zeros. We can use an SRLI by c3 - c2 followed by a SHXADD.
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Trailing - C2.getLimitedValue());
          MIB.addReg(DstReg);
        }}};
      }

      // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
      // c3 trailing zeros. We can use an SRLI by c2 + c3 followed by a SHXADD.
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }
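  // Worked example (hypothetical values) for the first case above with
  // ShAmt == 2 (SH2ADD) on RV64: Root = (and (shl y, 1), 0xFFFFFFFFFFFFFFFC)
  // has Leading == 0 and Trailing == 2, so the renderer emits
  //   srli t, y, 1        ; Trailing - c2 == 1
  // and the SH2ADD that consumes this operand supplies the remaining shift
  // by 2.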
  LeftShift.reset();

  // (shl (and y, mask), c2)
  if (mi_match(RootReg, *MRI,
               m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                      m_ICst(C2))))
    LeftShift = true;
  // (lshr (and y, mask), c2)
  else if (mi_match(RootReg, *MRI,
                    m_GLShr(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                            m_ICst(C2))))
    LeftShift = false;

  if (LeftShift.has_value() && Mask.isShiftedMask()) {
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();

    // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
    // c3 trailing zeros. If c1 + c3 == ShAmt, we can emit SRLIW + SHXADD.
    bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
                (Trailing + C2.getLimitedValue()) == ShAmt;
    if (!Cond)
      // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
      // c3 trailing zeros. If c3 - c1 == ShAmt, we can emit SRLIW + SHXADD.
      Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
             (Trailing - C2.getLimitedValue()) == ShAmt;

    if (Cond) {
      Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
            .addImm(Trailing);
        MIB.addReg(DstReg);
      }}};
    }
  }

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  // Given (and (shl x, c2), mask) in which mask is a shifted mask with
  // 32 - ShAmt leading zeros and c2 trailing zeros. We can use SLLI by
  // c2 - ShAmt followed by SHXADD_UW with ShAmt for x amount.
  APInt Mask, C2;
  Register RegX;
  if (mi_match(
          RootReg, *MRI,
          m_OneNonDBGUse(m_GAnd(m_OneNonDBGUse(m_GShl(m_Reg(RegX), m_ICst(C2))),
                                m_ICst(Mask))))) {
    Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
              .addImm(C2.getLimitedValue() - ShAmt);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
  assert(Root.isReg() && "Expected operand to be a Register");
  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());

  if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
    auto C = RootDef->getOperand(1).getCImm();
    if (C->getValue().isAllOnes())
      // If the operand is a G_CONSTANT with value of all ones it is larger than
      // VLMAX. We convert it to an immediate with value VLMaxSentinel. This is
      // recognized specially by the vsetvli insertion pass.
      return {{[=](MachineInstrBuilder &MIB) {
        MIB.addImm(RISCV::VLMaxSentinel);
      }}};

    if (isUInt<5>(C->getZExtValue())) {
      uint64_t ZExtC = C->getZExtValue();
      return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
    }
  }
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  if (!Root.isReg())
    return std::nullopt;

  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, *MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());

    int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
    if (isInt<12>(RHSC)) {
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    }
  }

  // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
  // the combiner?
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}
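// Illustrative example (hypothetical GMIR): a load whose address is defined
// by (G_PTR_ADD %base, G_CONSTANT 8) can render as the operand pair
// (%base, 8), matching the reg+simm12 addressing form of LW/LD, while a bare
// G_FRAME_INDEX address renders as (frame-index, 0).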
/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate CC.
/// CC must be an ICMP predicate.
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
  switch (CC) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
    return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:
    return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT:
    return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT:
    return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE:
    return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE:
    return RISCVCC::COND_GE;
  }
}

static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
                                 Register &LHS, Register &RHS,
                                 MachineRegisterInfo &MRI) {
  // Try to fold an ICmp. If that fails, use a NE compare with X0.
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
    LHS = CondReg;
    RHS = RISCV::X0;
    CC = RISCVCC::COND_NE;
    return;
  }

  // We found an ICmp, do some canonicalizations.

  // Adjust comparisons to use comparison with 0 if possible.
  if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
    switch (Pred) {
    case CmpInst::Predicate::ICMP_SGT:
      // Convert X > -1 to X >= 0
      if (*Constant == -1) {
        CC = RISCVCC::COND_GE;
        RHS = RISCV::X0;
        return;
      }
      break;
    case CmpInst::Predicate::ICMP_SLT:
      // Convert X < 1 to 0 >= X
      if (*Constant == 1) {
        CC = RISCVCC::COND_GE;
        RHS = LHS;
        LHS = RISCV::X0;
        return;
      }
      break;
    default:
      break;
    }
  }

  switch (Pred) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
  case CmpInst::Predicate::ICMP_NE:
  case CmpInst::Predicate::ICMP_ULT:
  case CmpInst::Predicate::ICMP_SLT:
  case CmpInst::Predicate::ICMP_UGE:
  case CmpInst::Predicate::ICMP_SGE:
    // These CCs are supported directly by RISC-V branches.
    break;
  case CmpInst::Predicate::ICMP_SGT:
  case CmpInst::Predicate::ICMP_SLE:
  case CmpInst::Predicate::ICMP_UGT:
  case CmpInst::Predicate::ICMP_ULE:
    // These CCs are not supported directly by RISC-V branches, but they can be
    // handled by changing the direction of the CC and swapping LHS and RHS.
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    break;
  }

  CC = getRISCVCCFromICmp(Pred);
  return;
}

bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI->getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI->getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
          RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
      if (!DefRC) {
        if (!DefTy.isValid()) {
          LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
          return false;
        }

        const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC) {
          LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
          return false;
        }
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
    }

    // Certain non-generic instructions also need some special handling.
    if (MI.isCopy())
      return selectCopy(MI);

    return true;
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    return selectCopy(MI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use constant pool for complex constants.
    // TODO: Optimize +0.0 to use fcvt.d.w for s64 on rv32.
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    APInt Imm = FPimm.bitcastToAPInt();
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      Register GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
        return false;

      unsigned Opcode = Size == 64 ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
    } else {
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");
      // Split into two pieces and build through the stack.
      Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      MachineInstrBuilder PairF64 = MIB.buildInstr(
          RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI, MIB, MRI);
  case TargetOpcode::G_BRCOND: {
    Register LHS, RHS;
    RISCVCC::CondCode CC;
    getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);

    auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
                   .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
  }
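  // For illustration (hypothetical input): a G_BRCOND whose condition is not
  // an icmp is emitted as BNE cond, x0, bb (COND_NE against X0 from
  // getOperandsForBranch), while a condition such as (G_ICMP slt a, b) folds
  // into BLT a, b, bb instead.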
  case TargetOpcode::G_BRJT: {
    // FIXME: Move to legalization?
    const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
    unsigned EntrySize = MJTI->getEntrySize(MF.getDataLayout());
    assert((EntrySize == 4 || (Subtarget->is64Bit() && EntrySize == 8)) &&
           "Unsupported jump-table entry size");
    assert(
        (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32 ||
         MJTI->getEntryKind() == MachineJumpTableInfo::EK_Custom32 ||
         MJTI->getEntryKind() == MachineJumpTableInfo::EK_BlockAddress) &&
        "Unexpected jump-table entry kind");

    auto SLL =
        MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {MI.getOperand(2)})
            .addImm(Log2_32(EntrySize));
    if (!SLL.constrainAllUses(TII, TRI, RBI))
      return false;

    // TODO: Use SHXADD. Moving to legalization would fix this automatically.
    auto ADD = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
                              {MI.getOperand(0), SLL.getReg(0)});
    if (!ADD.constrainAllUses(TII, TRI, RBI))
      return false;

    unsigned LdOpc = EntrySize == 8 ? RISCV::LD : RISCV::LW;
    auto Dest =
        MIB.buildInstr(LdOpc, {&RISCV::GPRRegClass}, {ADD.getReg(0)})
            .addImm(0)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo::getJumpTable(MF), MachineMemOperand::MOLoad,
                EntrySize, Align(MJTI->getEntryAlignment(MF.getDataLayout()))));
    if (!Dest.constrainAllUses(TII, TRI, RBI))
      return false;

    // If the Kind is EK_LabelDifference32, the table stores an offset from
    // the location of the table. Add the table address to get an absolute
    // address.
    if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32) {
      Dest = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
                            {Dest.getReg(0), MI.getOperand(0)});
      if (!Dest.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    auto Branch =
        MIB.buildInstr(RISCV::PseudoBRIND, {}, {Dest.getReg(0)}).addImm(0);
    if (!Branch.constrainAllUses(TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  case TargetOpcode::G_SEXT_INREG:
    return selectSExtInreg(MI, MIB);
  case TargetOpcode::G_FRAME_INDEX: {
    // TODO: We may want to replace this code with the SelectionDAG patterns,
    // which fail to get imported because it uses FrameAddrRegImm, which is a
    // ComplexPattern
    MI.setDesc(TII.get(RISCV::ADDI));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(MI, MIB);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB);
  default:
    return false;
  }
}

bool RISCVInstructionSelector::selectMergeValues(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_MERGE_VALUES);

  // Build a F64 Pair from operands
  if (MI.getNumOperands() != 3)
    return false;
  Register Dst = MI.getOperand(0).getReg();
  Register Lo = MI.getOperand(1).getReg();
  Register Hi = MI.getOperand(2).getReg();
  if (!isRegInFprb(Dst) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;
  MI.setDesc(TII.get(RISCV::BuildPairF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  // Split F64 Src into two s32 parts
  if (MI.getNumOperands() != 3)
    return false;
  Register Src = MI.getOperand(2).getReg();
  Register Lo = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;
  MI.setDesc(TII.get(RISCV::SplitF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB) {
  Register PtrReg = Op.getReg();
  assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");

  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}

void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());

    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI->setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());
    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI->setType(DstReg, sXLen);
  }
  }
}

void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}

void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}

void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}

void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}

void RISCVInstructionSelector::renderImm(MachineInstrBuilder &MIB,
                                         const MachineInstr &MI,
                                         int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal);
}

void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(llvm::countr_zero(C));
}

const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return &RISCV::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  if (RB.getID() == RISCV::VRBRegBankID) {
    if (Ty.getSizeInBits().getKnownMinValue() <= 64)
      return &RISCV::VRRegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 128)
      return &RISCV::VRM2RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 256)
      return &RISCV::VRM4RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 512)
      return &RISCV::VRM8RegClass;
  }

  return nullptr;
}

bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}

bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();

  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
    return false;
  }

  MI.setDesc(TII.get(RISCV::COPY));
  return true;
}

bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
  }
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

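// Illustrative note: the instruction sequences used below come from
// RISCVMatInt::generateInstSeq. For example, materializing 0x12345678
// typically yields LUI 0x12345 followed by ADDIW 0x678; the exact sequence is
// decided by RISCVMatInt, not by this function.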
bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  if (Imm == 0) {
    MIB.buildCopy(DstReg, Register(RISCV::X0));
    RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
    return true;
  }

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    MachineInstr *Result;

    switch (I.getOpndKind()) {
    case RISCVMatInt::Imm:
      // clang-format off
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
                   .addImm(I.getImm());
      // clang-format on
      break;
    case RISCVMatInt::RegX0:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
                              {SrcReg, Register(RISCV::X0)});
      break;
    case RISCVMatInt::RegReg:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
      break;
    case RISCVMatInt::RegImm:
      Result =
          MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
      break;
    }

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    SrcReg = TmpReg;
  }

  return true;
}

bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB, bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);

  // When HWASAN is used and tagging of global variables is enabled
  // they should be accessed via the GOT, since the tagged address of a global
  // is incompatible with existing code models. This also applies to non-pic
  // mode.
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use PC-relative addressing to access the GOT for this symbol, then
    // load the address from the GOT. This generates the pattern (PseudoLGA
    // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
    // %pcrel_lo(auipc))).
    MachineFunction &MF = *MI.getParent()->getParent();
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF),
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        DefTy, Align(DefTy.getSizeInBits() / 8));

    auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                      .addDisp(DispMO, 0)
                      .addMemOperand(MemOp);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }

  switch (TM.getCodeModel()) {
  default: {
    reportGISelFailure(const_cast<MachineFunction &>(*MF), *TPC, *MORE,
                       getName(), "Unsupported code model for lowering", MI);
    return false;
  }
  case CodeModel::Small: {
    // Must lie within a single 2 GiB address range and must lie between
    // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
    // (lui %hi(sym)) %lo(sym)).
    Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
                               .addDisp(DispMO, 0, RISCVII::MO_HI);

    if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
      return false;

    auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
                      .addDisp(DispMO, 0, RISCVII::MO_LO);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case CodeModel::Medium:
    // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
    // relocation needs to reference a label that points to the auipc
    // instruction itself, not the global. This cannot be done inside the
    // instruction selector.
    if (IsExternWeak) {
      // An extern weak symbol may be undefined, i.e. have value 0, which may
      // not be within 2GiB of PC, so use GOT-indirect addressing to access the
      // symbol. This generates the pattern (PseudoLGA sym), which expands to
      // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
      MachineFunction &MF = *MI.getParent()->getParent();
      MachineMemOperand *MemOp = MF.getMachineMemOperand(
          MachinePointerInfo::getGOT(MF),
          MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
              MachineMemOperand::MOInvariant,
          DefTy, Align(DefTy.getSizeInBits() / 8));

      auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                        .addDisp(DispMO, 0)
                        .addMemOperand(MemOp);

      if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
        return false;

      MI.eraseFromParent();
      return true;
    }

    // Generate a sequence for accessing addresses within any 2GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    MI.setDesc(TII.get(RISCV::PseudoLLA));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }

  return false;
}

bool RISCVInstructionSelector::selectSExtInreg(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  unsigned SrcSize = MI.getOperand(2).getImm();

  MachineInstr *NewMI;
  if (SrcSize == 32) {
    assert(Subtarget->is64Bit() && "Unexpected extend");
    // addiw rd, rs, 0 (i.e. sext.w rd, rs)
    NewMI = MIB.buildInstr(RISCV::ADDIW, {DstReg}, {SrcReg}).addImm(0U);
  } else {
    assert(Subtarget->hasStdExtZbb() && "Unexpected extension");
    assert((SrcSize == 8 || SrcSize == 16) && "Unexpected size");
    unsigned Opc = SrcSize == 16 ? RISCV::SEXT_H : RISCV::SEXT_B;
    NewMI = MIB.buildInstr(Opc, {DstReg}, {SrcReg});
  }

  if (!constrainSelectedInstRegOperands(*NewMI, TII, TRI, RBI))
    return false;

  MI.eraseFromParent();
  return true;
}

bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB) const {
  auto &SelectMI = cast<GSelect>(MI);

  Register LHS, RHS;
  RISCVCC::CondCode CC;
  getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);

  Register DstReg = SelectMI.getReg(0);

  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result = MIB.buildInstr(Opc)
                             .addDef(DstReg)
                             .addReg(LHS)
                             .addReg(RHS)
                             .addImm(CC)
                             .addReg(SelectMI.getTrueReg())
                             .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
}

// Convert an FCMP predicate to one of the supported F or D instructions.
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
  assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
  switch (Pred) {
  default:
    llvm_unreachable("Unsupported predicate");
  case CmpInst::FCMP_OLT:
    return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
  case CmpInst::FCMP_OLE:
    return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
  case CmpInst::FCMP_OEQ:
    return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
  }
}

// Try legalizing an FCMP by swapping or inverting the predicate to one that
// is supported.
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
                                  CmpInst::Predicate &Pred, bool &NeedInvert) {
  auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
           Pred == CmpInst::FCMP_OEQ;
  };

  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  InvPred = CmpInst::getInversePredicate(Pred);
  NeedInvert = true;
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    return true;
  }
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  return false;
}

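// Worked example (illustrative): FCMP_OGT a, b swaps to FCMP_OLT b, a and
// selects to FLT b, a; FCMP_UGE a, b inverts to FCMP_OLT a, b, so the caller
// below emits FLT a, b followed by an XORI to flip the result.
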
// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
// the result in DstReg.
// FIXME: Maybe we should expand this earlier.
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  auto &CmpMI = cast<GFCmp>(MI);
  CmpInst::Predicate Pred = CmpMI.getCond();

  Register DstReg = CmpMI.getReg(0);
  Register LHS = CmpMI.getLHSReg();
  Register RHS = CmpMI.getRHSReg();

  unsigned Size = MRI->getType(LHS).getSizeInBits();
  assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");

  Register TmpReg = DstReg;

  bool NeedInvert = false;
  // First try swapping operands or inverting.
  if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
    if (!Cmp.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
    // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
    NeedInvert = Pred == CmpInst::FCMP_UEQ;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {LHS, RHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {RHS, LHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Or =
        MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!Or.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
    // FIXME: If LHS and RHS are the same we can use a single FEQ.
    NeedInvert = Pred == CmpInst::FCMP_UNO;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {LHS, LHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {RHS, RHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto And =
        MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!And.constrainAllUses(TII, TRI, RBI))
      return false;
  } else
    llvm_unreachable("Unhandled predicate");

  // Emit an XORI to invert the result if needed.
  if (NeedInvert) {
    auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
    if (!Xor.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    // The only fence that needs an instruction is a sequentially-consistent
    // cross-thread fence.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // fence rw, rw
      MIB.buildInstr(RISCV::FENCE, {}, {})
          .addImm(RISCVFenceField::R | RISCVFenceField::W)
          .addImm(RISCVFenceField::R | RISCVFenceField::W);
      return;
    }

    // MEMBARRIER is a compiler barrier; it codegens to a no-op.
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // singlethread fences only synchronize with signal handlers on the same
  // thread and thus only need to preserve instruction order, not actually
  // enforce memory ordering.
  if (FenceSSID == SyncScope::SingleThread) {
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
  // Manual: Volume I.
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  default:
    llvm_unreachable("Unexpected ordering");
  case AtomicOrdering::AcquireRelease:
    // fence acq_rel -> fence.tso
    MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
    return;
  case AtomicOrdering::Acquire:
    // fence acquire -> fence r, rw
    Pred = RISCVFenceField::R;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  case AtomicOrdering::Release:
    // fence release -> fence rw, w
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::W;
    break;
  case AtomicOrdering::SequentiallyConsistent:
    // fence seq_cst -> fence rw, rw
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  }
  MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
}

namespace llvm {
InstructionSelector *
createRISCVInstructionSelector(const RISCVTargetMachine &TM,
                               const RISCVSubtarget &Subtarget,
                               const RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm