//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;
using namespace MIPatternMatch;

Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, Register Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}

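// Constrain the register in operand RegMO to RegClass, inserting a COPY when
// the value cannot be constrained in place. For a use this emits, ahead of
// InsertPt, something like:
//   %new:rc = COPY %old
// and rewrites the operand to use %new; for a def, the COPY goes in the
// opposite direction and is inserted just after InsertPt.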
Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible,
  // then create a copy between the new and the old register.
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changingInstr(*RegMO.getParent());
    }
    RegMO.setReg(ConstrainedReg);
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changedInstr(*RegMO.getParent());
    }
  } else {
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some target-independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands; if it's a use, we
  // can skip constraining, as the instruction defining the register would
  // constrain it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handle this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);

  if (!RegClass) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here could be not enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Note that to reproduce
    // the issue we likely need a destination pattern of a selection rule
    // producing such extra copies, not just an input GMIR with them, as every
    // existing target using selectImpl handles copies before calling it and
    // they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass,
                                  RegMO);
}

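// Constrain every register operand of the already-selected instruction I to
// the register class required by its MCInstrDesc, and tie uses to defs where
// the descriptor demands it.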
bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Register::isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}

bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
                         MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
  if (DstReg.isPhysical() || SrcReg.isPhysical())
    return false;
  // Give up if the types don't match.
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
    return false;
  // Replace if either DstReg has no constraints or the register
  // constraints match.
  return !MRI.getRegClassOrRegBank(DstReg) ||
         MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
}

bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // FIXME: This logic is mostly duplicated in
  // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
  // MachineInstr::isLabel?

  // Don't delete frame allocation labels.
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;
  // LIFETIME markers should be preserved even if they seem dead.
  if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
      MI.getOpcode() == TargetOpcode::LIFETIME_END)
    return false;

  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead
  // vregs. Note that debug uses are ignored here (use_nodbg_empty), so an
  // instruction whose only remaining users are DBG_VALUEs still counts as
  // dead.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    Register Reg = MO.getReg();
    if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}

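// Common implementation of the reportGISel{Warning,Failure} helpers below:
// emit R through the remark emitter, or escalate to a fatal error when
// aborting on GlobalISel failures is enabled.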
static void reportGISelDiagnostic(DiagnosticSeverity Severity,
                                  MachineFunction &MF,
                                  const TargetPassConfig &TPC,
                                  MachineOptimizationRemarkEmitter &MORE,
                                  MachineOptimizationRemarkMissed &R) {
  bool IsFatal = Severity == DS_Error &&
                 TPC.isGlobalISelAbortEnabled();
  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsFatal)
    report_fatal_error(R.getMsg());
  else
    MORE.emit(R);
}

void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
  reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

Optional<APInt> llvm::getConstantVRegVal(Register VReg,
                                         const MachineRegisterInfo &MRI) {
  Optional<ValueAndVReg> ValAndVReg =
      getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return None;
  return ValAndVReg->Value;
}

Optional<int64_t> llvm::getConstantVRegSExtVal(Register VReg,
                                               const MachineRegisterInfo &MRI) {
  Optional<APInt> Val = getConstantVRegVal(VReg, MRI);
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
  return None;
}

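// Walk backwards from VReg through COPY, G_INTTOPTR and integer ext/trunc
// instructions to find a G_CONSTANT (or, optionally, a G_FCONSTANT), then
// replay the recorded size changes on the constant. For example, given
//   %c:_(s32) = G_CONSTANT i32 42
//   %z:_(s64) = G_ZEXT %c
// looking through %z yields the APInt 42 zero-extended to 64 bits, paired
// with the vreg that directly holds the constant.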
Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool HandleFConstant, bool LookThroughAnyExt) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;
  auto IsConstantOpcode = [HandleFConstant](unsigned Opcode) {
    return Opcode == TargetOpcode::G_CONSTANT ||
           (HandleFConstant && Opcode == TargetOpcode::G_FCONSTANT);
  };
  auto GetImmediateValue = [HandleFConstant,
                            &MRI](const MachineInstr &MI) -> Optional<APInt> {
    const MachineOperand &CstVal = MI.getOperand(1);
    if (!CstVal.isImm() && !CstVal.isCImm() &&
        (!HandleFConstant || !CstVal.isFPImm()))
      return None;
    if (!CstVal.isFPImm()) {
      unsigned BitWidth =
          MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
      APInt Val = CstVal.isImm() ? APInt(BitWidth, CstVal.getImm())
                                 : CstVal.getCImm()->getValue();
      assert(Val.getBitWidth() == BitWidth &&
             "Value bitwidth doesn't match definition type");
      return Val;
    }
    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  };
  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI->getOpcode()) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_ANYEXT:
      if (!LookThroughAnyExt)
        return None;
      LLVM_FALLTHROUGH;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (Register::isPhysicalRegister(VReg))
        return None;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return None;
    }
  }
  if (!MI || !IsConstantOpcode(MI->getOpcode()))
    return None;

  Optional<APInt> MaybeVal = GetImmediateValue(*MI);
  if (!MaybeVal)
    return None;
  APInt &Val = *MaybeVal;
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  return ValueAndVReg{Val, VReg};
}

const ConstantInt *llvm::getConstantIntVRegVal(Register VReg,
                                               const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (MI->getOpcode() != TargetOpcode::G_CONSTANT)
    return nullptr;
  return MI->getOperand(1).getCImm();
}

const ConstantFP *
llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (MI->getOpcode() != TargetOpcode::G_FCONSTANT)
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

Optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
  Register DefSrcReg = Reg;
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return None;
  unsigned Opc = DefMI->getOpcode();
  while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
    Opc = DefMI->getOpcode();
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}

MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}

Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->Reg : Register();
}

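// Convenience wrapper around getDefIgnoringCopies that also checks the def's
// opcode; e.g. getOpcodeDef(TargetOpcode::G_CONSTANT, Reg, MRI) returns the
// G_CONSTANT feeding Reg through any chain of copies, or nullptr.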
MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}

Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                        const Register Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
  if (!MaybeOp2Cst)
    return None;

  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  if (!MaybeOp1Cst)
    return None;

  const APInt &C1 = *MaybeOp1Cst;
  const APInt &C2 = *MaybeOp2Cst;
  switch (Opcode) {
  default:
    break;
  case TargetOpcode::G_ADD:
    return C1 + C2;
  case TargetOpcode::G_AND:
    return C1 & C2;
  case TargetOpcode::G_ASHR:
    return C1.ashr(C2);
  case TargetOpcode::G_LSHR:
    return C1.lshr(C2);
  case TargetOpcode::G_MUL:
    return C1 * C2;
  case TargetOpcode::G_OR:
    return C1 | C2;
  case TargetOpcode::G_SHL:
    return C1 << C2;
  case TargetOpcode::G_SUB:
    return C1 - C2;
  case TargetOpcode::G_XOR:
    return C1 ^ C2;
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  }

  return None;
}

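// Floating-point counterpart of ConstantFoldBinOp: fold Opcode over the two
// G_FCONSTANT operands using APFloat arithmetic (round-to-nearest-even), or
// return None if either operand is not a constant or the opcode is
// unsupported.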
Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                                            const Register Op2,
                                            const MachineRegisterInfo &MRI) {
  const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
  if (!Op2Cst)
    return None;

  const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
  if (!Op1Cst)
    return None;

  APFloat C1 = Op1Cst->getValueAPF();
  const APFloat &C2 = Op2Cst->getValueAPF();
  switch (Opcode) {
  case TargetOpcode::G_FADD:
    C1.add(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FSUB:
    C1.subtract(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FMUL:
    C1.multiply(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FDIV:
    C1.divide(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FREM:
    C1.mod(C2);
    return C1;
  case TargetOpcode::G_FCOPYSIGN:
    C1.copySign(C2);
    return C1;
  case TargetOpcode::G_FMINNUM:
    return minnum(C1, C2);
  case TargetOpcode::G_FMAXNUM:
    return maxnum(C1, C2);
  case TargetOpcode::G_FMINIMUM:
    return minimum(C1, C2);
  case TargetOpcode::G_FMAXIMUM:
    return maximum(C1, C2);
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
    // FIXME: These operations were unfortunately named. fminnum/fmaxnum do
    // not follow the IEEE behavior for signaling NaNs and instead follow
    // libm's fmin/fmax, and currently there isn't a nice wrapper in APFloat
    // for the version with correct sNaN handling.
    break;
  default:
    break;
  }

  return None;
}

bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  const TargetMachine &TM = DefMI->getMF()->getTarget();
  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
    return !FPVal->getValueAPF().isNaN() ||
           (SNaN && !FPVal->getValueAPF().isSignaling());
  }

  if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
    for (const auto &Op : DefMI->uses())
      if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
        return false;
    return true;
  }

  switch (DefMI->getOpcode()) {
  default:
    break;
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE: {
    if (SNaN)
      return true;
    // This can return a NaN if either operand is an sNaN, or if both operands
    // are NaN.
    return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
           (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
  }
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM: {
    // Only one needs to be known not-NaN, since it will be returned if the
    // other ends up being one.
    return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
           isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
  }
  }

  if (SNaN) {
    // FP operations quiet signaling NaNs. For now, just handle the ones
    // inserted during legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}

Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
                                  const MachinePointerInfo &MPO) {
  auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }

  if (const Value *V = MPO.V.dyn_cast<const Value *>()) {
    const Module *M = MF.getFunction().getParent();
    return V->getPointerAlignment(M->getDataLayout());
  }

  return Align(1);
}

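// Return (creating it on first use) the virtual register holding the incoming
// argument PhysReg, making sure the entry block has both the live-in and a
// COPY from it.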
Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        LLT RegTy) {
  DebugLoc DL; // FIXME: Is no location the right choice?
  MachineBasicBlock &EntryMBB = MF.front();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB &&
             "live-in copy not in entry block");
      return LiveIn;
    }

    // It's possible the incoming argument register and copy were added during
    // lowering, but later deleted due to being/becoming dead. If this happens,
    // re-insert the copy.
  } else {
    // The live-in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }

  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
      .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}

Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                        uint64_t Imm,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG: {
      LLT Ty = MRI.getType(Op1);
      return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
    }
    }
  }
  return None;
}

bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                  GISelKnownBits *KB) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  if (!DefSrcReg)
    return false;

  const MachineInstr &MI = *DefSrcReg->MI;
  const LLT Ty = MRI.getType(Reg);

  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned BitWidth = Ty.getScalarSizeInBits();
    const ConstantInt *CI = MI.getOperand(1).getCImm();
    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
  }
  case TargetOpcode::G_SHL: {
    // A left-shift of a constant one will have exactly one bit set because
    // shifting the bit off the end is undefined.

    // TODO: Constant splat
    if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (*ConstLHS == 1)
        return true;
    }

    break;
  }
  case TargetOpcode::G_LSHR: {
    if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (ConstLHS->isSignMask())
        return true;
    }

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // TODO: Probably should have a recursion depth guard since you could have
    // bitcasted vector elements.
    for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
      if (!isKnownToBeAPowerOfTwo(MI.getOperand(I).getReg(), MRI, KB))
        return false;
    }

    return true;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Only handle constants, since we would need to know whether the number
    // of leading zeros is greater than the truncation amount.
    const unsigned BitWidth = Ty.getScalarSizeInBits();
    for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
      auto Const = getConstantVRegVal(MI.getOperand(I).getReg(), MRI);
      if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
        return false;
    }

    return true;
  }
  default:
    break;
  }

  if (!KB)
    return false;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = KB->getKnownBits(Reg);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}

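// Analyses that must survive when a function falls back from GlobalISel to
// SelectionDAG; currently only the StackProtector analysis is preserved.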
void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}

static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
  unsigned Mul = OrigSize * TargetSize;
  unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
  return Mul / GCDSize;
}

LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    const LLT OrigElt = OrigTy.getElementType();

    if (TargetTy.isVector()) {
      const LLT TargetElt = TargetTy.getElementType();

      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
                                            TargetTy.getNumElements());
        // Prefer the original element type.
        ElementCount Mul = OrigTy.getElementCount() * TargetTy.getNumElements();
        return LLT::vector(Mul.divideCoefficientBy(GCDElts),
                           OrigTy.getElementType());
      }
    } else {
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigTy;
    }

    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::fixed_vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::fixed_vector(LCMSize / OrigSize, OrigTy);
  }

  unsigned LCMSize = getLCMSize(OrigSize, TargetSize);

  // Preserve pointer types.
  if (LCMSize == OrigSize)
    return OrigTy;
  if (LCMSize == TargetSize)
    return TargetTy;

  return LLT::scalar(LCMSize);
}

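// Return the largest type that divides both OrigTy and TargetTy, preferring
// to keep OrigTy's element type for vectors. For example, the GCD of
// <4 x s32> and <2 x s32> is <2 x s32>, and the GCD of s64 and s48 is s16.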
LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    if (TargetTy.isVector()) {
      LLT TargetElt = TargetTy.getElementType();
      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
                                        TargetTy.getNumElements());
        return LLT::scalarOrVector(GCD, OrigElt);
      }
    } else {
      // If the source is a vector of pointers, return a pointer element.
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigElt;
    }

    unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
    if (GCD == OrigElt.getSizeInBits())
      return OrigElt;

    // If we can't produce the original element type, we have to use a smaller
    // scalar.
    if (GCD < OrigElt.getSizeInBits())
      return LLT::scalar(GCD);
    return LLT::fixed_vector(GCD / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    // Try to preserve the original element type.
    LLT TargetElt = TargetTy.getElementType();
    if (TargetElt.getSizeInBits() == OrigSize)
      return OrigTy;
  }

  unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
  return LLT::scalar(GCD);
}

Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 for better potential for callers to simplify.
  if (FirstDefinedIdx == Mask.end())
    return 0;

  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return None;

  return SplatValue;
}

static bool isBuildVectorOp(unsigned Opcode) {
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
}

// TODO: Handle mixed undef elements.
static bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                       const MachineRegisterInfo &MRI,
                                       int64_t SplatValue) {
  if (!isBuildVectorOp(MI.getOpcode()))
    return false;

  const unsigned NumOps = MI.getNumOperands();
  for (unsigned I = 1; I != NumOps; ++I) {
    Register Element = MI.getOperand(I).getReg();
    if (!mi_match(Element, MRI, m_SpecificICst(SplatValue)))
      return false;
  }

  return true;
}

Optional<int64_t>
llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
                                  const MachineRegisterInfo &MRI) {
  if (!isBuildVectorOp(MI.getOpcode()))
    return None;

  const unsigned NumOps = MI.getNumOperands();
  Optional<int64_t> Scalar;
  for (unsigned I = 1; I != NumOps; ++I) {
    Register Element = MI.getOperand(I).getReg();
    int64_t ElementValue;
    if (!mi_match(Element, MRI, m_ICst(ElementValue)))
      return None;
    if (!Scalar)
      Scalar = ElementValue;
    else if (*Scalar != ElementValue)
      return None;
  }

  return Scalar;
}

bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
                                 const MachineRegisterInfo &MRI) {
  return isBuildVectorConstantSplat(MI, MRI, 0);
}

bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI) {
  return isBuildVectorConstantSplat(MI, MRI, -1);
}

Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI,
                                             const MachineRegisterInfo &MRI) {
  unsigned Opc = MI.getOpcode();
  if (!isBuildVectorOp(Opc))
    return None;
  if (auto Splat = getBuildVectorConstantSplat(MI, MRI))
    return RegOrConstant(*Splat);
  auto Reg = MI.getOperand(1).getReg();
  if (any_of(make_range(MI.operands_begin() + 2, MI.operands_end()),
             [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
    return None;
  return RegOrConstant(Reg);
}

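// Apply Match to the scalar constant that defines Reg, or to each element of
// a G_BUILD_VECTOR of constants; undef (G_IMPLICIT_DEF) sources are passed to
// Match as nullptr when AllowUndefs is set.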
bool llvm::matchUnaryPredicate(
    const MachineRegisterInfo &MRI, Register Reg,
    std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {

  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    return Match(nullptr);

  // TODO: Also handle fconstant
  if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
    return Match(Def->getOperand(1).getCImm());

  if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
    return false;

  for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
    Register SrcElt = Def->getOperand(I).getReg();
    const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
    if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
        !Match(SrcDef->getOperand(1).getCImm()))
      return false;
  }

  return true;
}

bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                          bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
    return Val == 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
                             bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
    return 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
                            ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  const auto &F = MBB.getParent()->getFunction();
  return F.hasOptSize() || F.hasMinSize() ||
         llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
}

unsigned llvm::getIntrinsicID(const MachineInstr &MI) {
#ifndef NDEBUG
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_INTRINSIC ||
         Opc == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
#endif
  return MI.getOperand(MI.getNumExplicitDefs()).getIntrinsicID();
}