//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <numeric>

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;
using namespace MIPatternMatch;

Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, Register Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  // Save the old register class to check whether
  // the change notifications will be required.
  // TODO: A better approach would be to pass
  // the observers to constrainRegToClass().
  auto *OldRegClass = MRI.getRegClassOrNull(Reg);
  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible
  // then create a copy between the new and the old register.
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    // FIXME: The copy needs to have the classes constrained for its operands.
    // Use operand's regbank to get the class for old register (Reg).
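    // Illustrative GMIR for the two cases below (class name is hypothetical):
    //   use:  %cst:rc = COPY %old  is inserted before InsertPt, which is then
    //         rewritten to read %cst instead of %old;
    //   def:  %old = COPY %cst:rc  is inserted after InsertPt, which is
    //         rewritten to define %cst.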
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changingInstr(*RegMO.getParent());
    }
    RegMO.setReg(ConstrainedReg);
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changedInstr(*RegMO.getParent());
    }
  } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target-independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: if it's a use, we
  // can skip constraining as the instruction defining the register would
  // constrain it.

  if (OpRC) {
    // Obtain the RC from the incoming regbank if it is a proper sub-class.
    // Operands can have multiple regbanks for a superclass that combines
    // different register types (e.g., AMDGPU's VGPR and AGPR). The regbank
    // ambiguity resolved by targets during regbankselect should not be
    // overridden.
    if (const auto *SubRC = TRI.getCommonSubClass(
            OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
      OpRC = SubRC;

    OpRC = TRI.getAllocatableClass(OpRC);
  }

  if (!OpRC) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here could be not enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Please notice that to
    // reproduce the issue we likely need a destination pattern of a selection
    // rule producing such extra copies, not just an input GMIR with them as
    // every existing target using selectImpl handles copies before calling it
    // and they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
                                  RegMO);
}

bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Register::isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}

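// Illustrative: two virtual registers of the same LLT with no register
// class/bank, or with identical class/bank constraints, can be substituted
// for one another; any mismatch or physical register blocks the replacement.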
bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
                         MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
  if (DstReg.isPhysical() || SrcReg.isPhysical())
    return false;
  // Give up if the types don't match.
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
    return false;
  // Replace if either DstReg has no constraints or the register
  // constraints match.
  return !MRI.getRegClassOrRegBank(DstReg) ||
         MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
}

bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // FIXME: This logic is mostly duplicated with
  // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
  // MachineInstr::isLabel?

  // Don't delete frame allocation labels.
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;
  // LIFETIME markers should be preserved even if they seem dead.
  if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
      MI.getOpcode() == TargetOpcode::LIFETIME_END)
    return false;

  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead
  // vregs.
  for (const auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    Register Reg = MO.getReg();
    if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}

static void reportGISelDiagnostic(DiagnosticSeverity Severity,
                                  MachineFunction &MF,
                                  const TargetPassConfig &TPC,
                                  MachineOptimizationRemarkEmitter &MORE,
                                  MachineOptimizationRemarkMissed &R) {
  bool IsFatal = Severity == DS_Error && TPC.isGlobalISelAbortEnabled();
  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsFatal)
    report_fatal_error(Twine(R.getMsg()));
  else
    MORE.emit(R);
}

void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
  reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
}

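// Typical call site (illustrative; the pass name and message are examples):
//   reportGISelFailure(MF, TPC, MORE, "instruction-select",
//                      "unable to select", MI);
// When GlobalISel aborts are enabled this ends compilation via
// report_fatal_error; otherwise it emits a missed-optimization remark.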
void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

Optional<APInt> llvm::getIConstantVRegVal(Register VReg,
                                          const MachineRegisterInfo &MRI) {
  Optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
      VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return None;
  return ValAndVReg->Value;
}

Optional<int64_t>
llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
  Optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
  return None;
}

namespace {

typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
typedef std::function<Optional<APInt>(const MachineInstr *MI)> GetAPCstFn;

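// Walks up through COPY / G_TRUNC / G_SEXT / G_ZEXT (and optionally G_ANYEXT)
// to a constant definition, then replays the recorded size changes on the
// constant's value. Illustrative GMIR:
//   %c:_(s32) = G_CONSTANT i32 42
//   %x:_(s64) = G_SEXT %c(s32)
// Querying %x yields {APInt(64, 42), %c}.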
Optional<ValueAndVReg> getConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
    GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
    bool LookThroughAnyExt = false) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;

  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_ANYEXT:
      if (!LookThroughAnyExt)
        return None;
      [[fallthrough]];
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (Register::isPhysicalRegister(VReg))
        return None;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return None;
    }
  }
  if (!MI || !IsConstantOpcode(MI))
    return None;

  Optional<APInt> MaybeVal = getAPCstValue(MI);
  if (!MaybeVal)
    return None;
  APInt &Val = *MaybeVal;
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  return ValueAndVReg{Val, VReg};
}

bool isIConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_CONSTANT;
}

bool isFConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
}

bool isAnyConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  unsigned Opc = MI->getOpcode();
  return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
}

Optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    return CstVal.getCImm()->getValue();
  return None;
}

Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    return CstVal.getCImm()->getValue();
  if (CstVal.isFPImm())
    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  return None;
}

} // end anonymous namespace

Optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
                                           getCImmAsAPInt, LookThroughInstrs);
}

Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool LookThroughAnyExt) {
  return getConstantVRegValWithLookThrough(
      VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
      LookThroughAnyExt);
}

Optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  auto Reg = getConstantVRegValWithLookThrough(
      VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
  if (!Reg)
    return None;
  return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
                        Reg->VReg};
}

const ConstantFP *
llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

Optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
  Register DefSrcReg = Reg;
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return None;
  unsigned Opc = DefMI->getOpcode();
  while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
    Opc = DefMI->getOpcode();
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}

MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}

Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->Reg : Register();
}

MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

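// Illustrative: getAPFloatFromSize(1.0, 32) returns APFloat(1.0f) directly,
// while a 16-bit request converts the double through IEEEhalf with
// round-to-nearest-even; any other size is unreachable.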
APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}

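// Illustrative: with %a = G_CONSTANT i32 6 and %b = G_CONSTANT i32 4,
// ConstantFoldBinOp(TargetOpcode::G_ADD, %a, %b, MRI) yields APInt(32, 10).
// Division and remainder by a zero constant deliberately return None.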
Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                        const Register Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
  if (!MaybeOp2Cst)
    return None;

  auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
  if (!MaybeOp1Cst)
    return None;

  const APInt &C1 = MaybeOp1Cst->Value;
  const APInt &C2 = MaybeOp2Cst->Value;
  switch (Opcode) {
  default:
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_PTR_ADD:
    return C1 + C2;
  case TargetOpcode::G_AND:
    return C1 & C2;
  case TargetOpcode::G_ASHR:
    return C1.ashr(C2);
  case TargetOpcode::G_LSHR:
    return C1.lshr(C2);
  case TargetOpcode::G_MUL:
    return C1 * C2;
  case TargetOpcode::G_OR:
    return C1 | C2;
  case TargetOpcode::G_SHL:
    return C1 << C2;
  case TargetOpcode::G_SUB:
    return C1 - C2;
  case TargetOpcode::G_XOR:
    return C1 ^ C2;
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  case TargetOpcode::G_SMIN:
    return APIntOps::smin(C1, C2);
  case TargetOpcode::G_SMAX:
    return APIntOps::smax(C1, C2);
  case TargetOpcode::G_UMIN:
    return APIntOps::umin(C1, C2);
  case TargetOpcode::G_UMAX:
    return APIntOps::umax(C1, C2);
  }

  return None;
}

Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                                            const Register Op2,
                                            const MachineRegisterInfo &MRI) {
  const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
  if (!Op2Cst)
    return None;

  const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
  if (!Op1Cst)
    return None;

  APFloat C1 = Op1Cst->getValueAPF();
  const APFloat &C2 = Op2Cst->getValueAPF();
  switch (Opcode) {
  case TargetOpcode::G_FADD:
    C1.add(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FSUB:
    C1.subtract(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FMUL:
    C1.multiply(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FDIV:
    C1.divide(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FREM:
    C1.mod(C2);
    return C1;
  case TargetOpcode::G_FCOPYSIGN:
    C1.copySign(C2);
    return C1;
  case TargetOpcode::G_FMINNUM:
    return minnum(C1, C2);
  case TargetOpcode::G_FMAXNUM:
    return maxnum(C1, C2);
  case TargetOpcode::G_FMINIMUM:
    return minimum(C1, C2);
  case TargetOpcode::G_FMAXIMUM:
    return maximum(C1, C2);
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
    // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
    // follow the IEEE behavior for signaling NaNs and follow libm's fmin/fmax,
    // and currently there isn't a nice wrapper in APFloat for the version with
    // correct sNaN handling.
    break;
  default:
    break;
  }

  return None;
}

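// Illustrative: folding G_ADD over two G_BUILD_VECTORs of G_CONSTANTs
// <6, 2> and <4, 3> yields {10, 5}; any non-constant element abandons the
// fold and returns an empty vector.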
SmallVector<APInt>
llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                              const Register Op2,
                              const MachineRegisterInfo &MRI) {
  auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
  if (!SrcVec2)
    return SmallVector<APInt>();

  auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
  if (!SrcVec1)
    return SmallVector<APInt>();

  SmallVector<APInt> FoldedElements;
  for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
    auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
                                      SrcVec2->getSourceReg(Idx), MRI);
    if (!MaybeCst)
      return SmallVector<APInt>();
    FoldedElements.push_back(*MaybeCst);
  }
  return FoldedElements;
}

bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  const TargetMachine &TM = DefMI->getMF()->getTarget();
  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
    return !FPVal->getValueAPF().isNaN() ||
           (SNaN && !FPVal->getValueAPF().isSignaling());
  }

  if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
    for (const auto &Op : DefMI->uses())
      if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
        return false;
    return true;
  }

  switch (DefMI->getOpcode()) {
  default:
    break;
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FREM:
  case TargetOpcode::G_FSIN:
  case TargetOpcode::G_FCOS:
  case TargetOpcode::G_FMA:
  case TargetOpcode::G_FMAD:
    if (SNaN)
      return true;

    // TODO: Need isKnownNeverInfinity
    return false;
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE: {
    if (SNaN)
      return true;
    // This can return a NaN if either operand is an sNaN, or if both operands
    // are NaN.
    return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
           (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
  }
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM: {
    // Only one needs to be known not-nan, since it will be returned if the
    // other ends up being one.
    return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
           isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
  }
  }

  if (SNaN) {
    // FP operations quiet signaling NaNs. For now, just handle the ones
    // inserted during legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}

Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
                                  const MachinePointerInfo &MPO) {
  auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }

  if (const Value *V = MPO.V.dyn_cast<const Value *>()) {
    const Module *M = MF.getFunction().getParent();
    return V->getPointerAlignment(M->getDataLayout());
  }

  return Align(1);
}

Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        const DebugLoc &DL, LLT RegTy) {
  MachineBasicBlock &EntryMBB = MF.front();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB &&
             "live-in copy not in entry block");
      return LiveIn;
    }

    // It's possible the incoming argument register and copy was added during
    // lowering, but later deleted due to being/becoming dead. If this happens,
    // re-insert the copy.
  } else {
    // The live in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }

  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
      .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}

Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                        uint64_t Imm,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG: {
      LLT Ty = MRI.getType(Op1);
      return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
    }
    }
  }
  return None;
}

Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
                                               Register Src,
                                               const MachineRegisterInfo &MRI) {
  assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
  if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
    APFloat DstVal(getFltSemanticForLLT(DstTy));
    DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
                            APFloat::rmNearestTiesToEven);
    return DstVal;
  }
  return None;
}

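// Illustrative: for %x:_(s32) = G_CONSTANT i32 16, ConstantFoldCTLZ(%x, MRI)
// returns {27}; for a G_BUILD_VECTOR source it folds element-wise, and any
// non-constant element yields None.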
Optional<SmallVector<unsigned>>
llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
  LLT Ty = MRI.getType(Src);
  SmallVector<unsigned> FoldedCTLZs;
  auto tryFoldScalar = [&](Register R) -> Optional<unsigned> {
    auto MaybeCst = getIConstantVRegVal(R, MRI);
    if (!MaybeCst)
      return None;
    return MaybeCst->countLeadingZeros();
  };
  if (Ty.isVector()) {
    // Try to constant fold each element.
    auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
    if (!BV)
      return None;
    for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
      if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
        FoldedCTLZs.emplace_back(*MaybeFold);
        continue;
      }
      return None;
    }
    return FoldedCTLZs;
  }
  if (auto MaybeCst = tryFoldScalar(Src)) {
    FoldedCTLZs.emplace_back(*MaybeCst);
    return FoldedCTLZs;
  }
  return None;
}

bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                  GISelKnownBits *KB) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  if (!DefSrcReg)
    return false;

  const MachineInstr &MI = *DefSrcReg->MI;
  const LLT Ty = MRI.getType(Reg);

  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned BitWidth = Ty.getScalarSizeInBits();
    const ConstantInt *CI = MI.getOperand(1).getCImm();
    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
  }
  case TargetOpcode::G_SHL: {
    // A left-shift of a constant one will have exactly one bit set because
    // shifting the bit off the end is undefined.

    // TODO: Constant splat
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (*ConstLHS == 1)
        return true;
    }

    break;
  }
  case TargetOpcode::G_LSHR: {
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (ConstLHS->isSignMask())
        return true;
    }

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // TODO: Probably should have a recursion depth guard since you could have
    // bitcasted vector elements.
    for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
      if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
        return false;

    return true;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Only handle constants since we would need to know if the number of
    // leading zeros is greater than the truncation amount.
    const unsigned BitWidth = Ty.getScalarSizeInBits();
    for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
      auto Const = getIConstantVRegVal(MO.getReg(), MRI);
      if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
        return false;
    }

    return true;
  }
  default:
    break;
  }

  if (!KB)
    return false;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = KB->getKnownBits(Reg);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}

void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}

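// Illustrative expectations (assumed from the lcm arithmetic below):
//   getLCMType(<3 x s32>, <4 x s32>) == <12 x s32>
//   getLCMType(s32, s64)             == s64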
LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    const LLT OrigElt = OrigTy.getElementType();

    if (TargetTy.isVector()) {
      const LLT TargetElt = TargetTy.getElementType();

      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCDElts =
            std::gcd(OrigTy.getNumElements(), TargetTy.getNumElements());
        // Prefer the original element type.
        ElementCount Mul = OrigTy.getElementCount() * TargetTy.getNumElements();
        return LLT::vector(Mul.divideCoefficientBy(GCDElts),
                           OrigTy.getElementType());
      }
    } else {
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigTy;
    }

    unsigned LCMSize = std::lcm(OrigSize, TargetSize);
    return LLT::fixed_vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    unsigned LCMSize = std::lcm(OrigSize, TargetSize);
    return LLT::fixed_vector(LCMSize / OrigSize, OrigTy);
  }

  unsigned LCMSize = std::lcm(OrigSize, TargetSize);

  // Preserve pointer types.
  if (LCMSize == OrigSize)
    return OrigTy;
  if (LCMSize == TargetSize)
    return TargetTy;

  return LLT::scalar(LCMSize);
}

LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
  if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
      (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
    return getLCMType(OrigTy, TargetTy);

  unsigned OrigTyNumElts = OrigTy.getNumElements();
  unsigned TargetTyNumElts = TargetTy.getNumElements();
  if (OrigTyNumElts % TargetTyNumElts == 0)
    return OrigTy;

  unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
  return LLT::scalarOrVector(ElementCount::getFixed(NumElts),
                             OrigTy.getElementType());
}

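// Illustrative expectations (assumed from the gcd arithmetic below):
//   getGCDType(<4 x s32>, <3 x s32>) == s32
//   getGCDType(s64, s32)             == s32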
LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    if (TargetTy.isVector()) {
      LLT TargetElt = TargetTy.getElementType();
      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCD = std::gcd(OrigTy.getNumElements(), TargetTy.getNumElements());
        return LLT::scalarOrVector(ElementCount::getFixed(GCD), OrigElt);
      }
    } else {
      // If the source is a vector of pointers, return a pointer element.
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigElt;
    }

    unsigned GCD = std::gcd(OrigSize, TargetSize);
    if (GCD == OrigElt.getSizeInBits())
      return OrigElt;

    // If we can't produce the original element type, we have to use a smaller
    // scalar.
    if (GCD < OrigElt.getSizeInBits())
      return LLT::scalar(GCD);
    return LLT::fixed_vector(GCD / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    // Try to preserve the original element type.
    LLT TargetElt = TargetTy.getElementType();
    if (TargetElt.getSizeInBits() == OrigSize)
      return OrigTy;
  }

  unsigned GCD = std::gcd(OrigSize, TargetSize);
  return LLT::scalar(GCD);
}

Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 for better potential for callers to simplify.
  if (FirstDefinedIdx == Mask.end())
    return 0;

  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return None;

  return SplatValue;
}

static bool isBuildVectorOp(unsigned Opcode) {
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
}

namespace {

Optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
                                           const MachineRegisterInfo &MRI,
                                           bool AllowUndef) {
  MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
  if (!MI)
    return None;

  if (!isBuildVectorOp(MI->getOpcode()))
    return None;

  Optional<ValueAndVReg> SplatValAndReg;
  for (MachineOperand &Op : MI->uses()) {
    Register Element = Op.getReg();
    auto ElementValAndReg =
        getAnyConstantVRegValWithLookThrough(Element, MRI, true, true);

    // If AllowUndef, treat undef as a value that will result in a constant
    // splat.
    if (!ElementValAndReg) {
      if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
        continue;
      return None;
    }

    // Record the splat value.
    if (!SplatValAndReg)
      SplatValAndReg = ElementValAndReg;

    // A different constant than the one already recorded means this is not a
    // constant splat.
    if (SplatValAndReg->Value != ElementValAndReg->Value)
      return None;
  }

  return SplatValAndReg;
}

} // end anonymous namespace

bool llvm::isBuildVectorConstantSplat(const Register Reg,
                                      const MachineRegisterInfo &MRI,
                                      int64_t SplatValue, bool AllowUndef) {
  if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
    return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
  return false;
}

bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
                                      const MachineRegisterInfo &MRI,
                                      int64_t SplatValue, bool AllowUndef) {
  return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
                                    AllowUndef);
}

Optional<APInt> llvm::getIConstantSplatVal(const Register Reg,
                                           const MachineRegisterInfo &MRI) {
  if (auto SplatValAndReg =
          getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
    Optional<ValueAndVReg> ValAndVReg =
        getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
    return ValAndVReg->Value;
  }

  return None;
}

Optional<APInt> llvm::getIConstantSplatVal(const MachineInstr &MI,
                                           const MachineRegisterInfo &MRI) {
  return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
}

Optional<int64_t>
llvm::getIConstantSplatSExtVal(const Register Reg,
                               const MachineRegisterInfo &MRI) {
  if (auto SplatValAndReg =
          getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
    return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
  return None;
}

Optional<int64_t>
llvm::getIConstantSplatSExtVal(const MachineInstr &MI,
                               const MachineRegisterInfo &MRI) {
  return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
}

Optional<FPValueAndVReg> llvm::getFConstantSplat(Register VReg,
                                                 const MachineRegisterInfo &MRI,
                                                 bool AllowUndef) {
  if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
    return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
  return None;
}

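// Illustrative: %v:_(<4 x s32>) = G_BUILD_VECTOR %c, %c, %c, %c with
// %c:_(s32) = G_CONSTANT i32 0 is an all-zeros splat; with AllowUndef set,
// G_IMPLICIT_DEF elements are tolerated as well.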
bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
                                 const MachineRegisterInfo &MRI,
                                 bool AllowUndef) {
  return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
}

bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                bool AllowUndef) {
  return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
}

Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI,
                                             const MachineRegisterInfo &MRI) {
  unsigned Opc = MI.getOpcode();
  if (!isBuildVectorOp(Opc))
    return None;
  if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
    return RegOrConstant(*Splat);
  auto Reg = MI.getOperand(1).getReg();
  if (any_of(make_range(MI.operands_begin() + 2, MI.operands_end()),
             [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
    return None;
  return RegOrConstant(Reg);
}

static bool isConstantScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI,
                             bool AllowFP = true,
                             bool AllowOpaqueConstants = true) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_IMPLICIT_DEF:
    return true;
  case TargetOpcode::G_FCONSTANT:
    return AllowFP;
  case TargetOpcode::G_GLOBAL_VALUE:
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_BLOCK_ADDR:
  case TargetOpcode::G_JUMP_TABLE:
    return AllowOpaqueConstants;
  default:
    return false;
  }
}

bool llvm::isConstantOrConstantVector(MachineInstr &MI,
                                      const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
    return true;
  GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
  if (!BV)
    return false;
  for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
    if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
        getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
      continue;
    return false;
  }
  return true;
}

bool llvm::isConstantOrConstantVector(const MachineInstr &MI,
                                      const MachineRegisterInfo &MRI,
                                      bool AllowFP, bool AllowOpaqueConstants) {
  if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
    return true;

  if (!isBuildVectorOp(MI.getOpcode()))
    return false;

  const unsigned NumOps = MI.getNumOperands();
  for (unsigned I = 1; I != NumOps; ++I) {
    const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
    if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
      return false;
  }

  return true;
}

Optional<APInt>
llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
                                      const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
    return C->Value;
  auto MaybeCst = getIConstantSplatSExtVal(MI, MRI);
  if (!MaybeCst)
    return None;
  const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
  return APInt(ScalarSize, *MaybeCst, true);
}

bool llvm::isNullOrNullSplat(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI, bool AllowUndefs) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_IMPLICIT_DEF:
    return AllowUndefs;
  case TargetOpcode::G_CONSTANT:
    return MI.getOperand(1).getCImm()->isNullValue();
  case TargetOpcode::G_FCONSTANT: {
    const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
    return FPImm->isZero() && !FPImm->isNegative();
  }
  default:
    if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
      return false;
    return isBuildVectorAllZeros(MI, MRI);
  }
}

bool llvm::isAllOnesOrAllOnesSplat(const MachineInstr &MI,
                                   const MachineRegisterInfo &MRI,
                                   bool AllowUndefs) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_IMPLICIT_DEF:
    return AllowUndefs;
  case TargetOpcode::G_CONSTANT:
    return MI.getOperand(1).getCImm()->isAllOnesValue();
  default:
    if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
      return false;
    return isBuildVectorAllOnes(MI, MRI);
  }
}

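// Illustrative use (the lambda is an example predicate):
//   bool AllZero = matchUnaryPredicate(MRI, Reg, [](const Constant *C) {
//     return C && cast<ConstantInt>(C)->isZero();
//   });
// This returns true for a G_CONSTANT zero as well as for a G_BUILD_VECTOR
// whose elements are all zero constants.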
bool llvm::matchUnaryPredicate(
    const MachineRegisterInfo &MRI, Register Reg,
    std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {

  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    return Match(nullptr);

  // TODO: Also handle fconstant
  if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
    return Match(Def->getOperand(1).getCImm());

  if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
    return false;

  for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
    Register SrcElt = Def->getOperand(I).getReg();
    const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
    if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
        !Match(SrcDef->getOperand(1).getCImm()))
      return false;
  }

  return true;
}

bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                          bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
    return Val == 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

bool llvm::isConstFalseVal(const TargetLowering &TLI, int64_t Val,
                           bool IsVector, bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return ~Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == 0;
  }
  llvm_unreachable("Invalid boolean contents");
}

int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
                             bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
    return 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
                            ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  const auto &F = MBB.getParent()->getFunction();
  return F.hasOptSize() || F.hasMinSize() ||
         llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
}

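// Erases MI, first queueing the defining instructions of its virtual register
// uses in DeadInstChain so that eraseInstrs below can clean up chains of
// instructions that become trivially dead as a result.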
void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                            LostDebugLocObserver *LocObserver,
                            SmallInstListTy &DeadInstChain) {
  for (MachineOperand &Op : MI.uses()) {
    if (Op.isReg() && Op.getReg().isVirtual())
      DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
  }
  LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
  DeadInstChain.remove(&MI);
  MI.eraseFromParent();
  if (LocObserver)
    LocObserver->checkpoint(false);
}

void llvm::eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs,
                       MachineRegisterInfo &MRI,
                       LostDebugLocObserver *LocObserver) {
  SmallInstListTy DeadInstChain;
  for (MachineInstr *MI : DeadInstrs)
    saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);

  while (!DeadInstChain.empty()) {
    MachineInstr *Inst = DeadInstChain.pop_back_val();
    if (!isTriviallyDead(*Inst, MRI))
      continue;
    saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
  }
}

void llvm::eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver) {
  return eraseInstrs({&MI}, MRI, LocObserver);
}

void llvm::salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI) {
  for (auto &Def : MI.defs()) {
    assert(Def.isReg() && "Must be a reg");

    SmallVector<MachineOperand *, 16> DbgUsers;
    for (auto &MOUse : MRI.use_operands(Def.getReg())) {
      MachineInstr *DbgValue = MOUse.getParent();
      // Ignore partially formed DBG_VALUEs.
      if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {
        DbgUsers.push_back(&MOUse);
      }
    }

    if (!DbgUsers.empty()) {
      salvageDebugInfoForDbgValue(MRI, MI, DbgUsers);
    }
  }
}