//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;

unsigned llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, unsigned Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}

unsigned llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, const MachineOperand &RegMO,
    unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  unsigned ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible
  // then create a copy between the new and the old register.
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
  }
  return ConstrainedReg;
}
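// Illustrative sketch, not part of this file: a hypothetical target selector
// can use the overload above to force an operand into a specific register
// class before emitting a target instruction. `Target::GPR32RegClass` is a
// stand-in for whatever class the target actually requires.
//
//   Register Src = constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I,
//                                           Target::GPR32RegClass,
//                                           I.getOperand(1), /*OpIdx=*/1);
//   // If Src differs from the original register, a COPY into the fresh vreg
//   // has been inserted before I (or after I, for a def operand).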
unsigned llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    const MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: if it's a use, we
  // can skip constraining as the instruction defining the register would
  // constrain it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handle this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);

  if (!RegClass) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here may not be enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Note that to reproduce
    // the issue we likely need a destination pattern of a selection rule
    // producing such extra copies, not just an input GMIR with them, as every
    // existing target using selectImpl handles copies before calling it and
    // they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass,
                                  RegMO, OpIdx);
}

bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Register::isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(),
                                       MO, OpI));

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}
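// Illustrative sketch, not from the LLVM tree: selectors typically call the
// helper above as the final step after mutating a generic instruction into a
// target instruction. `Target::ADDrr` is a hypothetical stand-in opcode.
//
//   I.setDesc(TII.get(Target::ADDrr)); // hypothetical selected opcode
//   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);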
bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead
  // vregs.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    Register Reg = MO.getReg();
    if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    MORE.emit(R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}
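// Illustrative sketch, not from the LLVM tree: a pass that cannot select an
// instruction typically reports it through the overload above, e.g.:
//
//   reportGISelFailure(MF, TPC, MORE, "instruction-select", "cannot select",
//                      I);
//
// With the GlobalISel abort enabled this is fatal; otherwise it emits a
// missed-optimization remark and marks the function as FailedISel so the
// SelectionDAG fallback can take over.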
Optional<int64_t> llvm::getConstantVRegVal(unsigned VReg,
                                           const MachineRegisterInfo &MRI) {
  Optional<ValueAndVReg> ValAndVReg =
      getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return None;
  return ValAndVReg->Value;
}

Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
    unsigned VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool HandleFConstant) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;
  auto IsConstantOpcode = [HandleFConstant](unsigned Opcode) {
    return Opcode == TargetOpcode::G_CONSTANT ||
           (HandleFConstant && Opcode == TargetOpcode::G_FCONSTANT);
  };
  auto GetImmediateValue = [HandleFConstant,
                            &MRI](const MachineInstr &MI) -> Optional<APInt> {
    const MachineOperand &CstVal = MI.getOperand(1);
    if (!CstVal.isImm() && !CstVal.isCImm() &&
        (!HandleFConstant || !CstVal.isFPImm()))
      return None;
    if (!CstVal.isFPImm()) {
      unsigned BitWidth =
          MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
      APInt Val = CstVal.isImm() ? APInt(BitWidth, CstVal.getImm())
                                 : CstVal.getCImm()->getValue();
      assert(Val.getBitWidth() == BitWidth &&
             "Value bitwidth doesn't match definition type");
      return Val;
    }
    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  };
  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI->getOpcode()) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      // Remember the extension/truncation so it can be replayed on the
      // constant once it is found.
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (Register::isPhysicalRegister(VReg))
        return None;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return None;
    }
  }
  if (!MI || !IsConstantOpcode(MI->getOpcode()))
    return None;

  Optional<APInt> MaybeVal = GetImmediateValue(*MI);
  if (!MaybeVal)
    return None;
  APInt &Val = *MaybeVal;
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  // ValueAndVReg carries an int64_t; give up on constants wider than 64 bits.
  if (Val.getBitWidth() > 64)
    return None;

  return ValueAndVReg{Val.getSExtValue(), VReg};
}

const ConstantFP *llvm::getConstantFPVRegVal(unsigned VReg,
                                             const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return nullptr;
  // Walk through same-typed COPYs to the underlying definition.
  while (DefMI->getOpcode() == TargetOpcode::COPY) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid() || SrcTy != DstTy)
      break;
    DefMI = MRI.getVRegDef(SrcReg);
  }
  return DefMI;
}
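// Illustrative sketch, not from the LLVM tree: getOpcodeDef (below) combines
// the copy-walk above with an opcode check, so matchers can look through
// same-typed copies in one call. `handleConstant` is a hypothetical helper.
//
//   if (MachineInstr *Def = getOpcodeDef(TargetOpcode::G_FCONSTANT, Reg, MRI))
//     handleConstant(Def->getOperand(1).getFPImm()->getValueAPF());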
MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}

Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
                                        const unsigned Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
  if (MaybeOp1Cst && MaybeOp2Cst) {
    LLT Ty = MRI.getType(Op1);
    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
    APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_ADD:
      return C1 + C2;
    case TargetOpcode::G_AND:
      return C1 & C2;
    case TargetOpcode::G_ASHR:
      return C1.ashr(C2);
    case TargetOpcode::G_LSHR:
      return C1.lshr(C2);
    case TargetOpcode::G_MUL:
      return C1 * C2;
    case TargetOpcode::G_OR:
      return C1 | C2;
    case TargetOpcode::G_SHL:
      return C1 << C2;
    case TargetOpcode::G_SUB:
      return C1 - C2;
    case TargetOpcode::G_XOR:
      return C1 ^ C2;
    case TargetOpcode::G_UDIV:
      if (!C2.getBoolValue())
        break;
      return C1.udiv(C2);
    case TargetOpcode::G_SDIV:
      if (!C2.getBoolValue())
        break;
      return C1.sdiv(C2);
    case TargetOpcode::G_UREM:
      if (!C2.getBoolValue())
        break;
      return C1.urem(C2);
    case TargetOpcode::G_SREM:
      if (!C2.getBoolValue())
        break;
      return C1.srem(C2);
    }
  }
  return None;
}

bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  if (DefMI->getFlag(MachineInstr::FmNoNans))
    return true;

  if (SNaN) {
    // FP operations quiet sNaNs. For now, just handle the ones inserted
    // during legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}

Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const unsigned Op1,
                                        uint64_t Imm,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    LLT Ty = MRI.getType(Op1);
    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG:
      return C1.trunc(Imm).sext(C1.getBitWidth());
    }
  }
  return None;
}

void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}

MVT llvm::getMVTForLLT(LLT Ty) {
  if (!Ty.isVector())
    return MVT::getIntegerVT(Ty.getSizeInBits());

  return MVT::getVectorVT(
      MVT::getIntegerVT(Ty.getElementType().getSizeInBits()),
      Ty.getNumElements());
}

LLT llvm::getLLTForMVT(MVT Ty) {
  if (!Ty.isVector())
    return LLT::scalar(Ty.getSizeInBits());

  return LLT::vector(Ty.getVectorNumElements(),
                     Ty.getVectorElementType().getSizeInBits());
}
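// Illustrative note, not from the LLVM tree: LLT carries no float/int
// distinction, so getMVTForLLT always produces integer MVTs and the pair of
// helpers above only round-trips sizes and element counts, e.g.:
//
//   getMVTForLLT(LLT::scalar(32))    == MVT::i32
//   getMVTForLLT(LLT::vector(4, 16)) == MVT::v4i16
//   getLLTForMVT(MVT::f32)           == LLT::scalar(32)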