//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;

// Try to constrain \p Reg to \p RegClass in place. If that is not possible
// (the vreg already has an incompatible class/bank constraint), materialize a
// fresh vreg of \p RegClass, insert a COPY from \p Reg into it immediately
// before \p InsertPt, and return the new vreg. Otherwise returns \p Reg
// itself, now constrained to \p RegClass.
unsigned llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI,
                                   MachineInstr &InsertPt, unsigned Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI)) {
    unsigned NewReg = MRI.createVirtualRegister(&RegClass);
    BuildMI(*InsertPt.getParent(), InsertPt, InsertPt.getDebugLoc(),
            TII.get(TargetOpcode::COPY), NewReg)
        .addReg(Reg);
    return NewReg;
  }

  return Reg;
}

// Constrain the register in operand \p RegMO (operand index \p OpIdx of an
// instruction described by \p II) to the register class required by that
// operand, inserting a COPY via constrainRegToClass when the existing vreg
// cannot be constrained directly. Returns the (possibly new) register to use
// for the operand. Virtual registers only; physical registers are asserted
// against.
unsigned llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    const MachineOperand &RegMO, unsigned OpIdx) {
  unsigned Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: If it's a use, we can
  // skip constraining as the instruction defining the register would constrain
  // it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handled this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);

  if (!RegClass) {
    // No class could be determined: only legal for target-independent
    // opcodes or use operands (the def site will constrain the vreg).
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here could be not enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Please notice that to
    // reproduce the issue we likely need a destination pattern of a selection
    // rule producing such extra copies, not just an input GMIR with them as
    // every existing target using selectImpl handles copies before calling it
    // and they never reach this function.
    return Reg;
  }
  return constrainRegToClass(MRI, TII, RBI, InsertPt, Reg, *RegClass);
}

// Constrain every explicit register operand of the already-selected
// instruction \p I to the class its MCInstrDesc demands, and establish any
// TIED_TO use/def constraints the descriptor specifies. Physical registers
// and null (reg 0) operands are left untouched. Always returns true.
bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    unsigned Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (TRI.isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(),
                                       MO, OpI));

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}

// Returns true if \p MI can be deleted without changing observable behavior:
// it must be safe to move (no side effects / ordered memory ops) -- PHIs are
// treated as movable despite isSafeToMove -- and every register it defines
// must be a vreg with no non-debug uses.
bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // If we can move an instruction, we can remove it.  Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead vregs.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    unsigned Reg = MO.getReg();
    // A def of a physical register, or of a vreg with remaining non-debug
    // uses, keeps the instruction alive.
    if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
        !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}

// Record a GlobalISel failure for \p MF: mark the function FailedISel, then
// either abort compilation (when -global-isel-abort is enabled) with the
// remark's message, or emit \p R as a missed-optimization remark.
void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    MORE.emit(R);
}

// Convenience overload: build a remark named "GISelFailure" from \p PassName,
// \p Msg and the offending instruction \p MI, then forward to the main
// reportGISelFailure above.
void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

// If \p VReg is defined by a G_CONSTANT whose value fits in 64 bits, return
// that value (sign-extended for CImm operands); otherwise return None.
// NOTE(review): getVRegDef is dereferenced unconditionally -- relies on the
// generic-MIR invariant that every vreg has a defining instruction.
Optional<int64_t> llvm::getConstantVRegVal(unsigned VReg,
                                           const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (MI->getOpcode() != TargetOpcode::G_CONSTANT)
    return None;

  if (MI->getOperand(1).isImm())
    return MI->getOperand(1).getImm();

  // CImm wider than 64 bits cannot be represented in the int64_t result.
  if (MI->getOperand(1).isCImm() &&
      MI->getOperand(1).getCImm()->getBitWidth() <= 64)
    return MI->getOperand(1).getCImm()->getSExtValue();

  return None;
}

// If \p VReg is defined by a G_FCONSTANT, return its ConstantFP operand,
// otherwise nullptr. Same getVRegDef non-null invariant as above.
const llvm::ConstantFP* llvm::getConstantFPVRegVal(unsigned VReg,
                                                   const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

// Walk backwards from \p Reg through same-type COPYs and return the defining
// instruction if its opcode is \p Opcode, otherwise nullptr. The walk stops
// at a COPY whose source type differs (or is invalid), since that COPY is not
// a plain value forwarder.
llvm::MachineInstr *llvm::getOpcodeDef(unsigned Opcode, unsigned Reg,
                                       const MachineRegisterInfo &MRI) {
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return nullptr;
  while (DefMI->getOpcode() == TargetOpcode::COPY) {
    unsigned SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid() || SrcTy != DstTy)
      break;
    DefMI = MRI.getVRegDef(SrcReg);
  }
  return DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

// Convert \p Val to an APFloat of bit width \p Size. Only 16, 32 and 64 bits
// are supported; any other size is unreachable. The 16-bit case converts the
// double through IEEEhalf with round-to-nearest-even.
APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}

// Constant-fold the generic binary operation \p Opcode over the G_CONSTANT
// values feeding \p Op1 and \p Op2, at the bit width of \p Op1's LLT type.
// Returns None if either operand is not a foldable constant, the opcode is
// unsupported, or a division/remainder has a zero divisor.
Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
                                        const unsigned Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
  if (MaybeOp1Cst && MaybeOp2Cst) {
    LLT Ty = MRI.getType(Op1);
    // Both constants are materialized at Op1's width, sign-extended from the
    // int64_t returned by getConstantVRegVal.
    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
    APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_ADD:
      return C1 + C2;
    case TargetOpcode::G_AND:
      return C1 & C2;
    case TargetOpcode::G_ASHR:
      return C1.ashr(C2);
    case TargetOpcode::G_LSHR:
      return C1.lshr(C2);
    case TargetOpcode::G_MUL:
      return C1 * C2;
    case TargetOpcode::G_OR:
      return C1 | C2;
    case TargetOpcode::G_SHL:
      return C1 << C2;
    case TargetOpcode::G_SUB:
      return C1 - C2;
    case TargetOpcode::G_XOR:
      return C1 ^ C2;
    case TargetOpcode::G_UDIV:
      // Division by zero is not folded.
      if (!C2.getBoolValue())
        break;
      return C1.udiv(C2);
    case TargetOpcode::G_SDIV:
      if (!C2.getBoolValue())
        break;
      return C1.sdiv(C2);
    case TargetOpcode::G_UREM:
      if (!C2.getBoolValue())
        break;
      return C1.urem(C2);
    case TargetOpcode::G_SREM:
      if (!C2.getBoolValue())
        break;
      return C1.srem(C2);
    }
  }
  return None;
}

// Declare the analyses that survive a fallback from GlobalISel to
// SelectionDAG, so the pass manager does not recompute them.
void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}