1 //==-- llvm/CodeGen/GlobalISel/Utils.h ---------------------------*- C++ -*-==// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file This file declares the API of helper functions used throughout the 10 /// GlobalISel pipeline. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #ifndef LLVM_CODEGEN_GLOBALISEL_UTILS_H 15 #define LLVM_CODEGEN_GLOBALISEL_UTILS_H 16 17 #include "GISelWorkList.h" 18 #include "LostDebugLocObserver.h" 19 #include "llvm/ADT/APFloat.h" 20 #include "llvm/ADT/StringRef.h" 21 #include "llvm/CodeGen/MachineBasicBlock.h" 22 #include "llvm/CodeGen/Register.h" 23 #include "llvm/Support/Alignment.h" 24 #include "llvm/Support/LowLevelTypeImpl.h" 25 #include <cstdint> 26 27 namespace llvm { 28 29 class AnalysisUsage; 30 class BlockFrequencyInfo; 31 class GISelKnownBits; 32 class MachineFunction; 33 class MachineInstr; 34 class MachineOperand; 35 class MachineOptimizationRemarkEmitter; 36 class MachineOptimizationRemarkMissed; 37 struct MachinePointerInfo; 38 class MachineRegisterInfo; 39 class MCInstrDesc; 40 class ProfileSummaryInfo; 41 class RegisterBankInfo; 42 class TargetInstrInfo; 43 class TargetLowering; 44 class TargetPassConfig; 45 class TargetRegisterInfo; 46 class TargetRegisterClass; 47 class ConstantFP; 48 class APFloat; 49 class MachineIRBuilder; 50 51 // Convenience macros for dealing with vector reduction opcodes. 
// Expands to a `case` label for every vector-reduction opcode, including the
// sequential (strictly-ordered) floating-point variants.
#define GISEL_VECREDUCE_CASES_ALL                                              \
  case TargetOpcode::G_VECREDUCE_SEQ_FADD:                                     \
  case TargetOpcode::G_VECREDUCE_SEQ_FMUL:                                     \
  case TargetOpcode::G_VECREDUCE_FADD:                                         \
  case TargetOpcode::G_VECREDUCE_FMUL:                                         \
  case TargetOpcode::G_VECREDUCE_FMAX:                                         \
  case TargetOpcode::G_VECREDUCE_FMIN:                                         \
  case TargetOpcode::G_VECREDUCE_ADD:                                          \
  case TargetOpcode::G_VECREDUCE_MUL:                                          \
  case TargetOpcode::G_VECREDUCE_AND:                                          \
  case TargetOpcode::G_VECREDUCE_OR:                                           \
  case TargetOpcode::G_VECREDUCE_XOR:                                          \
  case TargetOpcode::G_VECREDUCE_SMAX:                                         \
  case TargetOpcode::G_VECREDUCE_SMIN:                                         \
  case TargetOpcode::G_VECREDUCE_UMAX:                                         \
  case TargetOpcode::G_VECREDUCE_UMIN:

// Same as GISEL_VECREDUCE_CASES_ALL but without the G_VECREDUCE_SEQ_* opcodes.
#define GISEL_VECREDUCE_CASES_NONSEQ                                           \
  case TargetOpcode::G_VECREDUCE_FADD:                                         \
  case TargetOpcode::G_VECREDUCE_FMUL:                                         \
  case TargetOpcode::G_VECREDUCE_FMAX:                                         \
  case TargetOpcode::G_VECREDUCE_FMIN:                                         \
  case TargetOpcode::G_VECREDUCE_ADD:                                          \
  case TargetOpcode::G_VECREDUCE_MUL:                                          \
  case TargetOpcode::G_VECREDUCE_AND:                                          \
  case TargetOpcode::G_VECREDUCE_OR:                                           \
  case TargetOpcode::G_VECREDUCE_XOR:                                          \
  case TargetOpcode::G_VECREDUCE_SMAX:                                         \
  case TargetOpcode::G_VECREDUCE_SMIN:                                         \
  case TargetOpcode::G_VECREDUCE_UMAX:                                         \
  case TargetOpcode::G_VECREDUCE_UMIN:

/// Try to constrain Reg to the specified register class. If this fails,
/// create a new virtual register in the correct class.
///
/// \return The virtual register constrained to the right register class.
Register constrainRegToClass(MachineRegisterInfo &MRI,
                             const TargetInstrInfo &TII,
                             const RegisterBankInfo &RBI, Register Reg,
                             const TargetRegisterClass &RegClass);

/// Constrain the register operand \p RegMO, so that it is now constrained to
/// the TargetRegisterClass passed as an argument (RegClass).
/// If this fails, create a new virtual register in the correct class and insert
/// a COPY before \p InsertPt if it is a use or after if it is a definition.
/// In both cases, the function also updates the register of \p RegMO. The debug
/// location of \p InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt,
                                  const TargetRegisterClass &RegClass,
                                  MachineOperand &RegMO);

/// Try to constrain Reg so that it is usable by argument OpIdx of the provided
/// MCInstrDesc \p II. If this fails, create a new virtual register in the
/// correct class and insert a COPY before \p InsertPt if it is a use or after
/// if it is a definition. In both cases, the function also updates the register
/// of \p RegMO.
/// This is equivalent to constrainOperandRegClass(..., RegClass, ...)
/// with RegClass obtained from the MCInstrDesc. The debug location of \p
/// InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt, const MCInstrDesc &II,
                                  MachineOperand &RegMO, unsigned OpIdx);

/// Mutate the newly-selected instruction \p I to constrain its (possibly
/// generic) virtual register operands to the instruction's register class.
/// This could involve inserting COPYs before (for uses) or after (for defs).
/// This requires the number of operands to match the instruction description.
/// \returns whether operand regclass constraining succeeded.
///
// FIXME: Not all instructions have the same number of operands. We should
// probably expose a constrain helper per operand and let the target selector
// constrain individual registers, like fast-isel.
bool constrainSelectedInstRegOperands(MachineInstr &I,
                                      const TargetInstrInfo &TII,
                                      const TargetRegisterInfo &TRI,
                                      const RegisterBankInfo &RBI);

/// Check if DstReg can be replaced with SrcReg depending on the register
/// constraints.
bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI);

/// Check whether an instruction \p MI is dead: it only defines dead virtual
/// registers, and doesn't have other side effects.
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI);

/// Report an ISel error as a missed optimization remark to the LLVMContext's
/// diagnostic stream. Set the FailedISel MachineFunction property.
void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

/// Overload that builds the missed-optimization remark from \p PassName, \p
/// Msg, and the location of \p MI before reporting the failure.
void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        const char *PassName, StringRef Msg,
                        const MachineInstr &MI);

/// Report an ISel warning as a missed optimization remark to the LLVMContext's
/// diagnostic stream.
void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

/// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
Optional<APInt> getIConstantVRegVal(Register VReg,
                                    const MachineRegisterInfo &MRI);

/// If \p VReg is defined by a G_CONSTANT whose value fits in an int64_t,
/// returns that value.
Optional<int64_t> getIConstantVRegSExtVal(Register VReg,
                                          const MachineRegisterInfo &MRI);

/// Simple struct used to hold a constant integer value and a virtual
/// register.
struct ValueAndVReg {
  APInt Value;   // The constant integer value.
  Register VReg; // The virtual register defining that value.
};

/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_CONSTANT returns its APInt value and def register.
Optional<ValueAndVReg>
getIConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);

/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_CONSTANT or G_FCONSTANT returns its value as APInt and def register.
Optional<ValueAndVReg> getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI,
    bool LookThroughInstrs = true, bool LookThroughAnyExt = false);

/// Simple struct used to hold a constant floating-point value and a virtual
/// register, analogous to ValueAndVReg.
struct FPValueAndVReg {
  APFloat Value; // The constant floating-point value.
  Register VReg; // The virtual register defining that value.
};

/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_FCONSTANT returns its APFloat value and def register.
Optional<FPValueAndVReg>
getFConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);

/// If \p VReg is defined by a G_FCONSTANT, return the underlying ConstantFP,
/// otherwise return nullptr.
const ConstantFP* getConstantFPVRegVal(Register VReg,
                                       const MachineRegisterInfo &MRI);

/// See if Reg is defined by a single def instruction that is
/// Opcode. Also try to do trivial folding if it's a COPY with
/// same types. Returns null otherwise.
MachineInstr *getOpcodeDef(unsigned Opcode, Register Reg,
                           const MachineRegisterInfo &MRI);

/// Simple struct used to hold a Register value and the instruction which
/// defines it.
struct DefinitionAndSourceRegister {
  MachineInstr *MI; // The defining instruction.
  Register Reg;     // The underlying source register.
};

/// Find the def instruction for \p Reg, and underlying value Register folding
/// away any copies.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
Optional<DefinitionAndSourceRegister>
getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);

/// Find the def instruction for \p Reg, folding away any trivial copies. May
/// return nullptr if \p Reg is not a generic virtual register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
MachineInstr *getDefIgnoringCopies(Register Reg,
                                   const MachineRegisterInfo &MRI);

/// Find the source register for \p Reg, folding away any trivial copies. It
/// will be an output register of the instruction that getDefIgnoringCopies
/// returns. May return an invalid register if \p Reg is not a generic virtual
/// register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);

// Templated variant of getOpcodeDef returning a MachineInstr derived T.
/// See if Reg is defined by a single def instruction of type T
/// Also try to do trivial folding if it's a COPY with
/// same types. Returns null otherwise.
template <class T>
T *getOpcodeDef(Register Reg, const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  // dyn_cast_or_null handles both "no def found" and "def is not a T".
  return dyn_cast_or_null<T>(DefMI);
}

/// Returns an APFloat from Val converted to the appropriate size.
APFloat getAPFloatFromSize(double Val, unsigned Size);

/// Modify analysis usage so it preserves passes required for the SelectionDAG
/// fallback.
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);

/// Tries to constant fold a binary integer operation \p Opcode with sources
/// \p Op1 and \p Op2. Returns no value if folding is not possible.
Optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                  const Register Op2,
                                  const MachineRegisterInfo &MRI);
/// Tries to constant fold a binary floating-point operation \p Opcode with
/// sources \p Op1 and \p Op2. Returns no value if folding is not possible.
Optional<APFloat> ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                                      const Register Op2,
                                      const MachineRegisterInfo &MRI);

/// Tries to constant fold a vector binop with sources \p Op1 and \p Op2.
/// If successful, returns the G_BUILD_VECTOR representing the folded vector
/// constant. \p MIB should have an insertion point already set to create new
/// G_CONSTANT instructions as needed.
Register ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                                 const Register Op2,
                                 const MachineRegisterInfo &MRI,
                                 MachineIRBuilder &MIB);

/// Tries to constant fold an extension-type opcode \p Opcode with source
/// \p Op1 and immediate operand \p Imm. Returns no value if folding is not
/// possible.
Optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                  uint64_t Imm, const MachineRegisterInfo &MRI);

/// Tries to constant fold an int-to-float conversion \p Opcode of \p Src to
/// destination type \p DstTy. Returns no value if folding is not possible.
Optional<APFloat> ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
                                         Register Src,
                                         const MachineRegisterInfo &MRI);

/// Tries to constant fold a G_CTLZ operation on \p Src. If \p Src is a vector
/// then it tries to do an element-wise constant fold.
Optional<SmallVector<unsigned>>
ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI);

/// Test if the given value is known to have exactly one bit set. This differs
/// from computeKnownBits in that it doesn't necessarily determine which bit is
/// set.
bool isKnownToBeAPowerOfTwo(Register Val, const MachineRegisterInfo &MRI,
                            GISelKnownBits *KnownBits = nullptr);

/// Returns true if \p Val can be assumed to never be a NaN. If \p SNaN is true,
/// this returns if \p Val can be assumed to never be a signaling NaN.
bool isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                     bool SNaN = false);

/// Returns true if \p Val can be assumed to never be a signaling NaN.
inline bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI) {
  // Convenience wrapper: same query with SNaN = true.
  return isKnownNeverNaN(Val, MRI, true);
}

/// Infer the alignment of the memory access described by \p MPO.
Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO);

/// Return a virtual register corresponding to the incoming argument register \p
/// PhysReg. This register is expected to have class \p RC, and optional type \p
/// RegTy. This assumes all references to the register will use the same type.
///
/// If there is an existing live-in argument register, it will be returned.
/// This will also ensure there is a valid copy.
Register getFunctionLiveInPhysReg(MachineFunction &MF,
                                  const TargetInstrInfo &TII,
                                  MCRegister PhysReg,
                                  const TargetRegisterClass &RC,
                                  const DebugLoc &DL, LLT RegTy = LLT());

/// Return the least common multiple type of \p OrigTy and \p TargetTy, by
/// changing the number of vector elements or scalar bitwidth. The intent is a
/// G_MERGE_VALUES, G_BUILD_VECTOR, or G_CONCAT_VECTORS can be constructed from
/// \p OrigTy elements, and unmerged into \p TargetTy
LLVM_READNONE
LLT getLCMType(LLT OrigTy, LLT TargetTy);

/// Return smallest type that covers both \p OrigTy and \p TargetTy and is
/// multiple of TargetTy.
LLVM_READNONE
LLT getCoverTy(LLT OrigTy, LLT TargetTy);

/// Return a type where the total size is the greatest common divisor of \p
/// OrigTy and \p TargetTy. This will try to either change the number of vector
/// elements, or bitwidth of scalars. The intent is the result type can be used
/// as the result of a G_UNMERGE_VALUES from \p OrigTy, and then some
/// combination of G_MERGE_VALUES, G_BUILD_VECTOR and G_CONCAT_VECTORS (possibly
/// with intermediate casts) can re-form \p TargetTy.
///
/// If these are vectors with different element types, this will try to produce
/// a vector with a compatible total size, but the element type of \p OrigTy. If
/// this can't be satisfied, this will produce a scalar smaller than the
/// original vector elements.
///
/// In the worst case, this returns LLT::scalar(1)
LLVM_READNONE
LLT getGCDType(LLT OrigTy, LLT TargetTy);

/// Represents a value which can be a Register or a constant.
///
/// This is useful in situations where an instruction may have an interesting
/// register operand or interesting constant operand.
/// For a concrete example, \see getVectorSplat.
class RegOrConstant {
  int64_t Cst;  // Valid only when IsReg is false.
  Register Reg; // Valid only when IsReg is true.
  bool IsReg;   // Discriminator for the two cases above.

public:
  explicit RegOrConstant(Register Reg) : Reg(Reg), IsReg(true) {}
  explicit RegOrConstant(int64_t Cst) : Cst(Cst), IsReg(false) {}
  /// \returns true if this holds a register.
  bool isReg() const { return IsReg; }
  /// \returns true if this holds a constant.
  bool isCst() const { return !IsReg; }
  /// \returns the held register. Asserts if this holds a constant.
  Register getReg() const {
    assert(isReg() && "Expected a register!");
    return Reg;
  }
  /// \returns the held constant. Asserts if this holds a register.
  int64_t getCst() const {
    assert(isCst() && "Expected a constant!");
    return Cst;
  }
};

/// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
/// If \p MI is not a splat, returns None.
Optional<int> getSplatIndex(MachineInstr &MI);

/// Returns a scalar constant of a G_BUILD_VECTOR splat if it exists.
Optional<int64_t> getBuildVectorConstantSplat(const MachineInstr &MI,
                                              const MachineRegisterInfo &MRI);

/// Returns a floating point scalar constant of a build vector splat if it
/// exists. When \p AllowUndef == true some elements can be undef but not all.
Optional<FPValueAndVReg> getFConstantSplat(Register VReg,
                                           const MachineRegisterInfo &MRI,
                                           bool AllowUndef = true);

/// Return true if the specified register is defined by G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const Register Reg,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are 0 or undef.
bool isBuildVectorAllZeros(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI,
                           bool AllowUndef = false);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are ~0 or undef.
bool isBuildVectorAllOnes(const MachineInstr &MI,
                          const MachineRegisterInfo &MRI,
                          bool AllowUndef = false);

/// \returns a value when \p MI is a vector splat. The splat can be either a
/// Register or a constant.
///
/// Examples:
///
/// \code
///   %reg = COPY $physreg
///   %reg_splat = G_BUILD_VECTOR %reg, %reg, ..., %reg
/// \endcode
///
/// If called on the G_BUILD_VECTOR above, this will return a RegOrConstant
/// containing %reg.
///
/// \code
///   %cst = G_CONSTANT iN 4
///   %constant_splat = G_BUILD_VECTOR %cst, %cst, ..., %cst
/// \endcode
///
/// In the above case, this will return a RegOrConstant containing 4.
Optional<RegOrConstant> getVectorSplat(const MachineInstr &MI,
                                       const MachineRegisterInfo &MRI);

/// Determines if \p MI defines a constant integer or a build vector of
/// constant integers. Treats undef values as constants.
bool isConstantOrConstantVector(MachineInstr &MI,
                                const MachineRegisterInfo &MRI);

/// Determines if \p MI defines a constant integer or a splat vector of
/// constant integers.
/// \returns the scalar constant or None.
Optional<APInt> isConstantOrConstantSplatVector(MachineInstr &MI,
                                                const MachineRegisterInfo &MRI);

/// Attempt to match a unary predicate against a scalar/splat constant or every
/// element of a constant G_BUILD_VECTOR. If \p ConstVal is null, the source
/// value was undef.
bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg,
                         std::function<bool(const Constant *ConstVal)> Match,
                         bool AllowUndefs = false);

/// Returns true if given the TargetLowering's boolean contents information,
/// the value \p Val contains a true value.
bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                    bool IsFP);

/// Returns an integer representing true, as defined by the
/// TargetBooleanContents.
int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP);

/// Returns true if the given block should be optimized for size.
bool shouldOptForSize(const MachineBasicBlock &MBB, ProfileSummaryInfo *PSI,
                      BlockFrequencyInfo *BFI);

/// Convenience alias for a small worklist of instructions.
using SmallInstListTy = GISelWorkList<4>;
/// Erase \p MI after saving its uses into \p DeadInstChain so they can be
/// revisited, notifying \p LocObserver (if non-null) of any lost debug
/// locations.
void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver,
                      SmallInstListTy &DeadInstChain);
/// Erase every instruction in \p DeadInstrs, notifying \p LocObserver (if
/// non-null) of any lost debug locations.
void eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs, MachineRegisterInfo &MRI,
                 LostDebugLocObserver *LocObserver = nullptr);
/// Erase \p MI, notifying \p LocObserver (if non-null) of any lost debug
/// locations.
void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                LostDebugLocObserver *LocObserver = nullptr);

} // End namespace llvm.
#endif