//==-- llvm/CodeGen/GlobalISel/Utils.h ---------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file declares the API of helper functions used throughout the
/// GlobalISel pipeline.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_UTILS_H
#define LLVM_CODEGEN_GLOBALISEL_UTILS_H

#include "GISelWorkList.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"

#include <cstdint>

namespace llvm {

class AnalysisUsage;
class LostDebugLocObserver;
class MachineBasicBlock;
class BlockFrequencyInfo;
class GISelKnownBits;
class MachineFunction;
class MachineInstr;
class MachineIRBuilder;
class MachineOperand;
class MachineOptimizationRemarkEmitter;
class MachineOptimizationRemarkMissed;
struct MachinePointerInfo;
class MachineRegisterInfo;
class MCInstrDesc;
class ProfileSummaryInfo;
class RegisterBankInfo;
class TargetInstrInfo;
class TargetLowering;
class TargetPassConfig;
class TargetRegisterInfo;
class TargetRegisterClass;
class ConstantFP;
class APFloat;

// Convenience macros for dealing with vector reduction opcodes.
#define GISEL_VECREDUCE_CASES_ALL                                              \
  case TargetOpcode::G_VECREDUCE_SEQ_FADD:                                     \
  case TargetOpcode::G_VECREDUCE_SEQ_FMUL:                                     \
  case TargetOpcode::G_VECREDUCE_FADD:                                         \
  case TargetOpcode::G_VECREDUCE_FMUL:                                         \
  case TargetOpcode::G_VECREDUCE_FMAX:                                         \
  case TargetOpcode::G_VECREDUCE_FMIN:                                         \
  case TargetOpcode::G_VECREDUCE_FMAXIMUM:                                     \
  case TargetOpcode::G_VECREDUCE_FMINIMUM:                                     \
  case TargetOpcode::G_VECREDUCE_ADD:                                          \
  case TargetOpcode::G_VECREDUCE_MUL:                                          \
  case TargetOpcode::G_VECREDUCE_AND:                                          \
  case TargetOpcode::G_VECREDUCE_OR:                                           \
  case TargetOpcode::G_VECREDUCE_XOR:                                          \
  case TargetOpcode::G_VECREDUCE_SMAX:                                         \
  case TargetOpcode::G_VECREDUCE_SMIN:                                         \
  case TargetOpcode::G_VECREDUCE_UMAX:                                         \
  case TargetOpcode::G_VECREDUCE_UMIN:

#define GISEL_VECREDUCE_CASES_NONSEQ                                           \
  case TargetOpcode::G_VECREDUCE_FADD:                                         \
  case TargetOpcode::G_VECREDUCE_FMUL:                                         \
  case TargetOpcode::G_VECREDUCE_FMAX:                                         \
  case TargetOpcode::G_VECREDUCE_FMIN:                                         \
  case TargetOpcode::G_VECREDUCE_FMAXIMUM:                                     \
  case TargetOpcode::G_VECREDUCE_FMINIMUM:                                     \
  case TargetOpcode::G_VECREDUCE_ADD:                                          \
  case TargetOpcode::G_VECREDUCE_MUL:                                          \
  case TargetOpcode::G_VECREDUCE_AND:                                          \
  case TargetOpcode::G_VECREDUCE_OR:                                           \
  case TargetOpcode::G_VECREDUCE_XOR:                                          \
  case TargetOpcode::G_VECREDUCE_SMAX:                                         \
  case TargetOpcode::G_VECREDUCE_SMIN:                                         \
  case TargetOpcode::G_VECREDUCE_UMAX:                                         \
  case TargetOpcode::G_VECREDUCE_UMIN:

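// For illustration, these macros are intended to be used as case-label lists,
// e.g. in a legalizer or combiner switch (a sketch; handleReduction is a
// hypothetical helper, not part of this header):
//
//   switch (MI.getOpcode()) {
//   GISEL_VECREDUCE_CASES_NONSEQ
//     return handleReduction(MI);
//   default:
//     break;
//   }
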
/// Try to constrain Reg to the specified register class. If this fails,
/// create a new virtual register in the correct class.
///
/// \return The virtual register constrained to the right register class.
Register constrainRegToClass(MachineRegisterInfo &MRI,
                             const TargetInstrInfo &TII,
                             const RegisterBankInfo &RBI, Register Reg,
                             const TargetRegisterClass &RegClass);

/// Constrain the register operand \p RegMO, so that it is now constrained to
/// the TargetRegisterClass passed as an argument (RegClass).
/// If this fails, create a new virtual register in the correct class and insert
/// a COPY before \p InsertPt if it is a use or after if it is a definition.
/// In both cases, the function also updates the register of \p RegMO. The
/// debug location of \p InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt,
                                  const TargetRegisterClass &RegClass,
                                  MachineOperand &RegMO);

/// Try to constrain Reg so that it is usable by argument OpIdx of the provided
/// MCInstrDesc \p II. If this fails, create a new virtual register in the
/// correct class and insert a COPY before \p InsertPt if it is a use or after
/// if it is a definition. In both cases, the function also updates the register
/// of \p RegMO.
/// This is equivalent to constrainOperandRegClass(..., RegClass, ...)
/// with RegClass obtained from the MCInstrDesc. The debug location of \p
/// InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt, const MCInstrDesc &II,
                                  MachineOperand &RegMO, unsigned OpIdx);

/// Mutate the newly-selected instruction \p I to constrain its (possibly
/// generic) virtual register operands to the instruction's register class.
/// This could involve inserting COPYs before (for uses) or after (for defs).
/// This requires the number of operands to match the instruction description.
/// \returns whether operand regclass constraining succeeded.
///
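/// For illustration, a typical use right after emitting a target instruction
/// in an InstructionSelector (a sketch; MIB, TII, TRI and RBI are assumed to
/// be the usual selector locals/members):
/// \code
///   if (!constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI))
///     return false;
/// \endcode
///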
// FIXME: Not all instructions have the same number of operands. We should
// probably expose a constrain helper per operand and let the target selector
// constrain individual registers, like fast-isel.
bool constrainSelectedInstRegOperands(MachineInstr &I,
                                      const TargetInstrInfo &TII,
                                      const TargetRegisterInfo &TRI,
                                      const RegisterBankInfo &RBI);

/// Check if DstReg can be replaced with SrcReg depending on the register
/// constraints.
bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI);

/// Check whether an instruction \p MI is dead: it only defines dead virtual
/// registers, and doesn't have other side effects.
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI);

/// Report an ISel error as a missed optimization remark to the LLVMContext's
/// diagnostic stream. Set the FailedISel MachineFunction property.
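///
/// For illustration, a minimal sketch of reporting a selection failure for an
/// instruction (MF, TPC, MORE and MI are assumed to be in scope):
/// \code
///   reportGISelFailure(MF, TPC, MORE, "instruction-select",
///                      "unable to select instruction", MI);
/// \endcode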
void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        const char *PassName, StringRef Msg,
                        const MachineInstr &MI);

/// Report an ISel warning as a missed optimization remark to the LLVMContext's
/// diagnostic stream.
void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

/// Returns the inverse opcode of \p MinMaxOpc, which is a generic min/max
/// opcode like G_SMIN.
unsigned getInverseGMinMaxOpcode(unsigned MinMaxOpc);

/// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
std::optional<APInt> getIConstantVRegVal(Register VReg,
                                         const MachineRegisterInfo &MRI);

/// If \p VReg is defined by a G_CONSTANT whose value fits in an int64_t,
/// returns it.
std::optional<int64_t> getIConstantVRegSExtVal(Register VReg,
                                               const MachineRegisterInfo &MRI);

/// \p VReg is expected to be defined by a G_CONSTANT; returns the
/// corresponding value.
const APInt &getIConstantFromReg(Register VReg, const MachineRegisterInfo &MRI);

/// Simple struct used to hold a constant integer value and a virtual
/// register.
struct ValueAndVReg {
  APInt Value;
  Register VReg;
};

/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_CONSTANT returns its APInt value and def register.
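///
/// For illustration, a minimal sketch that checks whether a (possibly copied
/// or truncated) operand is the constant zero (Reg and MRI assumed in scope):
/// \code
///   auto ValAndVReg = getIConstantVRegValWithLookThrough(Reg, MRI);
///   if (ValAndVReg && ValAndVReg->Value.isZero())
///     return true; // Constant 0 found behind copies.
/// \endcode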
std::optional<ValueAndVReg>
getIConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);

/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_CONSTANT or G_FCONSTANT returns its value as APInt and def register.
std::optional<ValueAndVReg> getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI,
    bool LookThroughInstrs = true, bool LookThroughAnyExt = false);

struct FPValueAndVReg {
  APFloat Value;
  Register VReg;
};

/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_FCONSTANT returns its APFloat value and def register.
std::optional<FPValueAndVReg>
getFConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);

const ConstantFP* getConstantFPVRegVal(Register VReg,
                                       const MachineRegisterInfo &MRI);

/// See if \p Reg is defined by a single-def instruction whose opcode is
/// \p Opcode. Also try to do trivial folding if it's a COPY with the same
/// types. Returns null otherwise.
MachineInstr *getOpcodeDef(unsigned Opcode, Register Reg,
                           const MachineRegisterInfo &MRI);

/// Simple struct used to hold a Register value and the instruction which
/// defines it.
struct DefinitionAndSourceRegister {
  MachineInstr *MI;
  Register Reg;
};

/// Find the def instruction for \p Reg and the underlying source Register,
/// folding away any copies.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
std::optional<DefinitionAndSourceRegister>
getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);

/// Find the def instruction for \p Reg, folding away any trivial copies. May
/// return nullptr if \p Reg is not a generic virtual register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
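///
/// For illustration, a minimal sketch that looks through copies and hints to
/// detect an undef source (Reg and MRI assumed in scope):
/// \code
///   MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
///   if (Def && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
///     return true; // The value is undef.
/// \endcode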
MachineInstr *getDefIgnoringCopies(Register Reg,
                                   const MachineRegisterInfo &MRI);

/// Find the source register for \p Reg, folding away any trivial copies. It
/// will be an output register of the instruction that getDefIgnoringCopies
/// returns. May return an invalid register if \p Reg is not a generic virtual
/// register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);

/// Helper function to split a wide generic register into bitwise blocks with
/// the given Type (which implies the number of blocks needed). The generic
/// registers created are appended to \p VRegs, starting at bit 0 of \p Reg.
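///
/// For illustration, a minimal sketch splitting a 64-bit register into two
/// 32-bit pieces (SrcReg, MIRBuilder and MRI assumed in scope):
/// \code
///   SmallVector<Register, 2> Parts;
///   extractParts(SrcReg, LLT::scalar(32), 2, Parts, MIRBuilder, MRI);
/// \endcode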
void extractParts(Register Reg, LLT Ty, int NumParts,
                  SmallVectorImpl<Register> &VRegs,
                  MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI);

/// Version which handles irregular splits.
bool extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy,
                  SmallVectorImpl<Register> &VRegs,
                  SmallVectorImpl<Register> &LeftoverVRegs,
                  MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI);

/// Version which handles irregular sub-vector splits.
void extractVectorParts(Register Reg, unsigned NumElts,
                        SmallVectorImpl<Register> &VRegs,
                        MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI);

/// Templated variant of getOpcodeDef returning a MachineInstr derived T.
/// See if \p Reg is defined by a single-def instruction of type T. Also try to
/// do trivial folding if it's a COPY with the same types. Returns null
/// otherwise.
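///
/// For illustration, a minimal sketch using one of the instruction wrapper
/// classes from GenericMachineInstrs.h (Reg and MRI assumed in scope):
/// \code
///   if (GBuildVector *BV = getOpcodeDef<GBuildVector>(Reg, MRI))
///     return BV->getSourceReg(0); // First element of the build vector.
/// \endcode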
template <class T>
T *getOpcodeDef(Register Reg, const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return dyn_cast_or_null<T>(DefMI);
}

/// Returns an APFloat from Val converted to the appropriate size.
APFloat getAPFloatFromSize(double Val, unsigned Size);

/// Modify analysis usage so it preserves passes required for the SelectionDAG
/// fallback.
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);

std::optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                       const Register Op2,
                                       const MachineRegisterInfo &MRI);
std::optional<APFloat> ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                                           const Register Op2,
                                           const MachineRegisterInfo &MRI);

/// Tries to constant fold a vector binop with sources \p Op1 and \p Op2.
/// Returns an empty vector on failure.
SmallVector<APInt> ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                                           const Register Op2,
                                           const MachineRegisterInfo &MRI);

std::optional<APInt> ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
                                        const Register Op0,
                                        const MachineRegisterInfo &MRI);

std::optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                       uint64_t Imm,
                                       const MachineRegisterInfo &MRI);

std::optional<APFloat> ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
                                              Register Src,
                                              const MachineRegisterInfo &MRI);

/// Tries to constant fold a counting-zero operation (G_CTLZ or G_CTTZ) on \p
/// Src. If \p Src is a vector then it tries to do an element-wise constant
/// fold.
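///
/// For illustration, a minimal sketch folding G_CTLZ (Src and MRI assumed in
/// scope):
/// \code
///   auto Folded = ConstantFoldCountZeros(
///       Src, MRI, [](APInt V) { return V.countl_zero(); });
/// \endcode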
std::optional<SmallVector<unsigned>>
ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI,
                       std::function<unsigned(APInt)> CB);

std::optional<SmallVector<APInt>>
ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
                 const MachineRegisterInfo &MRI);

/// Test if the given value is known to have exactly one bit set. This differs
/// from computeKnownBits in that it doesn't necessarily determine which bit is
/// set.
bool isKnownToBeAPowerOfTwo(Register Val, const MachineRegisterInfo &MRI,
                            GISelKnownBits *KnownBits = nullptr);

/// Returns true if \p Val can be assumed to never be a NaN. If \p SNaN is true,
/// this returns whether \p Val can be assumed to never be a signaling NaN.
bool isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                     bool SNaN = false);

/// Returns true if \p Val can be assumed to never be a signaling NaN.
inline bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI) {
  return isKnownNeverNaN(Val, MRI, true);
}

Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO);

/// Return a virtual register corresponding to the incoming argument register \p
/// PhysReg. This register is expected to have class \p RC, and optional type \p
/// RegTy. This assumes all references to the register will use the same type.
///
/// If there is an existing live-in argument register, it will be returned.
/// This will also ensure there is a valid copy from \p PhysReg to the returned
/// virtual register.
Register getFunctionLiveInPhysReg(MachineFunction &MF,
                                  const TargetInstrInfo &TII,
                                  MCRegister PhysReg,
                                  const TargetRegisterClass &RC,
                                  const DebugLoc &DL, LLT RegTy = LLT());

/// Return the least common multiple type of \p OrigTy and \p TargetTy, by
/// changing the number of vector elements or scalar bitwidth. The intent is a
/// G_MERGE_VALUES, G_BUILD_VECTOR, or G_CONCAT_VECTORS can be constructed from
/// \p OrigTy elements, and unmerged into \p TargetTy. It is an error to call
/// this function where one argument is a fixed vector and the other is a
/// scalable vector, since it is illegal to build a G_{MERGE|UNMERGE}_VALUES
/// between fixed and scalable vectors.
LLVM_READNONE
LLT getLCMType(LLT OrigTy, LLT TargetTy);

/// Return the smallest type that covers both \p OrigTy and \p TargetTy and is
/// a multiple of \p TargetTy.
LLVM_READNONE
LLT getCoverTy(LLT OrigTy, LLT TargetTy);

/// Return a type where the total size is the greatest common divisor of \p
/// OrigTy and \p TargetTy. This will try to either change the number of vector
/// elements, or bitwidth of scalars. The intent is the result type can be used
/// as the result of a G_UNMERGE_VALUES from \p OrigTy, and then some
/// combination of G_MERGE_VALUES, G_BUILD_VECTOR and G_CONCAT_VECTORS (possibly
/// with intermediate casts) can re-form \p TargetTy.
///
/// If these are vectors with different element types, this will try to produce
/// a vector with a compatible total size, but the element type of \p OrigTy. If
/// this can't be satisfied, this will produce a scalar smaller than the
/// original vector elements. It is an error to call this function where
/// one argument is a fixed vector and the other is a scalable vector, since it
/// is illegal to build a G_{MERGE|UNMERGE}_VALUES between fixed and scalable
/// vectors.
///
/// In the worst case, this returns LLT::scalar(1).
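///
/// For illustration (expected results; a sketch, not exhaustive):
/// \code
///   LLT A = getGCDType(LLT::scalar(64), LLT::scalar(32));  // s32
///   LLT B = getGCDType(LLT::fixed_vector(4, 32),
///                      LLT::fixed_vector(2, 32));          // <2 x s32>
/// \endcode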
LLVM_READNONE
LLT getGCDType(LLT OrigTy, LLT TargetTy);

/// Represents a value which can be a Register or a constant.
///
/// This is useful in situations where an instruction may have an interesting
/// register operand or interesting constant operand. For a concrete example,
/// \see getVectorSplat.
class RegOrConstant {
  int64_t Cst;
  Register Reg;
  bool IsReg;

public:
  explicit RegOrConstant(Register Reg) : Reg(Reg), IsReg(true) {}
  explicit RegOrConstant(int64_t Cst) : Cst(Cst), IsReg(false) {}
  bool isReg() const { return IsReg; }
  bool isCst() const { return !IsReg; }
  Register getReg() const {
    assert(isReg() && "Expected a register!");
    return Reg;
  }
  int64_t getCst() const {
    assert(isCst() && "Expected a constant!");
    return Cst;
  }
};

/// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
/// If \p MI is not a splat, returns std::nullopt.
std::optional<int> getSplatIndex(MachineInstr &MI);

/// \returns the scalar integral splat value of \p Reg if possible.
std::optional<APInt> getIConstantSplatVal(const Register Reg,
                                          const MachineRegisterInfo &MRI);

/// \returns the scalar integral splat value defined by \p MI if possible.
std::optional<APInt> getIConstantSplatVal(const MachineInstr &MI,
                                          const MachineRegisterInfo &MRI);

/// \returns the scalar sign extended integral splat value of \p Reg if
/// possible.
std::optional<int64_t> getIConstantSplatSExtVal(const Register Reg,
                                                const MachineRegisterInfo &MRI);

/// \returns the scalar sign extended integral splat value defined by \p MI if
/// possible.
std::optional<int64_t> getIConstantSplatSExtVal(const MachineInstr &MI,
                                                const MachineRegisterInfo &MRI);

/// Returns a floating point scalar constant of a build vector splat if it
/// exists. When \p AllowUndef == true some elements can be undef but not all.
std::optional<FPValueAndVReg> getFConstantSplat(Register VReg,
                                                const MachineRegisterInfo &MRI,
                                                bool AllowUndef = true);

/// Return true if the specified register is defined by G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const Register Reg,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are 0 or undef.
bool isBuildVectorAllZeros(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI,
                           bool AllowUndef = false);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are ~0 or undef.
bool isBuildVectorAllOnes(const MachineInstr &MI,
                          const MachineRegisterInfo &MRI,
                          bool AllowUndef = false);
470 
471 /// Return true if the specified instruction is known to be a constant, or a
472 /// vector of constants.
473 ///
474 /// If \p AllowFP is true, this will consider G_FCONSTANT in addition to
475 /// G_CONSTANT. If \p AllowOpaqueConstants is true, constant-like instructions
476 /// such as G_GLOBAL_VALUE will also be considered.
477 bool isConstantOrConstantVector(const MachineInstr &MI,
478                                 const MachineRegisterInfo &MRI,
479                                 bool AllowFP = true,
480                                 bool AllowOpaqueConstants = true);
481 
482 /// Return true if the value is a constant 0 integer or a splatted vector of a
483 /// constant 0 integer (with no undefs if \p AllowUndefs is false). This will
484 /// handle G_BUILD_VECTOR and G_BUILD_VECTOR_TRUNC as truncation is not an issue
485 /// for null values.
486 bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI,
487                        bool AllowUndefs = false);
488 
489 /// Return true if the value is a constant -1 integer or a splatted vector of a
490 /// constant -1 integer (with no undefs if \p AllowUndefs is false).
491 bool isAllOnesOrAllOnesSplat(const MachineInstr &MI,
492                              const MachineRegisterInfo &MRI,
493                              bool AllowUndefs = false);
494 
495 /// \returns a value when \p MI is a vector splat. The splat can be either a
496 /// Register or a constant.
497 ///
498 /// Examples:
499 ///
500 /// \code
501 ///   %reg = COPY $physreg
502 ///   %reg_splat = G_BUILD_VECTOR %reg, %reg, ..., %reg
503 /// \endcode
504 ///
505 /// If called on the G_BUILD_VECTOR above, this will return a RegOrConstant
506 /// containing %reg.
507 ///
508 /// \code
509 ///   %cst = G_CONSTANT iN 4
510 ///   %constant_splat = G_BUILD_VECTOR %cst, %cst, ..., %cst
511 /// \endcode
512 ///
513 /// In the above case, this will return a RegOrConstant containing 4.
514 std::optional<RegOrConstant> getVectorSplat(const MachineInstr &MI,
515                                             const MachineRegisterInfo &MRI);
516 
517 /// Determines if \p MI defines a constant integer or a build vector of
518 /// constant integers. Treats undef values as constants.
519 bool isConstantOrConstantVector(MachineInstr &MI,
520                                 const MachineRegisterInfo &MRI);
521 
522 /// Determines if \p MI defines a constant integer or a splat vector of
523 /// constant integers.
524 /// \returns the scalar constant or std::nullopt.
525 std::optional<APInt>
526 isConstantOrConstantSplatVector(MachineInstr &MI,
527                                 const MachineRegisterInfo &MRI);
528 
529 /// Determines if \p MI defines a float constant integer or a splat vector of
530 /// float constant integers.
531 /// \returns the float constant or std::nullopt.
532 std::optional<APFloat>
533 isConstantOrConstantSplatVectorFP(MachineInstr &MI,
534                                   const MachineRegisterInfo &MRI);
535 
536 /// Attempt to match a unary predicate against a scalar/splat constant or every
537 /// element of a constant G_BUILD_VECTOR. If \p ConstVal is null, the source
538 /// value was undef.
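///
/// For illustration, a minimal sketch matching a power-of-two constant or
/// splat (Reg and MRI assumed in scope):
/// \code
///   bool IsPow2 = matchUnaryPredicate(MRI, Reg, [](const Constant *C) {
///     const auto *CI = dyn_cast_or_null<ConstantInt>(C);
///     return CI && CI->getValue().isPowerOf2();
///   });
/// \endcode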
bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg,
                         std::function<bool(const Constant *ConstVal)> Match,
                         bool AllowUndefs = false);

/// Returns true if given the TargetLowering's boolean contents information,
/// the value \p Val contains a true value.
bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                    bool IsFP);
/// \returns true if given the TargetLowering's boolean contents information,
/// the value \p Val contains a false value.
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                     bool IsFP);

/// Returns an integer representing true, as defined by the
/// TargetBooleanContents.
int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP);

using SmallInstListTy = GISelWorkList<4>;
void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver,
                      SmallInstListTy &DeadInstChain);
void eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs, MachineRegisterInfo &MRI,
                 LostDebugLocObserver *LocObserver = nullptr);
void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                LostDebugLocObserver *LocObserver = nullptr);

/// Assuming the instruction \p MI is going to be deleted, attempt to salvage
/// debug users of \p MI by writing the effect of \p MI in a DIExpression.
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI);

/// Returns whether opcode \p Opc is a pre-isel generic floating-point opcode,
/// having only floating-point operands.
bool isPreISelGenericFloatingPointOpcode(unsigned Opc);

/// Returns true if \p Reg can create undef or poison from non-undef &
/// non-poison operands. \p ConsiderFlagsAndMetadata controls whether poison
/// producing flags and metadata on the instruction are considered. This can be
/// used to see if the instruction could still introduce undef or poison even
/// without poison generating flags and metadata which might be on the
/// instruction.
bool canCreateUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI,
                            bool ConsiderFlagsAndMetadata = true);

/// Returns true if \p Reg can create poison from non-poison operands.
bool canCreatePoison(Register Reg, const MachineRegisterInfo &MRI,
                     bool ConsiderFlagsAndMetadata = true);

/// Returns true if \p Reg is guaranteed to be neither undef nor poison.
bool isGuaranteedNotToBeUndefOrPoison(Register Reg,
                                      const MachineRegisterInfo &MRI,
                                      unsigned Depth = 0);

/// Returns true if \p Reg cannot be poison, but may be undef.
bool isGuaranteedNotToBePoison(Register Reg, const MachineRegisterInfo &MRI,
                               unsigned Depth = 0);

/// Returns true if \p Reg cannot be undef, but may be poison.
bool isGuaranteedNotToBeUndef(Register Reg, const MachineRegisterInfo &MRI,
                              unsigned Depth = 0);

/// Get the type back from LLT. It won't be 100 percent accurate but returns an
/// estimate of the type.
Type *getTypeForLLT(LLT Ty, LLVMContext &C);

/// An integer-like constant.
///
/// It abstracts over scalar, fixed-length vectors, and scalable vectors.
/// In the common case, it provides a common API and feels like an APInt,
/// while still providing low-level access.
/// It can be used for constant-folding.
///
/// bool isZero()
/// abstracts over the kind.
///
/// switch (C.getKind()) {
/// }
/// provides low-level access.
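///
/// For illustration, a minimal sketch (Reg and MRI assumed in scope):
/// \code
///   if (auto C = GIConstant::getConstant(Reg, MRI))
///     if (C->getKind() == GIConstant::GIConstantKind::Scalar)
///       return C->getScalarValue().isZero();
/// \endcode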
class GIConstant {
public:
  enum class GIConstantKind { Scalar, FixedVector, ScalableVector };

private:
  GIConstantKind Kind;
  SmallVector<APInt> Values;
  APInt Value;

public:
  GIConstant(ArrayRef<APInt> Values)
      : Kind(GIConstantKind::FixedVector), Values(Values) {}
  GIConstant(const APInt &Value, GIConstantKind Kind)
      : Kind(Kind), Value(Value) {}

  /// Returns the kind of this constant, e.g., Scalar.
  GIConstantKind getKind() const { return Kind; }

  /// Returns the value, if this constant is a scalar.
  APInt getScalarValue() const;

  static std::optional<GIConstant> getConstant(Register Const,
                                               const MachineRegisterInfo &MRI);
};

/// A floating-point-like constant.
///
/// It abstracts over scalar, fixed-length vectors, and scalable vectors.
/// In the common case, it provides a common API and feels like an APFloat,
/// while still providing low-level access.
/// It can be used for constant-folding.
///
/// bool isZero()
/// abstracts over the kind.
///
/// switch (C.getKind()) {
/// }
/// provides low-level access.
class GFConstant {
public:
  enum class GFConstantKind { Scalar, FixedVector, ScalableVector };

private:
  GFConstantKind Kind;
  SmallVector<APFloat> Values;

public:
  GFConstant(ArrayRef<APFloat> Values)
      : Kind(GFConstantKind::FixedVector), Values(Values) {}
  GFConstant(const APFloat &Value, GFConstantKind Kind) : Kind(Kind) {
    Values.push_back(Value);
  }

  /// Returns the kind of this constant, e.g., Scalar.
  GFConstantKind getKind() const { return Kind; }

  /// Returns the value, if this constant is a scalar.
  APFloat getScalarValue() const;

  static std::optional<GFConstant> getConstant(Register Const,
                                               const MachineRegisterInfo &MRI);
};

} // End namespace llvm.
#endif