//===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_VALUETRACKING_H
#define LLVM_ANALYSIS_VALUETRACKING_H

#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/WithCache.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include <cassert>
#include <cstdint>

namespace llvm {

class Operator;
class AddOperator;
class AssumptionCache;
class DominatorTree;
class GEPOperator;
class WithOverflowInst;
struct KnownBits;
class Loop;
class LoopInfo;
class MDNode;
class StringRef;
class TargetLibraryInfo;
template <typename T> class ArrayRef;

constexpr unsigned MaxAnalysisRecursionDepth = 6;

/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers.  In the case
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL,
                      unsigned Depth = 0, AssumptionCache *AC = nullptr,
                      const Instruction *CxtI = nullptr,
                      const DominatorTree *DT = nullptr,
                      bool UseInstrInfo = true);
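
// Example (illustrative sketch; assumes a `Value *V` and a `DataLayout DL`
// supplied by the caller):
//
//   KnownBits Known = computeKnownBits(V, DL);
//   if (Known.countMinTrailingZeros() >= 3) {
//     // The low three bits are known zero, so V is a multiple of 8.
//   }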

/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
                           unsigned Depth = 0, AssumptionCache *AC = nullptr,
                           const Instruction *CxtI = nullptr,
                           const DominatorTree *DT = nullptr,
                           bool UseInstrInfo = true);

/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                           const DataLayout &DL, unsigned Depth = 0,
                           AssumptionCache *AC = nullptr,
                           const Instruction *CxtI = nullptr,
                           const DominatorTree *DT = nullptr,
                           bool UseInstrInfo = true);

KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const SimplifyQuery &Q);

KnownBits computeKnownBits(const Value *V, unsigned Depth,
                           const SimplifyQuery &Q);

void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                      const SimplifyQuery &Q);

/// Compute known bits from the !range metadata.
/// \p Ranges the range metadata to compute from
/// \p Known the KnownBits structure to populate with the result
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known);

/// Merge bits known from context-dependent facts into Known.
void computeKnownBitsFromContext(const Value *V, KnownBits &Known,
                                 unsigned Depth, const SimplifyQuery &Q);

/// Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I,
                                       const KnownBits &KnownLHS,
                                       const KnownBits &KnownRHS,
                                       unsigned Depth, const SimplifyQuery &SQ);

/// Adjust \p Known for the given select \p Arm to include information from the
/// select \p Cond.
void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, Value *Arm,
                                 bool Invert, unsigned Depth,
                                 const SimplifyQuery &Q);

/// Return true if LHS and RHS have no common bits set.
bool haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
                         const WithCache<const Value *> &RHSCache,
                         const SimplifyQuery &SQ);

/// Return true if the given value is known to have exactly one bit set when
/// defined. For vectors return true if every element is known to be a power
/// of two when defined. Supports values with integer or pointer type and
/// vectors of integers. If 'OrZero' is set, then return true if the given
/// value is either a power of two or zero.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                            bool OrZero = false, unsigned Depth = 0,
                            AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);

bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                            const SimplifyQuery &Q);
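
// Example (illustrative sketch; assumes a `Value *Divisor` and a
// `DataLayout DL` supplied by the caller):
//
//   if (isKnownToBeAPowerOfTwo(Divisor, DL, /*OrZero=*/false)) {
//     // A udiv by Divisor could be rewritten as a lshr by log2(Divisor).
//   }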

bool isOnlyUsedInZeroComparison(const Instruction *CxtI);

bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);

/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth = 0);
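
// Example (illustrative sketch; assumes a pointer `Value *Ptr` and a
// SimplifyQuery `Q` built by the caller from the DataLayout and, optionally,
// a context instruction):
//
//   if (isKnownNonZero(Ptr, Q)) {
//     // Ptr cannot be null here, so a null check against it can be folded.
//   }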

/// Return true if the two given values are negations of each other.
/// Currently recognizes the following value pairs:
/// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
/// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false,
                     bool AllowPoison = true);

/// Return true iff:
/// 1. X is poison implies Y is poison.
/// 2. X is true implies Y is false.
/// 3. X is false implies Y is true.
/// Otherwise, return false.
bool isKnownInversion(const Value *X, const Value *Y);

/// Returns true if the given value is known to be non-negative.
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ,
                        unsigned Depth = 0);

/// Returns true if the given value is known to be positive (i.e. non-negative
/// and non-zero).
bool isKnownPositive(const Value *V, const SimplifyQuery &SQ,
                     unsigned Depth = 0);

/// Returns true if the given value is known to be negative (i.e. non-positive
/// and non-zero).
bool isKnownNegative(const Value *V, const SimplifyQuery &SQ,
                     unsigned Depth = 0);

/// Return true if the given values are known to be non-equal when defined.
/// Supports scalar integer types only.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers.  In the case
/// where V is a vector, the mask, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
bool MaskedValueIsZero(const Value *V, const APInt &Mask,
                       const SimplifyQuery &SQ, unsigned Depth = 0);
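
// Example (illustrative sketch; assumes an integer `Value *V` of width
// `BitWidth` and a SimplifyQuery `SQ` supplied by the caller):
//
//   APInt LowBits = APInt::getLowBitsSet(BitWidth, 2);
//   if (MaskedValueIsZero(V, LowBits, SQ)) {
//     // The two low bits of V are zero, i.e. V is a multiple of 4.
//   }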

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign
/// bit (itself), but other cases can give us information. For example,
/// immediately after an "ashr X, 2", we know that the top 3 bits are all
/// equal to each other, so we return 3. For vectors, return the number of
/// sign bits for the vector element with the minimum number of known sign
/// bits.
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
                            unsigned Depth = 0, AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);
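
// Example (illustrative sketch; assumes an i32 `Value *Op` and a
// `DataLayout DL` supplied by the caller):
//
//   if (ComputeNumSignBits(Op, DL) > 24) {
//     // At least 25 of the 32 bits match the sign bit, so Op would survive
//     // a trunc to i8 followed by a sext back to i32.
//   }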

/// Get the upper bound on bit size for this Value \p Op as a signed integer.
/// i.e.  x == sext(trunc(x to MaxSignificantBits) to bitwidth(x)).
/// Similar to the APInt::getSignificantBits function.
unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL,
                                   unsigned Depth = 0,
                                   AssumptionCache *AC = nullptr,
                                   const Instruction *CxtI = nullptr,
                                   const DominatorTree *DT = nullptr);

/// Map a call instruction to an intrinsic ID.  Libcalls which have equivalent
/// intrinsics are treated as-if they were intrinsics.
Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
                                      const TargetLibraryInfo *TLI);

/// Given an exploded icmp instruction, return true if the comparison only
/// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
/// the result of the comparison is true when the input value is signed.
bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
                    bool &TrueIfSigned);

/// Returns a pair of values, which if passed to llvm.is.fpclass, returns the
/// same result as an fcmp with the given operands.
///
/// If \p LookThroughSrc is true, consider the input value when computing the
/// mask.
///
/// If \p LookThroughSrc is false, ignore the source value (i.e. the first pair
/// element will always be LHS).
std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
                                                const Function &F, Value *LHS,
                                                Value *RHS,
                                                bool LookThroughSrc = true);
std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
                                                const Function &F, Value *LHS,
                                                const APFloat *ConstRHS,
                                                bool LookThroughSrc = true);
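
// Example (illustrative sketch; assumes an `FCmpInst *FC` inside a
// `Function &F`):
//
//   auto [ClassVal, ClassMask] = fcmpToClassTest(
//       FC->getPredicate(), F, FC->getOperand(0), FC->getOperand(1));
//   if (ClassVal) {
//     // FC is equivalent to llvm.is.fpclass(ClassVal, ClassMask).
//   }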

/// Compute the possible floating-point classes that \p LHS could be based on
/// fcmp \p Pred \p LHS, \p RHS.
///
/// \returns { TestedValue, ClassesIfTrue, ClassesIfFalse }
///
/// If the compare returns an exact class test, ClassesIfTrue == ~ClassesIfFalse
///
/// This is a less exact version of fcmpToClassTest (e.g. fcmpToClassTest will
/// only succeed for a test of x > 0 implies positive, but not x > 1).
///
/// If \p LookThroughSrc is true, consider the input value when computing the
/// mask. This may look through sign bit operations.
///
/// If \p LookThroughSrc is false, ignore the source value (i.e. the first pair
/// element will always be LHS).
///
std::tuple<Value *, FPClassTest, FPClassTest>
fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
                 Value *RHS, bool LookThroughSrc = true);
std::tuple<Value *, FPClassTest, FPClassTest>
fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
                 FPClassTest RHS, bool LookThroughSrc = true);
std::tuple<Value *, FPClassTest, FPClassTest>
fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
                 const APFloat &RHS, bool LookThroughSrc = true);

struct KnownFPClass {
  /// Floating-point classes the value could be one of.
  FPClassTest KnownFPClasses = fcAllFlags;

  /// std::nullopt if the sign bit is unknown, true if the sign bit is
  /// definitely set or false if the sign bit is definitely unset.
  std::optional<bool> SignBit;

  bool operator==(KnownFPClass Other) const {
    return KnownFPClasses == Other.KnownFPClasses && SignBit == Other.SignBit;
  }

  /// Return true if it's known this can never be one of the mask entries.
  bool isKnownNever(FPClassTest Mask) const {
    return (KnownFPClasses & Mask) == fcNone;
  }

  bool isKnownAlways(FPClassTest Mask) const { return isKnownNever(~Mask); }

  bool isUnknown() const {
    return KnownFPClasses == fcAllFlags && !SignBit;
  }

  /// Return true if it's known this can never be a nan.
  bool isKnownNeverNaN() const {
    return isKnownNever(fcNan);
  }

  /// Return true if it's known this must always be a nan.
  bool isKnownAlwaysNaN() const { return isKnownAlways(fcNan); }

  /// Return true if it's known this can never be an infinity.
  bool isKnownNeverInfinity() const {
    return isKnownNever(fcInf);
  }

  /// Return true if it's known this can never be +infinity.
  bool isKnownNeverPosInfinity() const {
    return isKnownNever(fcPosInf);
  }

  /// Return true if it's known this can never be -infinity.
  bool isKnownNeverNegInfinity() const {
    return isKnownNever(fcNegInf);
  }

  /// Return true if it's known this can never be a subnormal.
  bool isKnownNeverSubnormal() const {
    return isKnownNever(fcSubnormal);
  }

  /// Return true if it's known this can never be a positive subnormal.
  bool isKnownNeverPosSubnormal() const {
    return isKnownNever(fcPosSubnormal);
  }

  /// Return true if it's known this can never be a negative subnormal.
  bool isKnownNeverNegSubnormal() const {
    return isKnownNever(fcNegSubnormal);
  }

  /// Return true if it's known this can never be a zero. This means a literal
  /// [+-]0, and does not include denormal inputs implicitly treated as [+-]0.
  bool isKnownNeverZero() const {
    return isKnownNever(fcZero);
  }

  /// Return true if it's known this can never be a literal positive zero.
  bool isKnownNeverPosZero() const {
    return isKnownNever(fcPosZero);
  }

  /// Return true if it's known this can never be a negative zero. This means a
  /// literal -0 and does not include denormal inputs implicitly treated as -0.
  bool isKnownNeverNegZero() const {
    return isKnownNever(fcNegZero);
  }

  /// Return true if it's known this can never be interpreted as a zero. This
  /// extends isKnownNeverZero to cover the case where the assumed
  /// floating-point mode for the function interprets denormals as zero.
  bool isKnownNeverLogicalZero(const Function &F, Type *Ty) const;

  /// Return true if it's known this can never be interpreted as a negative
  /// zero.
  bool isKnownNeverLogicalNegZero(const Function &F, Type *Ty) const;

  /// Return true if it's known this can never be interpreted as a positive
  /// zero.
  bool isKnownNeverLogicalPosZero(const Function &F, Type *Ty) const;

  static constexpr FPClassTest OrderedLessThanZeroMask =
      fcNegSubnormal | fcNegNormal | fcNegInf;
  static constexpr FPClassTest OrderedGreaterThanZeroMask =
      fcPosSubnormal | fcPosNormal | fcPosInf;

  /// Return true if we can prove that the analyzed floating-point value is
  /// either NaN or never less than -0.0.
  ///
  ///      NaN --> true
  ///       +0 --> true
  ///       -0 --> true
  ///   x > +0 --> true
  ///   x < -0 --> false
  bool cannotBeOrderedLessThanZero() const {
    return isKnownNever(OrderedLessThanZeroMask);
  }

  /// Return true if we can prove that the analyzed floating-point value is
  /// either NaN or never greater than -0.0.
  ///
  ///      NaN --> true
  ///       +0 --> true
  ///       -0 --> true
  ///   x > +0 --> false
  ///   x < -0 --> true
  bool cannotBeOrderedGreaterThanZero() const {
    return isKnownNever(OrderedGreaterThanZeroMask);
  }

  KnownFPClass &operator|=(const KnownFPClass &RHS) {
    KnownFPClasses = KnownFPClasses | RHS.KnownFPClasses;

    if (SignBit != RHS.SignBit)
      SignBit = std::nullopt;
    return *this;
  }

  void knownNot(FPClassTest RuleOut) {
    KnownFPClasses = KnownFPClasses & ~RuleOut;
    if (isKnownNever(fcNan) && !SignBit) {
      if (isKnownNever(fcNegative))
        SignBit = false;
      else if (isKnownNever(fcPositive))
        SignBit = true;
    }
  }

  void fneg() {
    KnownFPClasses = llvm::fneg(KnownFPClasses);
    if (SignBit)
      SignBit = !*SignBit;
  }

  void fabs() {
    if (KnownFPClasses & fcNegZero)
      KnownFPClasses |= fcPosZero;

    if (KnownFPClasses & fcNegInf)
      KnownFPClasses |= fcPosInf;

    if (KnownFPClasses & fcNegSubnormal)
      KnownFPClasses |= fcPosSubnormal;

    if (KnownFPClasses & fcNegNormal)
      KnownFPClasses |= fcPosNormal;

    signBitMustBeZero();
  }

  /// Return true if the sign bit must be 0, ignoring the sign of nans.
  bool signBitIsZeroOrNaN() const {
    return isKnownNever(fcNegative);
  }

  /// Assume the sign bit is zero.
  void signBitMustBeZero() {
    KnownFPClasses &= (fcPositive | fcNan);
    SignBit = false;
  }

  /// Assume the sign bit is one.
  void signBitMustBeOne() {
    KnownFPClasses &= (fcNegative | fcNan);
    SignBit = true;
  }

  void copysign(const KnownFPClass &Sign) {
    // Don't know anything about the sign of the source. Expand the possible
    // set to its opposite sign pair.
    if (KnownFPClasses & fcZero)
      KnownFPClasses |= fcZero;
    if (KnownFPClasses & fcSubnormal)
      KnownFPClasses |= fcSubnormal;
    if (KnownFPClasses & fcNormal)
      KnownFPClasses |= fcNormal;
    if (KnownFPClasses & fcInf)
      KnownFPClasses |= fcInf;

    // Sign bit is exactly preserved even for nans.
    SignBit = Sign.SignBit;

    // Clear sign bits based on the input sign mask.
    if (Sign.isKnownNever(fcPositive | fcNan) || (SignBit && *SignBit))
      KnownFPClasses &= (fcNegative | fcNan);
    if (Sign.isKnownNever(fcNegative | fcNan) || (SignBit && !*SignBit))
      KnownFPClasses &= (fcPositive | fcNan);
  }

  /// Propagate knowledge that a non-NaN source implies the result can also not
  /// be a NaN. For unconstrained operations, signaling nans are not guaranteed
  /// to be quieted but cannot be introduced.
  void propagateNaN(const KnownFPClass &Src, bool PreserveSign = false) {
    if (Src.isKnownNever(fcNan)) {
      knownNot(fcNan);
      if (PreserveSign)
        SignBit = Src.SignBit;
    } else if (Src.isKnownNever(fcSNan))
      knownNot(fcSNan);
  }

  /// Propagate knowledge from a source value that could be a denormal or
  /// zero. We have to be conservative since output flushing is not guaranteed,
  /// so known-never-zero may not hold.
  ///
  /// This assumes a copy-like operation and will replace any currently known
  /// information.
  void propagateDenormal(const KnownFPClass &Src, const Function &F, Type *Ty);

  /// Report known classes if \p Src is evaluated through a potentially
  /// canonicalizing operation. We can assume signaling nans will not be
  /// introduced, but cannot assume a denormal will be flushed under FTZ/DAZ.
  ///
  /// This assumes a copy-like operation and will replace any currently known
  /// information.
  void propagateCanonicalizingSrc(const KnownFPClass &Src, const Function &F,
                                  Type *Ty);

  void resetAll() { *this = KnownFPClass(); }
};
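
// Example (illustrative sketch): tracking classes across an fneg, using
// computeKnownFPClass (declared below) on a `Value *X` with a SimplifyQuery
// `SQ` assumed to be in scope:
//
//   KnownFPClass KnownX = computeKnownFPClass(X, fcAllFlags, /*Depth=*/0, SQ);
//   KnownX.fneg(); // Known classes and sign now describe -X.
//   if (KnownX.isKnownNeverNaN() && KnownX.SignBit == false) {
//     // -X is not a NaN and its sign bit is known to be clear.
//   }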

inline KnownFPClass operator|(KnownFPClass LHS, const KnownFPClass &RHS) {
  LHS |= RHS;
  return LHS;
}

inline KnownFPClass operator|(const KnownFPClass &LHS, KnownFPClass &&RHS) {
  RHS |= LHS;
  return std::move(RHS);
}

/// Determine which floating-point classes are valid for \p V, and return them
/// in KnownFPClass bit sets.
///
/// This function is defined on values with floating-point type, vectors of
/// floating-point type, and arrays of floating-point type.
///
/// \p InterestedClasses is a compile time optimization hint for which floating
/// point classes should be queried. Queries not specified in \p
/// InterestedClasses should be reliable if they are determined during the
/// query.
KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts,
                                 FPClassTest InterestedClasses, unsigned Depth,
                                 const SimplifyQuery &SQ);

KnownFPClass computeKnownFPClass(const Value *V, FPClassTest InterestedClasses,
                                 unsigned Depth, const SimplifyQuery &SQ);

inline KnownFPClass computeKnownFPClass(
    const Value *V, const DataLayout &DL,
    FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
    const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
    const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
    bool UseInstrInfo = true) {
  return computeKnownFPClass(
      V, InterestedClasses, Depth,
      SimplifyQuery(DL, TLI, DT, AC, CxtI, UseInstrInfo));
}

/// Wrapper to account for known fast math flags at the use instruction.
inline KnownFPClass
computeKnownFPClass(const Value *V, const APInt &DemandedElts,
                    FastMathFlags FMF, FPClassTest InterestedClasses,
                    unsigned Depth, const SimplifyQuery &SQ) {
  if (FMF.noNaNs())
    InterestedClasses &= ~fcNan;
  if (FMF.noInfs())
    InterestedClasses &= ~fcInf;

  KnownFPClass Result =
      computeKnownFPClass(V, DemandedElts, InterestedClasses, Depth, SQ);

  if (FMF.noNaNs())
    Result.KnownFPClasses &= ~fcNan;
  if (FMF.noInfs())
    Result.KnownFPClasses &= ~fcInf;
  return Result;
}

inline KnownFPClass computeKnownFPClass(const Value *V, FastMathFlags FMF,
                                        FPClassTest InterestedClasses,
                                        unsigned Depth,
                                        const SimplifyQuery &SQ) {
  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  return computeKnownFPClass(V, DemandedElts, FMF, InterestedClasses, Depth,
                             SQ);
}

/// Return true if we can prove that the specified FP value is never equal to
/// -0.0. Users should use caution when considering PreserveSign
/// denormal-fp-math.
inline bool cannotBeNegativeZero(const Value *V, unsigned Depth,
                                 const SimplifyQuery &SQ) {
  KnownFPClass Known = computeKnownFPClass(V, fcNegZero, Depth, SQ);
  return Known.isKnownNeverNegZero();
}

/// Return true if we can prove that the specified FP value is either NaN or
/// never less than -0.0.
///
///      NaN --> true
///       +0 --> true
///       -0 --> true
///   x > +0 --> true
///   x < -0 --> false
inline bool cannotBeOrderedLessThanZero(const Value *V, unsigned Depth,
                                        const SimplifyQuery &SQ) {
  KnownFPClass Known =
      computeKnownFPClass(V, KnownFPClass::OrderedLessThanZeroMask, Depth, SQ);
  return Known.cannotBeOrderedLessThanZero();
}

/// Return true if the floating-point scalar value is not an infinity or if
/// the floating-point vector value has no infinities. Return false if a value
/// could ever be infinity.
inline bool isKnownNeverInfinity(const Value *V, unsigned Depth,
                                 const SimplifyQuery &SQ) {
  KnownFPClass Known = computeKnownFPClass(V, fcInf, Depth, SQ);
  return Known.isKnownNeverInfinity();
}

/// Return true if the floating-point value can never contain a NaN or infinity.
inline bool isKnownNeverInfOrNaN(const Value *V, unsigned Depth,
                                 const SimplifyQuery &SQ) {
  KnownFPClass Known = computeKnownFPClass(V, fcInf | fcNan, Depth, SQ);
  return Known.isKnownNeverNaN() && Known.isKnownNeverInfinity();
}

/// Return true if the floating-point scalar value is not a NaN or if the
/// floating-point vector value has no NaN elements. Return false if a value
/// could ever be NaN.
inline bool isKnownNeverNaN(const Value *V, unsigned Depth,
                            const SimplifyQuery &SQ) {
  KnownFPClass Known = computeKnownFPClass(V, fcNan, Depth, SQ);
  return Known.isKnownNeverNaN();
}

/// Return false if we can prove that the specified FP value's sign bit is 0.
/// Return true if we can prove that the specified FP value's sign bit is 1.
/// Otherwise return std::nullopt.
inline std::optional<bool> computeKnownFPSignBit(const Value *V, unsigned Depth,
                                                 const SimplifyQuery &SQ) {
  KnownFPClass Known = computeKnownFPClass(V, fcAllFlags, Depth, SQ);
  return Known.SignBit;
}

/// If the specified value can be set by repeating the same byte in memory,
/// return the i8 value that it is represented with. This is true for all i8
/// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
/// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
/// i16 0x1234), return null. If the value is entirely undef and padding,
/// return undef.
Value *isBytewiseValue(Value *V, const DataLayout &DL);
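
// Example (illustrative sketch; assumes a store's value operand
// `Value *StoredVal` and a `DataLayout DL` in scope):
//
//   if (Value *ByteVal = isBytewiseValue(StoredVal, DL)) {
//     // A run of adjacent stores of StoredVal could be lowered to a memset
//     // that writes ByteVal.
//   }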

/// Given an aggregate and a sequence of indices, see if the scalar value
/// indexed is already around as a register, for example if it were inserted
/// directly into the aggregate.
///
/// If InsertBefore is not empty, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *FindInsertedValue(
    Value *V, ArrayRef<unsigned> idx_range,
    std::optional<BasicBlock::iterator> InsertBefore = std::nullopt);

/// Analyze the specified pointer to see if it can be expressed as a base
/// pointer plus a constant offset. Return the base and offset to the caller.
///
/// This is a wrapper around Value::stripAndAccumulateConstantOffsets that
/// creates and later unpacks the required APInt.
inline Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                               const DataLayout &DL,
                                               bool AllowNonInbounds = true) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  Value *Base =
      Ptr->stripAndAccumulateConstantOffsets(DL, OffsetAPInt, AllowNonInbounds);

  Offset = OffsetAPInt.getSExtValue();
  return Base;
}
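
// Example (illustrative sketch; assumes two pointers `Value *PtrA` and
// `Value *PtrB` plus a `DataLayout DL` supplied by the caller):
//
//   int64_t OffA = 0, OffB = 0;
//   Value *BaseA = GetPointerBaseWithConstantOffset(PtrA, OffA, DL);
//   Value *BaseB = GetPointerBaseWithConstantOffset(PtrB, OffB, DL);
//   if (BaseA == BaseB) {
//     // Both pointers address the same base; they are (OffB - OffA) bytes
//     // apart.
//   }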
inline const Value *
GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
                                 const DataLayout &DL,
                                 bool AllowNonInbounds = true) {
  return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL,
                                          AllowNonInbounds);
}

/// Returns true if the GEP is based on a pointer to a string (array of
/// \p CharSize integers) and is indexing into this string.
bool isGEPBasedOnPointerToString(const GEPOperator *GEP, unsigned CharSize = 8);

/// Represents offset+length into a ConstantDataArray.
struct ConstantDataArraySlice {
  /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
  /// initializer, it just doesn't fit the ConstantDataArray interface).
  const ConstantDataArray *Array;

  /// Slice starts at this Offset.
  uint64_t Offset;

  /// Length of the slice.
  uint64_t Length;

  /// Moves the Offset and adjusts Length accordingly.
  void move(uint64_t Delta) {
    assert(Delta < Length);
    Offset += Delta;
    Length -= Delta;
  }

  /// Convenience accessor for elements in the slice.
  uint64_t operator[](unsigned I) const {
    return Array == nullptr ? 0 : Array->getElementAsInteger(I + Offset);
  }
};

/// Returns true if the value \p V is a pointer into a ConstantDataArray.
/// If successful \p Slice will point to a ConstantDataArray info object
/// with an appropriate offset.
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
                              unsigned ElementSize, uint64_t Offset = 0);

/// This function computes the length of a null-terminated C string pointed to
/// by V. If successful, it returns true and returns the string in Str. If
/// unsuccessful, it returns false. This does not include the trailing null
/// character by default. If TrimAtNul is set to false, then this returns any
/// trailing null characters as well as any other characters that come after
/// it.
bool getConstantStringInfo(const Value *V, StringRef &Str,
                           bool TrimAtNul = true);
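
// Example (illustrative sketch; assumes `Value *Src` is the argument of a
// strlen-like call):
//
//   StringRef Str;
//   if (getConstantStringInfo(Src, Str)) {
//     // Str holds the constant string, so strlen(Src) folds to Str.size().
//   }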

/// If we can compute the length of the string pointed to by the specified
/// pointer, return 'len+1'.  If we can't, return 0.
uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);

/// This function returns the call's pointer argument that is considered the
/// same by aliasing rules. You CAN'T use it to replace one value with another.
/// If \p MustPreserveNullness is true, the call must preserve the nullness of
/// the pointer.
const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call,
                                                  bool MustPreserveNullness);
inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call,
                                                   bool MustPreserveNullness) {
  return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
      const_cast<const CallBase *>(Call), MustPreserveNullness));
}

/// {launder,strip}.invariant.group returns a pointer that aliases its
/// argument, and it only captures the pointer by returning it.
/// These intrinsics are not marked as nocapture, because returning is
/// considered a capture. The arguments are not marked as returned either,
/// because that would make the attribute useless. If \p MustPreserveNullness
/// is true, the intrinsic must preserve the nullness of the pointer.
bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
    const CallBase *Call, bool MustPreserveNullness);

/// This method strips off any GEP address adjustments, pointer casts
/// or `llvm.threadlocal.address` from the specified value \p V, returning the
/// original object being addressed. Note that the returned value has pointer
/// type if the specified value does. If the \p MaxLookup value is non-zero, it
/// limits the number of instructions to be stripped off.
const Value *getUnderlyingObject(const Value *V, unsigned MaxLookup = 6);
inline Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6) {
  // Force const to avoid infinite recursion.
  const Value *VConst = V;
  return const_cast<Value *>(getUnderlyingObject(VConst, MaxLookup));
}
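
// Example (illustrative sketch; assumes a load's pointer operand
// `Value *Ptr`):
//
//   if (auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Ptr))) {
//     // Ptr is derived from the local alloca AI through GEPs and casts.
//   }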

/// Like getUnderlyingObject(), but will try harder to find a single underlying
/// object. In particular, this function also looks through selects and phis.
const Value *getUnderlyingObjectAggressive(const Value *V);

/// This method is similar to getUnderlyingObject except that it can
/// look through phi and select instructions and return multiple objects.
///
/// If LoopInfo is passed, loop phis are further analyzed.  If a pointer
/// accesses different objects in each iteration, we don't look through the
/// phi node. E.g. consider this loop nest:
///
///   int **A;
///   for (i)
///     for (j) {
///        A[i][j] = A[i-1][j] * B[j]
///     }
///
/// This is transformed by Load-PRE to stash away A[i] for the next iteration
/// of the outer loop:
///
///   Curr = A[0];          // Prev_0
///   for (i: 1..N) {
///     Prev = Curr;        // Prev = PHI (Prev_0, Curr)
///     Curr = A[i];
///     for (j: 0..N) {
///        Curr[j] = Prev[j] * B[j]
///     }
///   }
///
/// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
/// should not assume that Curr and Prev share the same underlying object thus
/// it shouldn't look through the phi above.
void getUnderlyingObjects(const Value *V,
                          SmallVectorImpl<const Value *> &Objects,
                          const LoopInfo *LI = nullptr, unsigned MaxLookup = 6);

/// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
bool getUnderlyingObjectsForCodeGen(const Value *V,
                                    SmallVectorImpl<Value *> &Objects);

/// Returns the unique alloca that the value comes from, or nullptr.
/// If OffsetZero is true, check that V points to the beginning of the alloca.
AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false);
inline const AllocaInst *findAllocaForValue(const Value *V,
                                            bool OffsetZero = false) {
  return findAllocaForValue(const_cast<Value *>(V), OffsetZero);
}

/// Return true if the only users of this pointer are lifetime markers.
bool onlyUsedByLifetimeMarkers(const Value *V);

/// Return true if the only users of this pointer are lifetime markers or
/// droppable instructions.
bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);

/// Return true if the instruction doesn't potentially cross vector lanes. This
/// condition is weaker than checking that the instruction is lanewise: lanewise
/// means that the same operation is splatted across all lanes, but we also
/// include the case where there is a different operation on each lane, as long
/// as the operation only uses data from that lane. An example of an operation
/// that is not lanewise, but doesn't cross vector lanes is insertelement.
bool isNotCrossLaneOperation(const Instruction *I);

/// Return true if the instruction does not have any effects besides
/// calculating the result and does not have undefined behavior.
///
/// This method never returns true for an instruction that returns true for
/// mayHaveSideEffects; however, this method also does some other checks in
/// addition. It checks for undefined behavior, like dividing by zero or
/// loading from an invalid pointer (but not for undefined results, like a
/// shift with a shift amount larger than the width of the result). It checks
/// for malloc and alloca because speculatively executing them might cause a
/// memory leak. It also returns false for instructions related to control
/// flow, specifically terminators and PHI nodes.
///
/// If the CtxI is specified this method performs context-sensitive analysis
/// and returns true if it is safe to execute the instruction immediately
/// before the CtxI. If the instruction has (transitive) operands that don't
/// dominate CtxI, the analysis is performed under the assumption that these
/// operands will also be speculated to a point before CxtI.
///
/// If the CtxI is NOT specified this method only looks at the instruction
/// itself and its operands, so if this method returns true, it is safe to
/// move the instruction as long as the correct dominance relationships for
/// the operands and users hold.
///
/// This method can return true for instructions that read memory;
/// for such instructions, moving them may change the resulting value.
bool isSafeToSpeculativelyExecute(const Instruction *I,
                                  const Instruction *CtxI = nullptr,
                                  AssumptionCache *AC = nullptr,
                                  const DominatorTree *DT = nullptr,
                                  const TargetLibraryInfo *TLI = nullptr,
                                  bool UseVariableInfo = true);

inline bool isSafeToSpeculativelyExecute(const Instruction *I,
                                         BasicBlock::iterator CtxI,
                                         AssumptionCache *AC = nullptr,
                                         const DominatorTree *DT = nullptr,
                                         const TargetLibraryInfo *TLI = nullptr,
                                         bool UseVariableInfo = true) {
  // Take an iterator, and unwrap it into an Instruction *.
  return isSafeToSpeculativelyExecute(I, &*CtxI, AC, DT, TLI, UseVariableInfo);
}
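
// Example (illustrative sketch; assumes an `Instruction *I` inside a loop
// and an `Instruction *InsertPt`, e.g. the preheader's terminator, as the
// intended hoist point):
//
//   if (isSafeToSpeculativelyExecute(I, InsertPt)) {
//     // Executing I at InsertPt cannot trap or invoke UB, so a pass may
//     // hoist it, subject to operand dominance.
//   }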

/// Don't use information from its non-constant operands. This helper is used
/// when its operands are going to be replaced.
inline bool
isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I) {
  return isSafeToSpeculativelyExecute(I, nullptr, nullptr, nullptr, nullptr,
                                      /*UseVariableInfo=*/false);
}

/// This returns the same result as isSafeToSpeculativelyExecute if Opcode is
/// the actual opcode of Inst. If the provided and actual opcode differ, the
/// function (virtually) overrides the opcode of Inst with the provided
/// Opcode. There are some constraints in this case:
/// * If Opcode has a fixed number of operands (e.g., as binary operators do),
///   then Inst has to have at least as many leading operands. The function
///   will ignore all trailing operands beyond that number.
/// * If Opcode allows for an arbitrary number of operands (e.g., as CallInsts
///   do), then all operands are considered.
/// * The virtual instruction has to satisfy all typing rules of the provided
///   Opcode.
/// * This function is pessimistic in the following sense: If one actually
///   materialized the virtual instruction, then isSafeToSpeculativelyExecute
///   may say that the materialized instruction is speculatable whereas this
///   function may have said that the instruction wouldn't be speculatable.
///   This behavior is a shortcoming in the current implementation and not
///   intentional.
bool isSafeToSpeculativelyExecuteWithOpcode(
    unsigned Opcode, const Instruction *Inst, const Instruction *CtxI = nullptr,
    AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr,
    const TargetLibraryInfo *TLI = nullptr, bool UseVariableInfo = true);

/// Returns true if the result or effects of the given instruction \p I
/// depend on values not reachable through the def-use graph.
/// * Memory dependence arises for example if the instruction reads from
///   memory or may produce effects or undefined behavior. Memory-dependent
///   instructions generally cannot be reordered with respect to other
///   memory-dependent instructions.
/// * Control dependence arises for example if the instruction may fault
///   if lifted above a throwing call or infinite loop.
bool mayHaveNonDefUseDependency(const Instruction &I);

/// Return true if it is an intrinsic that cannot be speculated but also
/// cannot trap.
bool isAssumeLikeIntrinsic(const Instruction *I);

/// Return true if it is valid to use the assumptions provided by an
/// assume intrinsic, I, at the point in the control-flow identified by the
/// context instruction, CxtI. By default, ephemeral values of the assumption
/// are treated as an invalid context, to prevent the assumption from being used
/// to optimize away its argument. If the caller can ensure that this won't
/// happen, it can call with AllowEphemerals set to true to get more valid
/// assumptions.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
                             const DominatorTree *DT = nullptr,
                             bool AllowEphemerals = false);

enum class OverflowResult {
  /// Always overflows in the direction of signed/unsigned min value.
  AlwaysOverflowsLow,
  /// Always overflows in the direction of signed/unsigned max value.
  AlwaysOverflowsHigh,
  /// May or may not overflow.
  MayOverflow,
  /// Never overflows.
  NeverOverflows,
};

OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS,
                                             const SimplifyQuery &SQ,
                                             bool IsNSW = false);
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                           const SimplifyQuery &SQ);
OverflowResult
computeOverflowForUnsignedAdd(const WithCache<const Value *> &LHS,
                              const WithCache<const Value *> &RHS,
                              const SimplifyQuery &SQ);
OverflowResult computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
                                           const WithCache<const Value *> &RHS,
                                           const SimplifyQuery &SQ);
/// This version also leverages the sign bit of Add if known.
OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
                                           const SimplifyQuery &SQ);
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS,
                                             const SimplifyQuery &SQ);
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
                                           const SimplifyQuery &SQ);
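
// Example (illustrative sketch; assumes an add `BinaryOperator *Add` and a
// SimplifyQuery `SQ` supplied by the caller):
//
//   if (computeOverflowForUnsignedAdd(Add->getOperand(0), Add->getOperand(1),
//                                     SQ) == OverflowResult::NeverOverflows)
//     Add->setHasNoUnsignedWrap(true); // Safe to mark the add as `nuw`.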

/// Returns true if the arithmetic part of the \p WO 's result is
/// used only along the paths control dependent on the computation
/// not overflowing, \p WO being an <op>.with.overflow intrinsic.
bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                               const DominatorTree &DT);

/// Determine the possible constant range of vscale with the given bit width,
/// based on the vscale_range function attribute.
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth);

/// Determine the possible constant range of an integer or vector of integer
/// value. This is intended as a cheap, non-recursive check.
ConstantRange computeConstantRange(const Value *V, bool ForSigned,
                                   bool UseInstrInfo = true,
                                   AssumptionCache *AC = nullptr,
                                   const Instruction *CtxI = nullptr,
                                   const DominatorTree *DT = nullptr,
                                   unsigned Depth = 0);
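
// Example (illustrative sketch; assumes an integer `Value *Idx` used to
// index a 256-element table):
//
//   ConstantRange CR = computeConstantRange(Idx, /*ForSigned=*/false);
//   if (CR.getUnsignedMax().ult(256)) {
//     // Idx provably lies in [0, 255], so a bounds check on it is redundant.
//   }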

/// Combine constant ranges from computeConstantRange() and computeKnownBits().
ConstantRange
computeConstantRangeIncludingKnownBits(const WithCache<const Value *> &V,
                                       bool ForSigned, const SimplifyQuery &SQ);

/// Return true if this function can prove that the instruction I will
/// always transfer execution to one of its successors (including the next
/// instruction that follows within a basic block). E.g. this is not
/// guaranteed for function calls that could loop infinitely.
///
/// In other words, this function returns false for instructions that may
/// transfer execution or fail to transfer execution in a way that is not
/// captured in the CFG nor in the sequence of instructions within a basic
/// block.
///
/// Undefined behavior is assumed not to happen, so e.g. division is
/// guaranteed to transfer execution to the following instruction even
/// though division by zero might cause undefined behavior.
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);

/// Returns true if this block does not contain a potential implicit exit.
/// This is equivalent to saying that all instructions within the basic block
/// are guaranteed to transfer execution to their successor within the basic
/// block. This has the same assumptions w.r.t. undefined behavior as the
/// instruction variant of this function.
bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);

/// Return true if every instruction in the range (Begin, End) is
/// guaranteed to transfer execution to its static successor. \p ScanLimit
/// bounds the search to avoid scanning huge blocks.
bool isGuaranteedToTransferExecutionToSuccessor(
    BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
    unsigned ScanLimit = 32);

/// Same as previous, but with range expressed via iterator_range.
bool isGuaranteedToTransferExecutionToSuccessor(
    iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit = 32);

/// Return true if this function can prove that the instruction I
/// is executed for every iteration of the loop L.
///
/// Note that this currently only considers the loop header.
bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                            const Loop *L);

/// Return true if \p PoisonOp's user yields poison or raises UB if its
/// operand \p PoisonOp is poison.
///
/// If \p PoisonOp is a vector or an aggregate and the operation's result is a
/// single value, any poison element in \p PoisonOp should make the result
/// poison or raise UB.
///
/// To filter out operands that raise UB on poison, you can use
/// getGuaranteedNonPoisonOps.
bool propagatesPoison(const Use &PoisonOp);
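
// Example (illustrative sketch; assumes an `Instruction *I` already known
// to produce poison):
//
//   for (const Use &U : I->uses())
//     if (propagatesPoison(U)) {
//       // The user of this use yields poison (or raises UB) whenever I is
//       // poison, so it may be treated as poison as well.
//     }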

/// Insert operands of I into Ops such that I will trigger undefined behavior
/// if I is executed and that operand has a poison value.
void getGuaranteedNonPoisonOps(const Instruction *I,
                               SmallVectorImpl<const Value *> &Ops);

/// Insert operands of I into Ops such that I will trigger undefined behavior
/// if I is executed and that operand is not a well-defined value
/// (i.e. has undef bits or poison).
void getGuaranteedWellDefinedOps(const Instruction *I,
                                 SmallVectorImpl<const Value *> &Ops);

/// Return true if the given instruction must trigger undefined behavior
/// when I is executed with any operands which appear in KnownPoison holding
/// a poison value at the point of execution.
bool mustTriggerUB(const Instruction *I,
                   const SmallPtrSetImpl<const Value *> &KnownPoison);

/// Return true if this function can prove that if Inst is executed
/// and yields a poison value or undef bits, then that will trigger
/// undefined behavior.
///
/// Note that this currently only considers the basic block that is
/// the parent of Inst.
bool programUndefinedIfUndefOrPoison(const Instruction *Inst);
bool programUndefinedIfPoison(const Instruction *Inst);

/// canCreateUndefOrPoison returns true if Op can create undef or poison from
/// non-undef & non-poison operands.
/// For vectors, canCreateUndefOrPoison returns true if there is potential
/// poison or undef in any element of the result when vectors without
/// undef/poison are given as operands.
/// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns
/// true. If Op raises immediate UB but never creates poison or undef
/// (e.g. sdiv I, 0), canCreatePoison returns false.
///
/// \p ConsiderFlagsAndMetadata controls whether poison producing flags and
/// metadata on the instruction are considered.  This can be used to see if the
/// instruction could still introduce undef or poison even without poison
/// generating flags and metadata which might be on the instruction.
/// (i.e. could the result of Op->dropPoisonGeneratingFlags() still create
/// poison or undef)
///
/// canCreatePoison returns true if Op can create poison from non-poison
/// operands.
bool canCreateUndefOrPoison(const Operator *Op,
                            bool ConsiderFlagsAndMetadata = true);
bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata = true);

/// Return true if V is poison given that ValAssumedPoison is already poison.
/// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`,
/// impliesPoison returns true.
bool impliesPoison(const Value *ValAssumedPoison, const Value *V);

/// Return true if this function can prove that V does not have undef bits
/// and is never poison. If V is an aggregate value or vector, check whether
/// all elements (except padding) are not undef or poison.
/// Note that this is different from canCreateUndefOrPoison because the
/// function assumes Op's operands are not poison/undef.
///
/// If CtxI and DT are specified this method performs flow-sensitive analysis
/// and returns true if it is guaranteed to be never undef or poison
/// immediately before the CtxI.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
                                      AssumptionCache *AC = nullptr,
                                      const Instruction *CtxI = nullptr,
                                      const DominatorTree *DT = nullptr,
                                      unsigned Depth = 0);

/// Returns true if V cannot be poison, but may be undef.
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
                               const Instruction *CtxI = nullptr,
                               const DominatorTree *DT = nullptr,
                               unsigned Depth = 0);

inline bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
                                      BasicBlock::iterator CtxI,
                                      const DominatorTree *DT = nullptr,
                                      unsigned Depth = 0) {
  // Takes an iterator as a position, passes down to Instruction *
  // implementation.
  return isGuaranteedNotToBePoison(V, AC, &*CtxI, DT, Depth);
}

/// Returns true if V cannot be undef, but may be poison.
bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC = nullptr,
                              const Instruction *CtxI = nullptr,
                              const DominatorTree *DT = nullptr,
                              unsigned Depth = 0);

/// Return true if undefined behavior would provably be executed on the path
/// to OnPathTo if Root produced a poison result.  Note that this doesn't say
/// anything about whether OnPathTo is actually executed or whether Root is
/// actually poison.  This can be used to assess whether a new use of Root can
/// be added at a location which is control equivalent with OnPathTo (such as
/// immediately before it) without introducing UB which didn't previously
/// exist.  Note that a false result conveys no information.
bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
                                   Instruction *OnPathTo,
                                   DominatorTree *DT);

/// Convert an integer comparison with a constant RHS into an equivalent
/// form with the strictness flipped predicate. Return the new predicate and
/// corresponding constant RHS if possible. Otherwise return std::nullopt.
/// E.g., (icmp sgt X, 0) -> (icmp sge X, 1).
std::optional<std::pair<CmpPredicate, Constant *>>
getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C);

/// Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
  SPF_UNKNOWN = 0,
  SPF_SMIN,    /// Signed minimum
  SPF_UMIN,    /// Unsigned minimum
  SPF_SMAX,    /// Signed maximum
  SPF_UMAX,    /// Unsigned maximum
  SPF_FMINNUM, /// Floating point minnum
  SPF_FMAXNUM, /// Floating point maxnum
  SPF_ABS,     /// Absolute value
  SPF_NABS     /// Negated absolute value
};

/// Behavior when a floating point min/max is given one NaN and one
/// non-NaN as input.
enum SelectPatternNaNBehavior {
  SPNB_NA = 0,        /// NaN behavior not applicable.
  SPNB_RETURNS_NAN,   /// Given one NaN input, returns the NaN.
  SPNB_RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
  SPNB_RETURNS_ANY    /// Given one NaN input, can return either (or
                      /// it has been determined that no operands can
                      /// be NaN).
};

struct SelectPatternResult {
  SelectPatternFlavor Flavor;
  SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
                                        /// SPF_FMINNUM or SPF_FMAXNUM.
  bool Ordered; /// When implementing this min/max pattern as
                /// fcmp; select, does the fcmp have to be
                /// ordered?

  /// Return true if \p SPF is a min or a max pattern.
  static bool isMinOrMax(SelectPatternFlavor SPF) {
    return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS;
  }
};

/// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
/// and providing the out parameter results if we successfully match.
///
/// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be
/// the negation instruction from the idiom.
///
/// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
/// not match that of the original select. If this is the case, the cast
/// operation (one of Trunc,SExt,Zext) that must be done to transform the
/// type of LHS and RHS into the type of V is returned in CastOp.
///
/// For example:
///   %1 = icmp slt i32 %a, i32 4
///   %2 = sext i32 %a to i64
///   %3 = select i1 %1, i64 %2, i64 4
///
/// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
///
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                       Instruction::CastOps *CastOp = nullptr,
                                       unsigned Depth = 0);

inline SelectPatternResult matchSelectPattern(const Value *V, const Value *&LHS,
                                              const Value *&RHS) {
  Value *L = const_cast<Value *>(LHS);
  Value *R = const_cast<Value *>(RHS);
  auto Result = matchSelectPattern(const_cast<Value *>(V), L, R);
  LHS = L;
  RHS = R;
  return Result;
}
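
// Example (illustrative sketch; assumes a `SelectInst *Sel` that may encode
// a min/max idiom):
//
//   Value *LHS, *RHS;
//   SelectPatternResult SPR = matchSelectPattern(Sel, LHS, RHS);
//   if (SelectPatternResult::isMinOrMax(SPR.Flavor)) {
//     // For example, SPF_SMAX means Sel computes smax(LHS, RHS) and could
//     // be replaced with the corresponding min/max intrinsic.
//   }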

/// Determine the pattern that a select with the given compare as its
/// predicate and given values as its true/false operands would match.
SelectPatternResult matchDecomposedSelectPattern(
    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
    Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);

/// Determine the pattern for predicate `X Pred Y ? X : Y`.
SelectPatternResult
getSelectPattern(CmpInst::Predicate Pred,
                 SelectPatternNaNBehavior NaNBehavior = SPNB_NA,
                 bool Ordered = false);

/// Return the canonical comparison predicate for the specified
/// minimum/maximum flavor.
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered = false);

/// Convert given `SPF` to equivalent min/max intrinsic.
/// Caller must ensure `SPF` is an integer min or max pattern.
Intrinsic::ID getMinMaxIntrinsic(SelectPatternFlavor SPF);

/// Return the inverse minimum/maximum flavor of the specified flavor.
/// For example, signed minimum is the inverse of signed maximum.
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);

Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID);

/// Return the minimum or maximum constant value for the specified integer
/// min/max flavor and type.
APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth);

/// Check if the values in \p VL are select instructions that can be converted
/// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a
/// conversion is possible, together with a bool indicating whether all select
/// conditions are only used by the selects. Otherwise return
/// Intrinsic::not_intrinsic.
std::pair<Intrinsic::ID, bool>
canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);

/// Attempt to match a simple first order recurrence cycle of the form:
///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
///   %inc = binop %iv, %step
/// OR
///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
///   %inc = binop %step, %iv
///
/// A first order recurrence is a formula with the form: X_n = f(X_(n-1))
///
/// A couple of notes on subtleties in that definition:
/// * The Step does not have to be loop invariant.  In math terms, it can
///   be a free variable.  We allow recurrences with both constant and
///   variable coefficients. Callers may wish to filter cases where Step
///   does not dominate P.
/// * For non-commutative operators, we will match both forms.  This
///   results in some odd recurrence structures.  Callers may wish to filter
///   out recurrences where the phi is not the LHS of the returned operator.
/// * Because of the structure matched, the caller can assume as a post
///   condition of the match the presence of a Loop with P's parent as its
///   header *except* in unreachable code.  (Dominance decays in unreachable
///   code.)
///
/// NOTE: This is intentionally simple.  If you want the ability to analyze
/// non-trivial loop conditions, see ScalarEvolution instead.
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start,
                           Value *&Step);

/// Analogous to the above, but starting from the binary operator
bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, Value *&Start,
                           Value *&Step);
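
// Example (illustrative sketch; assumes a `PHINode *PN` in a loop header):
//
//   BinaryOperator *BO;
//   Value *Start, *Step;
//   if (matchSimpleRecurrence(PN, BO, Start, Step)) {
//     // PN starts at Start and is updated every iteration by applying BO
//     // to the previous value and Step (e.g. an induction variable when
//     // BO is an add).
//   }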

/// Return true if RHS is known to be implied true by LHS.  Return false if
/// RHS is known to be implied false by LHS.  Otherwise, return std::nullopt if
/// no implication can be made. A & B must be i1 (boolean) values or a vector
/// of such values. Note that the truth table for implication is the same as
/// <=u on i1 values (but not <=s!).  The truth table for both is:
///    | T | F (B)
///  T | T | F
///  F | T | T
/// (A)
std::optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
                                       const DataLayout &DL,
                                       bool LHSIsTrue = true,
                                       unsigned Depth = 0);
std::optional<bool> isImpliedCondition(const Value *LHS, CmpPredicate RHSPred,
                                       const Value *RHSOp0, const Value *RHSOp1,
                                       const DataLayout &DL,
                                       bool LHSIsTrue = true,
                                       unsigned Depth = 0);
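
// Example (illustrative sketch; assumes two i1 conditions `Value *CondA`
// and `Value *CondB` plus a `DataLayout DL`):
//
//   std::optional<bool> Implied =
//       isImpliedCondition(CondA, CondB, DL, /*LHSIsTrue=*/true);
//   if (Implied && *Implied) {
//     // Whenever CondA is true, CondB is also true; a branch on CondB
//     // dominated by CondA's true edge can be folded.
//   }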

/// Return the boolean condition value in the context of the given instruction
/// if it is known based on dominating conditions.
std::optional<bool> isImpliedByDomCondition(const Value *Cond,
                                            const Instruction *ContextI,
                                            const DataLayout &DL);
std::optional<bool> isImpliedByDomCondition(CmpPredicate Pred, const Value *LHS,
                                            const Value *RHS,
                                            const Instruction *ContextI,
                                            const DataLayout &DL);

/// Call \p InsertAffected on all Values whose known bits / value may be
/// affected by the condition \p Cond. Used by AssumptionCache and
/// DomConditionCache.
void findValuesAffectedByCondition(Value *Cond, bool IsAssume,
                                   function_ref<void(Value *)> InsertAffected);

} // end namespace llvm

#endif // LLVM_ANALYSIS_VALUETRACKING_H