//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
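/// For example, both i32 and <4 x i32> yield 32, while a pointer type yields
/// the DataLayout pointer size for its address space (typically 64 bits on a
/// 64-bit target).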
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static const Instruction *safeCxtI(const Value *V1, const Value *V2,
                                   const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V1);
  if (CxtI && CxtI->getParent())
    return CxtI;

  CxtI = dyn_cast<Instruction>(V2);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
                                   const APInt &DemandedElts,
                                   APInt &DemandedLHS, APInt &DemandedRHS) {
  // The length of scalable vectors is unknown at compile time, thus we
  // cannot check their values.
  if (isa<ScalableVectorType>(Shuf->getType()))
    return false;

  int NumElts =
      cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
  DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts);
  if (DemandedElts.isNullValue())
    return true;
  // Simple case of a shuffle with zeroinitializer.
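  // For example, a mask of <4 x i32> zeroinitializer makes every demanded
  // result element read lane 0 of the LHS, so only DemandedLHS bit 0 is set.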
  if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
    DemandedLHS.setBit(0);
    return true;
  }
  for (int i = 0; i != NumMaskElts; ++i) {
    if (!DemandedElts[i])
      continue;
    int M = Shuf->getMaskValue(i);
    assert(M < (NumElts * 2) && "Invalid shuffle mask constant");

    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    if (M == -1)
      return false;
    if (M < NumElts)
      DemandedLHS.setBit(M % NumElts);
    else
      DemandedRHS.setBit(M % NumElts);
  }

  return true;
}

static void computeKnownBits(const Value *V, const APInt &DemandedElts,
                             KnownBits &Known, unsigned Depth, const Query &Q);

static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                             const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a
  // scalable vector.
  if (isa<ScalableVectorType>(V->getType())) {
    Known.resetAll();
    return;
  }

  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
}

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                            KnownBits &Known, const DataLayout &DL,
                            unsigned Depth, AssumptionCache *AC,
                            const Instruction *CxtI, const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, DemandedElts, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                                  unsigned Depth, const Query &Q);

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                                 const DataLayout &DL, unsigned Depth,
                                 AssumptionCache *AC, const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, DemandedElts, Depth,
      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
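  // For example, LHS = (X & ~42) and RHS = (Y & 42) can never have a set bit
  // in common, no matter what X and Y are.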
  Value *M;
  if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(RHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(LHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
}

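/// Return true if every user of \p CxtI is an eq/ne icmp against the constant
/// zero (or null), e.g. "icmp eq i32 %v, 0".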
bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const Query &Q);

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
                            const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2, 0,
                           Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
                                   unsigned Depth, const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a
  // scalable vector.
  if (isa<ScalableVectorType>(V->getType()))
    return 1;

  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
  return ComputeNumSignBits(V, DemandedElts, Depth, Q);
}

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW, const APInt &DemandedElts,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);

  // If one operand is unknown and we have no nowrap information,
  // the result will be unknown independently of the second operand.
  if (KnownOut.isUnknown() && !NSW)
    return;

  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
  KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                const APInt &DemandedElts, KnownBits &Known,
                                KnownBits &Known2, unsigned Depth,
                                const Query &Q) {
  computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative =
            (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
             Known2.isNonZero()) ||
            (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
    }
  }

  Known = KnownBits::mul(Known, Known2);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
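    // For example, the range [32, 48) spans 0b00100000 through 0b00101111, so
    // with an 8-bit width the top four bits are known to be 0010.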
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
    Known.One &= UnsignedMax & Mask;
    Known.Zero &= ~UnsignedMax & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
          return EphValues.count(U);
        })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          append_range(WorkSet, U->operands());
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
    return CI->isAssumeLikeIntrinsic();

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (Inv->getParent() == CxtI->getParent()) {
    // If Inv and CxtI are in the same block, check if the assume (Inv) is
    // first in the BB.
    if (Inv->comesBefore(CxtI))
      return true;

    // Don't let an assume affect itself - this would cause the problems
    // `isEphemeralValueOf` is trying to prevent, and it would also make
    // the loop below go out of bounds.
    if (Inv == CxtI)
      return false;

    // The context comes first, but they're both in the same block.
    // Make sure there is nothing in between that might interrupt
    // the control flow, not even CxtI itself.
    // We limit the scan distance between the assume and its context instruction
    // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
    // it can be adjusted if needed (could be turned into a cl::opt).
    unsigned ScanLimit = 15;
    for (BasicBlock::const_iterator I(CxtI), IE(Inv); I != IE; ++I)
      if (!isGuaranteedToTransferExecutionToSuccessor(&*I) || --ScanLimit == 0)
        return false;

    return !isEphemeralValueOf(Inv, CxtI);
  }

  // Inv and CxtI are in different blocks.
  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  return false;
}

static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
  // v u> y implies v != 0.
  if (Pred == ICmpInst::ICMP_UGT)
    return true;

  // Special-case v != 0 to also handle v != null.
  if (Pred == ICmpInst::ICMP_NE)
    return match(RHS, m_Zero());

  // All other predicates - rely on generic ConstantRange handling.
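  // For example, "v u>= 16" gives the exact region [16, 0), which wraps around
  // and excludes zero, while "v s> -1" gives [0, signed-min), which contains
  // zero and therefore proves nothing.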
  const APInt *C;
  if (!match(RHS, m_APInt(C)))
    return false;

  ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
  return !TrueValues.contains(APInt::getNullValue(C->getBitWidth()));
}

static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return false;

  if (Q.CxtI && V->getType()->isPointerTy()) {
    SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
    if (!NullPointerIsDefined(Q.CxtI->getFunction(),
                              V->getType()->getPointerAddressSpace()))
      AttrKinds.push_back(Attribute::Dereferenceable);

    if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
      return true;
  }

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getFunction() == Q.CxtI->getFunction() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *RHS;
    CmpInst::Predicate Pred;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
    if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
      return false;

    if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
      return true;
  }

  return false;
}

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Refine Known set if the pointer alignment is set by assume bundles.
  if (V->getType()->isPointerTy()) {
    if (RetainedKnowledge RK = getKnowledgeValidInContext(
            V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
      Known.Zero.setLowBits(Log2_32(RK.ArgValue));
    }
  }

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxAnalysisRecursionDepth)
      continue;

    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    // We are attempting to compute known bits for the operands of an assume.
    // Do not try to use other assumptions for those recursive calls because
    // that can lead to mutual recursion and a compile-time explosion.
    // An example of the mutual recursion: computeKnownBits can call
    // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
    // and so on.
    Query QueryNoAC = Q;
    QueryNoAC.AC = nullptr;

    // Note that ptrtoint may change the bitwidth.
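    // For example, with 64-bit pointers an assume on
    // "icmp eq i32 (ptrtoint i8* %p to i32), %a" only constrains the low 32
    // bits of %p, which is why the recovered known bits are resized below.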
    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

    CmpInst::Predicate Pred;
    uint64_t C;
    switch (Cmp->getPredicate()) {
    default:
      break;
    case ICmpInst::ICMP_EQ:
      // assume(v = a)
      if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        Known.Zero |= RHSKnown.Zero;
        Known.One |= RHSKnown.One;
        // assume(v & b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can
        // propagate known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & MaskKnown.One;
        Known.One |= RHSKnown.One & MaskKnown.One;
        // assume(~(v & b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can
        // propagate inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & MaskKnown.One;
        Known.One |= RHSKnown.Zero & MaskKnown.One;
        // assume(v | b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
        // assume(~(v | b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & BKnown.Zero;
        Known.One |= RHSKnown.Zero & BKnown.Zero;
        // assume(v ^ b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // known bits from the RHS to V. For those bits in B that are known to
        // be one, we can propagate inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
        Known.Zero |= RHSKnown.One & BKnown.One;
        Known.One |= RHSKnown.Zero & BKnown.One;
        // assume(v << c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the right by C.
        RHSKnown.Zero.lshrInPlace(C);
        Known.Zero |= RHSKnown.Zero;
        RHSKnown.One.lshrInPlace(C);
        Known.One |= RHSKnown.One;
        // assume(~(v << c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the right by C.
        RHSKnown.One.lshrInPlace(C);
        Known.Zero |= RHSKnown.One;
        RHSKnown.Zero.lshrInPlace(C);
        Known.One |= RHSKnown.Zero;
        // assume(v >> c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.Zero << C;
        Known.One |= RHSKnown.One << C;
        // assume(~(v >> c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.One << C;
        Known.One |= RHSKnown.Zero << C;
      }
      break;
    case ICmpInst::ICMP_SGE:
      // assume(v >=_s c) where c is non-negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SGT:
      // assume(v >_s c) where c is at least -1.
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLE:
      // assume(v <=_s c) where c is negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLT:
      // assume(v <_s c) where c is non-positive
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isZero() || RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_ULE:
      // assume(v <=_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // Whatever high bits in c are zero are known to be zero.
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    case ICmpInst::ICMP_ULT:
      // assume(v <_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // If the RHS is known zero, then this assumption must be wrong (nothing
        // is unsigned less than zero). Signal a conflict and get out of here.
        if (RHSKnown.isZero()) {
          Known.Zero.setAllBits();
          Known.One.setAllBits();
          break;
        }

        // Whatever high bits in c are zero are known to be zero (if c is a
        // power of 2, then one more).
        if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, QueryNoAC))
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
        else
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known and on return
/// contains the known bits of the shift value source. KF is an
/// operator-specific function that, given those known bits and a shift amount,
/// computes the implied known bits of the shift operator's result for that
/// shift amount. The results from calling KF are conservatively combined for
/// all permitted shift amounts.
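/// For example, if the shift amount is known to be either 0 or 2, the final
/// result keeps only the bits on which KF agrees for both of those amounts.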
static void computeKnownBitsFromShiftOperator(
    const Operator *I, const APInt &DemandedElts, KnownBits &Known,
    KnownBits &Known2, unsigned Depth, const Query &Q,
    function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
  computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
  bool ShiftAmtIsConstant = Known.isConstant();
  bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);

  if (ShiftAmtIsConstant) {
    Known = KF(Known2, Known);

    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive.
  // TODO: Should we just carry on?
  if (MaxShiftAmtIsOutOfRange) {
    Known.resetAll();
    return;
  }

  // It would be clearer to use two fresh temporaries for this calculation, but
  // we reuse the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known = KnownBits::commonBits(
        Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}

static void computeKnownBitsFromOperator(const Operator *I,
                                         const APInt &DemandedElts,
                                         KnownBits &Known, unsigned Depth,
                                         const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(BitWidth);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known &= Known2;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form add(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
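    // For example:
    //   %m = add i32 %x, -1
    //   %r = and i32 %x, %m   ; the low bit of %r is known to be zero.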
    Value *X = nullptr, *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known |= Known2;
    break;
  case Instruction::Xor:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known ^= Known2;
    break;
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
                        Known, Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::udiv(Known, Known2);
    break;
  }
  case Instruction::Select: {
    const Value *LHS = nullptr, *RHS = nullptr;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
      switch (SPF) {
      default:
        llvm_unreachable("Unhandled select pattern flavor!");
      case SPF_SMAX:
        Known = KnownBits::smax(Known, Known2);
        break;
      case SPF_SMIN:
        Known = KnownBits::smin(Known, Known2);
        break;
      case SPF_UMAX:
        Known = KnownBits::umax(Known, Known2);
        break;
      case SPF_UMIN:
        Known = KnownBits::umin(Known, Known2);
        break;
      }
      break;
    }

    computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);

    if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (match(RHS, m_Neg(m_Specific(LHS))) &&
          Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
        Known.Zero.setSignBit();
    }

    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getPointerTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.anyextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW) {
        if (KnownVal.Zero.isSignBitSet())
          Result.Zero.setSignBit();
        if (KnownVal.One.isSignBitSet())
          Result.One.setSignBit();
      }
      return Result;
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Trailing zeros of a left-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setLowBits(C->countTrailingZeros());
    break;
  }
  case Instruction::LShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      return KnownBits::lshr(KnownVal, KnownAmt);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Leading zeros of a right-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setHighBits(C->countLeadingZeros());
    break;
  }
  case Instruction::AShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      return KnownBits::ashr(KnownVal, KnownAmt);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::srem(Known, Known2);
    break;

  case Instruction::URem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::urem(Known, Known2);
    break;
  case Instruction::Alloca:
    Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
    break;
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // Accumulate the constant indices in a separate variable
    // to minimize the number of calls to computeForAddSub.
    APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      // TrailZ can only become smaller, short-circuit if we hit zero.
      if (Known.isUnknown())
        break;

      Value *Index = I->getOperand(i);

      // Handle case when index is zero.
      Constant *CIndex = dyn_cast<Constant>(Index);
      if (CIndex && CIndex->isZeroValue())
        continue;

      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        assert(CIndex &&
               "Access to structure field must be known at compile time");

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        AccConstIndices += Offset;
        continue;
      }

      // Handle array index arithmetic.
      Type *IndexedTy = GTI.getIndexedType();
      if (!IndexedTy->isSized()) {
        Known.resetAll();
        break;
      }

      unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
      KnownBits IndexBits(IndexBitWidth);
      computeKnownBits(Index, IndexBits, Depth + 1, Q);
      TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
      uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
      KnownBits ScalingFactor(IndexBitWidth);
      // Multiply by current sizeof type.
      // &A[i] == A + i * sizeof(*A[i]).
      if (IndexTypeSize.isScalable()) {
        // For scalable types the only thing we know about sizeof is
        // that this is a multiple of the minimum size.
        ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
      } else if (IndexBits.isConstant()) {
        APInt IndexConst = IndexBits.getConstant();
        APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
        IndexConst *= ScalingFactor;
        AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
        continue;
      } else {
        ScalingFactor =
            KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
      }
      IndexBits = KnownBits::mul(IndexBits, ScalingFactor);

      // If the offsets have a different width from the pointer, according
      // to the language reference we need to sign-extend or truncate them
      // to the width of the pointer.
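      // For example, an i32 index on a target with 64-bit pointers is
      // sign-extended to 64 bits before being added to the base address.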
      IndexBits = IndexBits.sextOrTrunc(BitWidth);

      // Note that inbounds does *not* guarantee nsw for the addition, as only
      // the offset is signed, while the base address is unsigned.
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, IndexBits);
    }
    if (!Known.isUnknown() && !AccConstIndices.isNullValue()) {
      KnownBits Index = KnownBits::makeConstant(AccConstIndices);
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, Index);
    }
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    BinaryOperator *BO = nullptr;
    Value *R = nullptr, *L = nullptr;
    if (matchSimpleRecurrence(P, BO, R, L)) {
      // Handle the case of a simple two-predecessor recurrence PHI.
      // There's a lot more that could theoretically be done here, but
      // this is sufficient to catch some interesting cases.
      unsigned Opcode = BO->getOpcode();

      // If this is a shift recurrence, we know the bits being shifted in.
      // We can combine that with information about the start value of the
      // recurrence to conclude facts about the result.
      if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
           Opcode == Instruction::Shl) &&
          BO->getOperand(0) == I) {

        // We have matched a recurrence of the form:
        // %iv = [R, %entry], [%iv.next, %backedge]
        // %iv.next = shift_op %iv, L

        // Recurse with the phi context to avoid concern about whether facts
        // inferred hold at original context instruction. TODO: It may be
        // correct to use the original context. If warranted, explore and
        // add sufficient tests to cover.
        Query RecQ = Q;
        RecQ.CxtI = P;
        computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
        switch (Opcode) {
        case Instruction::Shl:
          // A shl recurrence will only increase the trailing zeros.
1382 Known.Zero.setLowBits(Known2.countMinTrailingZeros());
1383 break;
1384 case Instruction::LShr:
1385 // A lshr recurrence will preserve the leading zeros of the
1386 // start value
1387 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1388 break;
1389 case Instruction::AShr:
1390 // An ashr recurrence will extend the initial sign bit
1391 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1392 Known.One.setHighBits(Known2.countMinLeadingOnes());
1393 break;
1394 }
1395 }
1396
1397 // Check for operations that have the property that if
1398 // both their operands have low zero bits, the result
1399 // will have low zero bits.
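// For example, if the start value and the step of an add recurrence are
// both known multiples of 4, every value the PHI takes is a multiple of
// 4, so the low two bits of the result are known zero.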
1400 if (Opcode == Instruction::Add ||
1401 Opcode == Instruction::Sub ||
1402 Opcode == Instruction::And ||
1403 Opcode == Instruction::Or ||
1404 Opcode == Instruction::Mul) {
1405 // Change the context instruction to the "edge" that flows into the
1406 // phi. This is important because that is where the value is actually
1407 // "evaluated" even though it is used later somewhere else. (see also
1408 // D69571).
1409 Query RecQ = Q;
1410
1411 unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
1412 Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
1413 Instruction *LInst = P->getIncomingBlock(1-OpNum)->getTerminator();
1414
1415 // Ok, we have a PHI of the form L op= R. Check for low
1416 // zero bits.
1417 RecQ.CxtI = RInst;
1418 computeKnownBits(R, Known2, Depth + 1, RecQ);
1419
1420 // We need to take the minimum number of known bits
1421 KnownBits Known3(BitWidth);
1422 RecQ.CxtI = LInst;
1423 computeKnownBits(L, Known3, Depth + 1, RecQ);
1424
1425 Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1426 Known3.countMinTrailingZeros()));
1427
1428 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
1429 if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1430 // If initial value of recurrence is nonnegative, and we are adding
1431 // a nonnegative number with nsw, the result can only be nonnegative
1432 // or poison value regardless of the number of times we execute the
1433 // add in phi recurrence. If initial value is negative and we are
1434 // adding a negative number with nsw, the result can only be
1435 // negative or poison value. Similar arguments apply to sub and mul.
1436 //
1437 // (add non-negative, non-negative) --> non-negative
1438 // (add negative, negative) --> negative
1439 if (Opcode == Instruction::Add) {
1440 if (Known2.isNonNegative() && Known3.isNonNegative())
1441 Known.makeNonNegative();
1442 else if (Known2.isNegative() && Known3.isNegative())
1443 Known.makeNegative();
1444 }
1445
1446 // (sub nsw non-negative, negative) --> non-negative
1447 // (sub nsw negative, non-negative) --> negative
1448 else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
1449 if (Known2.isNonNegative() && Known3.isNegative())
1450 Known.makeNonNegative();
1451 else if (Known2.isNegative() && Known3.isNonNegative())
1452 Known.makeNegative();
1453 }
1454
1455 // (mul nsw non-negative, non-negative) --> non-negative
1456 else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1457 Known3.isNonNegative())
1458 Known.makeNonNegative();
1459 }
1460
1461 break;
1462 }
1463 }
1464
1465 // Unreachable blocks may have zero-operand PHI nodes.
1466 if (P->getNumIncomingValues() == 0)
1467 break;
1468
1469 // Otherwise take the unions of the known bit sets of the operands,
1470 // taking conservative care to avoid excessive recursion.
1471 if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1472 // Skip if every incoming value refers back to ourselves.
1473 if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1474 break;
1475
1476 Known.Zero.setAllBits();
1477 Known.One.setAllBits();
1478 for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1479 Value *IncValue = P->getIncomingValue(u);
1480 // Skip direct self references.
1481 if (IncValue == P) continue;
1482
1483 // Change the context instruction to the "edge" that flows into the
1484 // phi. This is important because that is where the value is actually
1485 // "evaluated" even though it is used later somewhere else. (see also
1486 // D69571).
1487 Query RecQ = Q;
1488 RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1489
1490 Known2 = KnownBits(BitWidth);
1491 // Recurse, but cap the recursion to one level, because we don't
1492 // want to waste time spinning around in loops.
1493 computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1494 Known = KnownBits::commonBits(Known, Known2);
1495 // If all bits have been ruled out, there's no need to check
1496 // more operands.
1497 if (Known.isUnknown())
1498 break;
1499 }
1500 }
1501 break;
1502 }
1503 case Instruction::Call:
1504 case Instruction::Invoke:
1505 // If range metadata is attached to this call, set known bits from that,
1506 // and then intersect with known bits based on other properties of the
1507 // function.
1508 if (MDNode *MD =
1509 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1510 computeKnownBitsFromRangeMetadata(*MD, Known);
1511 if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1512 computeKnownBits(RV, Known2, Depth + 1, Q);
1513 Known.Zero |= Known2.Zero;
1514 Known.One |= Known2.One;
1515 }
1516 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1517 switch (II->getIntrinsicID()) {
1518 default: break;
1519 case Intrinsic::abs: {
1520 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1521 bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1522 Known = Known2.abs(IntMinIsPoison);
1523 break;
1524 }
1525 case Intrinsic::bitreverse:
1526 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1527 Known.Zero |= Known2.Zero.reverseBits();
1528 Known.One |= Known2.One.reverseBits();
1529 break;
1530 case Intrinsic::bswap:
1531 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1532 Known.Zero |= Known2.Zero.byteSwap();
1533 Known.One |= Known2.One.byteSwap();
1534 break;
1535 case Intrinsic::ctlz: {
1536 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1537 // If we have a known 1, its position is our upper bound.
1538 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1539 // If this call is undefined for 0, the result will be less than 2^n.
1540 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1541 PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
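// The count is at most PossibleLZ, so it fits in Log2_32(PossibleLZ) + 1
// bits. E.g. if bit 28 of a 32-bit operand is known one, PossibleLZ is 3,
// the count fits in two bits, and bits [2, 32) are known zero.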
1542 unsigned LowBits = Log2_32(PossibleLZ)+1;
1543 Known.Zero.setBitsFrom(LowBits);
1544 break;
1545 }
1546 case Intrinsic::cttz: {
1547 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1548 // If we have a known 1, its position is our upper bound.
1549 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1550 // If this call is undefined for 0, the result will be less than 2^n.
1551 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1552 PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1553 unsigned LowBits = Log2_32(PossibleTZ)+1;
1554 Known.Zero.setBitsFrom(LowBits);
1555 break;
1556 }
1557 case Intrinsic::ctpop: {
1558 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1559 // We can bound the space the count needs. Also, bits known to be zero
1560 // can't contribute to the population.
1561 unsigned BitsPossiblySet = Known2.countMaxPopulation();
1562 unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1563 Known.Zero.setBitsFrom(LowBits);
1564 // TODO: we could bound KnownOne using the lower bound on the number
1565 // of bits which might be set provided by popcnt KnownOne2.
1566 break;
1567 }
1568 case Intrinsic::fshr:
1569 case Intrinsic::fshl: {
1570 const APInt *SA;
1571 if (!match(I->getOperand(2), m_APInt(SA)))
1572 break;
1573
1574 // Normalize to funnel shift left.
1575 uint64_t ShiftAmt = SA->urem(BitWidth);
1576 if (II->getIntrinsicID() == Intrinsic::fshr)
1577 ShiftAmt = BitWidth - ShiftAmt;
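// This relies on fshr(X, Y, S) == fshl(X, Y, BitWidth - S): the result
// below is always (X << ShiftAmt) | (Y >> (BitWidth - ShiftAmt)). The
// ShiftAmt == BitWidth edge case (fshr by zero) is fine because APInt
// shifts by the full bit width produce zero.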
1578
1579 KnownBits Known3(BitWidth);
1580 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1581 computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1582
1583 Known.Zero =
1584 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1585 Known.One =
1586 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1587 break;
1588 }
1589 case Intrinsic::uadd_sat:
1590 case Intrinsic::usub_sat: {
1591 bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1592 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1593 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1594
1595 // Add: Leading ones of either operand are preserved.
1596 // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1597 // as leading zeros in the result.
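// E.g. if the LHS of a uadd.sat is known to be 0b11??????, every possible
// result (including the saturated all-ones value) is >= 0b11000000, so
// the top two bits of the result are known one.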
1598 unsigned LeadingKnown;
1599 if (IsAdd)
1600 LeadingKnown = std::max(Known.countMinLeadingOnes(),
1601 Known2.countMinLeadingOnes());
1602 else
1603 LeadingKnown = std::max(Known.countMinLeadingZeros(),
1604 Known2.countMinLeadingOnes());
1605
1606 Known = KnownBits::computeForAddSub(
1607 IsAdd, /* NSW */ false, Known, Known2);
1608
1609 // We select between the operation result and all-ones/zero
1610 // respectively, so we can preserve known ones/zeros.
1611 if (IsAdd) {
1612 Known.One.setHighBits(LeadingKnown);
1613 Known.Zero.clearAllBits();
1614 } else {
1615 Known.Zero.setHighBits(LeadingKnown);
1616 Known.One.clearAllBits();
1617 }
1618 break;
1619 }
1620 case Intrinsic::umin:
1621 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1622 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1623 Known = KnownBits::umin(Known, Known2);
1624 break;
1625 case Intrinsic::umax:
1626 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1627 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1628 Known = KnownBits::umax(Known, Known2);
1629 break;
1630 case Intrinsic::smin:
1631 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1632 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1633 Known = KnownBits::smin(Known, Known2);
1634 break;
1635 case Intrinsic::smax:
1636 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1637 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1638 Known = KnownBits::smax(Known, Known2);
1639 break;
1640 case Intrinsic::x86_sse42_crc32_64_64:
1641 Known.Zero.setBitsFrom(32);
1642 break;
1643 case Intrinsic::riscv_vsetvli:
1644 case Intrinsic::riscv_vsetvlimax:
1645 // Assume that VL output is positive and would fit in an int32_t.
1646 // TODO: VLEN might be capped at 16 bits in a future V spec update.
1647 if (BitWidth >= 32)
1648 Known.Zero.setBitsFrom(31);
1649 break;
1650 }
1651 }
1652 break;
1653 case Instruction::ShuffleVector: {
1654 auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1655 // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1656 if (!Shuf) {
1657 Known.resetAll();
1658 return;
1659 }
1660 // For undef elements, we don't know anything about the common state of
1661 // the shuffle result.
1662 APInt DemandedLHS, DemandedRHS;
1663 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1664 Known.resetAll();
1665 return;
1666 }
1667 Known.One.setAllBits();
1668 Known.Zero.setAllBits();
1669 if (!!DemandedLHS) {
1670 const Value *LHS = Shuf->getOperand(0);
1671 computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1672 // If we don't know any bits, early out.
1673 if (Known.isUnknown())
1674 break;
1675 }
1676 if (!!DemandedRHS) {
1677 const Value *RHS = Shuf->getOperand(1);
1678 computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1679 Known = KnownBits::commonBits(Known, Known2);
1680 }
1681 break;
1682 }
1683 case Instruction::InsertElement: {
1684 const Value *Vec = I->getOperand(0);
1685 const Value *Elt = I->getOperand(1);
1686 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1687 // Early out if the index is non-constant or out-of-range.
1688 unsigned NumElts = DemandedElts.getBitWidth();
1689 if (!CIdx || CIdx->getValue().uge(NumElts)) {
1690 Known.resetAll();
1691 return;
1692 }
1693 Known.One.setAllBits();
1694 Known.Zero.setAllBits();
1695 unsigned EltIdx = CIdx->getZExtValue();
1696 // Do we demand the inserted element?
1697 if (DemandedElts[EltIdx]) {
1698 computeKnownBits(Elt, Known, Depth + 1, Q);
1699 // If we don't know any bits, early out.
1700 if (Known.isUnknown())
1701 break;
1702 }
1703 // We don't need the base vector element that has been inserted.
1704 APInt DemandedVecElts = DemandedElts;
1705 DemandedVecElts.clearBit(EltIdx);
1706 if (!!DemandedVecElts) {
1707 computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1708 Known = KnownBits::commonBits(Known, Known2);
1709 }
1710 break;
1711 }
1712 case Instruction::ExtractElement: {
1713 // Look through extract element. If the index is non-constant or
1714 // out-of-range demand all elements, otherwise just the extracted element.
1715 const Value *Vec = I->getOperand(0);
1716 const Value *Idx = I->getOperand(1);
1717 auto *CIdx = dyn_cast<ConstantInt>(Idx);
1718 if (isa<ScalableVectorType>(Vec->getType())) {
1719 // FIXME: there's probably *something* we can do with scalable vectors
1720 Known.resetAll();
1721 break;
1722 }
1723 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1724 APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
1725 if (CIdx && CIdx->getValue().ult(NumElts))
1726 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1727 computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1728 break;
1729 }
1730 case Instruction::ExtractValue:
1731 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1732 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1733 if (EVI->getNumIndices() != 1) break;
1734 if (EVI->getIndices()[0] == 0) {
1735 switch (II->getIntrinsicID()) {
1736 default: break;
1737 case Intrinsic::uadd_with_overflow:
1738 case Intrinsic::sadd_with_overflow:
1739 computeKnownBitsAddSub(true, II->getArgOperand(0),
1740 II->getArgOperand(1), false, DemandedElts,
1741 Known, Known2, Depth, Q);
1742 break;
1743 case Intrinsic::usub_with_overflow:
1744 case Intrinsic::ssub_with_overflow:
1745 computeKnownBitsAddSub(false, II->getArgOperand(0),
1746 II->getArgOperand(1), false, DemandedElts,
1747 Known, Known2, Depth, Q);
1748 break;
1749 case Intrinsic::umul_with_overflow:
1750 case Intrinsic::smul_with_overflow:
1751 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1752 DemandedElts, Known, Known2, Depth, Q);
1753 break;
1754 }
1755 }
1756 }
1757 break;
1758 case Instruction::Freeze:
1759 if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1760 Depth + 1))
1761 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1762 break;
1763 }
1764 }
1765
1766 /// Determine which bits of V are known to be either zero or one and return
1767 /// them.
1768 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1769 unsigned Depth, const Query &Q) {
1770 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1771 computeKnownBits(V, DemandedElts, Known, Depth, Q);
1772 return Known;
1773 }
1774
1775 /// Determine which bits of V are known to be either zero or one and return
1776 /// them.
1777 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1778 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1779 computeKnownBits(V, Known, Depth, Q);
1780 return Known;
1781 }
1782
1783 /// Determine which bits of V are known to be either zero or one and return
1784 /// them in the Known bit set.
1785 ///
1786 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1787 /// we cannot optimize based on the assumption that it is zero without changing
1788 /// it to be an explicit zero. If we don't change it to zero, other code could
1789 /// be optimized based on the contradictory assumption that it is non-zero.
1790 /// Because instcombine aggressively folds operations with undef args anyway,
1791 /// this won't lose us code quality.
1792 ///
1793 /// This function is defined on values with integer type, values with pointer
1794 /// type, and vectors of integers. In the case where V is a vector, the known
1795 /// zero and known one values are the same width as the vector element, and a
1796 /// bit is set only if it is true for all of the demanded elements in the
1797 /// vector specified by DemandedElts.
1798 void computeKnownBits(const Value *V, const APInt &DemandedElts,
1799 KnownBits &Known, unsigned Depth, const Query &Q) {
1800 if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
1801 // No demanded elts or V is a scalable vector, better to assume we don't
1802 // know anything.
1803 Known.resetAll();
1804 return;
1805 }
1806
1807 assert(V && "No Value?");
1808 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1809
1810 #ifndef NDEBUG
1811 Type *Ty = V->getType();
1812 unsigned BitWidth = Known.getBitWidth();
1813
1814 assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1815 "Not integer or pointer type!");
1816
1817 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1818 assert(
1819 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1820 "DemandedElt width should equal the fixed vector number of elements");
1821 } else {
1822 assert(DemandedElts == APInt(1, 1) &&
1823 "DemandedElt width should be 1 for scalars");
1824 }
1825
1826 Type *ScalarTy = Ty->getScalarType();
1827 if (ScalarTy->isPointerTy()) {
1828 assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1829 "V and Known should have same BitWidth");
1830 } else {
1831 assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1832 "V and Known should have same BitWidth");
1833 }
1834 #endif
1835
1836 const APInt *C;
1837 if (match(V, m_APInt(C))) {
1838 // We know all of the bits for a scalar constant or a splat vector constant!
1839 Known = KnownBits::makeConstant(*C);
1840 return;
1841 }
1842 // Null and aggregate-zero are all-zeros.
1843 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1844 Known.setAllZero();
1845 return;
1846 }
1847 // Handle a constant vector by taking the intersection of the known bits of
1848 // each element.
1849 if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1850 // We know that CDV must be a vector of integers. Take the intersection of
1851 // each element.
1852 Known.Zero.setAllBits(); Known.One.setAllBits();
1853 for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1854 if (!DemandedElts[i])
1855 continue;
1856 APInt Elt = CDV->getElementAsAPInt(i);
1857 Known.Zero &= ~Elt;
1858 Known.One &= Elt;
1859 }
1860 return;
1861 }
1862
1863 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1864 // We know that CV must be a vector of integers. Take the intersection of
1865 // each element.
1866 Known.Zero.setAllBits(); Known.One.setAllBits();
1867 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1868 if (!DemandedElts[i])
1869 continue;
1870 Constant *Element = CV->getAggregateElement(i);
1871 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1872 if (!ElementCI) {
1873 Known.resetAll();
1874 return;
1875 }
1876 const APInt &Elt = ElementCI->getValue();
1877 Known.Zero &= ~Elt;
1878 Known.One &= Elt;
1879 }
1880 return;
1881 }
1882
1883 // Start out not knowing anything.
1884 Known.resetAll();
1885
1886 // We can't imply anything about undefs.
1887 if (isa<UndefValue>(V))
1888 return;
1889
1890 // There's no point in looking through other users of ConstantData for
1891 // assumptions. Confirm that we've handled them all.
1892 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1893
1894 // All recursive calls that increase depth must come after this.
1895 if (Depth == MaxAnalysisRecursionDepth)
1896 return;
1897
1898 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1899 // the bits of its aliasee.
1900 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1901 if (!GA->isInterposable())
1902 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1903 return;
1904 }
1905
1906 if (const Operator *I = dyn_cast<Operator>(V))
1907 computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
1908
1909 // Aligned pointers have trailing zeros - refine Known.Zero set
1910 if (isa<PointerType>(V->getType())) {
1911 Align Alignment = V->getPointerAlignment(Q.DL);
1912 Known.Zero.setLowBits(Log2(Alignment));
1913 }
1914
1915 // computeKnownBitsFromAssume strictly refines Known.
1916 // Therefore, we run them after computeKnownBitsFromOperator.
1917
1918 // Check whether a nearby assume intrinsic can determine some known bits.
1919 computeKnownBitsFromAssume(V, Known, Depth, Q);
1920
1921 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1922 }
1923
1924 /// Return true if the given value is known to have exactly one
1925 /// bit set when defined. For vectors return true if every element is known to
1926 /// be a power of two when defined. Supports values with integer or pointer
1927 /// types and vectors of integers.
1928 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1929 const Query &Q) {
1930 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1931
1932 // Attempt to match against constants.
1933 if (OrZero && match(V, m_Power2OrZero()))
1934 return true;
1935 if (match(V, m_Power2()))
1936 return true;
1937
1938 // 1 << X is clearly a power of two if the one is not shifted off the end. If
1939 // it is shifted off the end then the result is undefined.
1940 if (match(V, m_Shl(m_One(), m_Value())))
1941 return true;
1942
1943 // (signmask) >>l X is clearly a power of two if the one is not shifted off
1944 // the bottom. If it is shifted off the bottom then the result is undefined.
1945 if (match(V, m_LShr(m_SignMask(), m_Value())))
1946 return true;
1947
1948 // The remaining tests are all recursive, so bail out if we hit the limit.
1949 if (Depth++ == MaxAnalysisRecursionDepth)
1950 return false;
1951
1952 Value *X = nullptr, *Y = nullptr;
1953 // A shift left or a logical shift right of a power of two is a power of two
1954 // or zero.
1955 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1956 match(V, m_LShr(m_Value(X), m_Value()))))
1957 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1958
1959 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1960 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1961
1962 if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1963 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1964 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1965
1966 // Peek through min/max.
1967 if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) {
1968 return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) &&
1969 isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q);
1970 }
1971
1972 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1973 // A power of two and'd with anything is a power of two or zero.
1974 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1975 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1976 return true;
1977 // X & (-X) is always a power of two or zero.
1978 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1979 return true;
1980 return false;
1981 }
1982
1983 // Adding a power-of-two or zero to the same power-of-two or zero yields
1984 // either the original power-of-two, a larger power-of-two or zero.
1985 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1986 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1987 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
1988 Q.IIQ.hasNoSignedWrap(VOBO)) {
1989 if (match(X, m_And(m_Specific(Y), m_Value())) ||
1990 match(X, m_And(m_Value(), m_Specific(Y))))
1991 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1992 return true;
1993 if (match(Y, m_And(m_Specific(X), m_Value())) ||
1994 match(Y, m_And(m_Value(), m_Specific(X))))
1995 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1996 return true;
1997
1998 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1999 KnownBits LHSBits(BitWidth);
2000 computeKnownBits(X, LHSBits, Depth, Q);
2001
2002 KnownBits RHSBits(BitWidth);
2003 computeKnownBits(Y, RHSBits, Depth, Q);
2004 // If i8 V is a power of two or zero:
2005 // ZeroBits: 1 1 1 0 1 1 1 1
2006 // ~ZeroBits: 0 0 0 1 0 0 0 0
2007 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2008 // If OrZero isn't set, we cannot give back a zero result.
2009 // Make sure either the LHS or RHS has a bit set.
2010 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2011 return true;
2012 }
2013 }
2014
2015 // An exact divide or right shift can only shift off zero bits, so the result
2016 // is a power of two only if the first operand is a power of two and not
2017 // copying a sign bit (sdiv int_min, 2).
2018 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2019 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2020 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2021 Depth, Q);
2022 }
2023
2024 return false;
2025 }
2026
2027 /// Test whether a GEP's result is known to be non-null.
2028 ///
2029 /// Uses properties inherent in a GEP to try to determine whether it is known
2030 /// to be non-null.
2031 ///
2032 /// Currently this routine does not support vector GEPs.
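/// For example, a hypothetical
///   %p = getelementptr inbounds i32, i32* %base, i64 1
/// in address space 0 cannot be null: adding a non-zero offset to a null
/// base would violate the inbounds contract.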
2033 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2034 const Query &Q) {
2035 const Function *F = nullptr;
2036 if (const Instruction *I = dyn_cast<Instruction>(GEP))
2037 F = I->getFunction();
2038
2039 if (!GEP->isInBounds() ||
2040 NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2041 return false;
2042
2043 // FIXME: Support vector-GEPs.
2044 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2045
2046 // If the base pointer is non-null, we cannot walk to a null address with an
2047 // inbounds GEP in address space zero.
2048 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2049 return true;
2050
2051 // Walk the GEP operands and see if any operand introduces a non-zero offset.
2052 // If so, then the GEP cannot produce a null pointer, as doing so would
2053 // inherently violate the inbounds contract within address space zero.
2054 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2055 GTI != GTE; ++GTI) {
2056 // Struct types are easy -- they must always be indexed by a constant.
2057 if (StructType *STy = GTI.getStructTypeOrNull()) {
2058 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2059 unsigned ElementIdx = OpC->getZExtValue();
2060 const StructLayout *SL = Q.DL.getStructLayout(STy);
2061 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2062 if (ElementOffset > 0)
2063 return true;
2064 continue;
2065 }
2066
2067 // If we have a zero-sized type, the index doesn't matter. Keep looping.
2068 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2069 continue;
2070
2071 // Fast path the constant operand case both for efficiency and so we don't
2072 // increment Depth when just zipping down an all-constant GEP.
2073 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2074 if (!OpC->isZero())
2075 return true;
2076 continue;
2077 }
2078
2079 // We post-increment Depth here because while isKnownNonZero increments it
2080 // as well, when we pop back up that increment won't persist. We don't want
2081 // to recurse 10k times just because we have 10k GEP operands. We don't
2082 // bail completely out because we want to handle constant GEPs regardless
2083 // of depth.
2084 if (Depth++ >= MaxAnalysisRecursionDepth)
2085 continue;
2086
2087 if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2088 return true;
2089 }
2090
2091 return false;
2092 }
2093
2094 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2095 const Instruction *CtxI,
2096 const DominatorTree *DT) {
2097 if (isa<Constant>(V))
2098 return false;
2099
2100 if (!CtxI || !DT)
2101 return false;
2102
2103 unsigned NumUsesExplored = 0;
2104 for (auto *U : V->users()) {
2105 // Avoid massive lists
2106 if (NumUsesExplored >= DomConditionsMaxUses)
2107 break;
2108 NumUsesExplored++;
2109
2110 // If the value is used as an argument to a call or invoke, then argument
2111 // attributes may provide an answer about null-ness.
2112 if (const auto *CB = dyn_cast<CallBase>(U))
2113 if (auto *CalledFunc = CB->getCalledFunction())
2114 for (const Argument &Arg : CalledFunc->args())
2115 if (CB->getArgOperand(Arg.getArgNo()) == V &&
2116 Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
2117 DT->dominates(CB, CtxI))
2118 return true;
2119
2120 // If the value is used as a load/store, then the pointer must be non null.
2121 if (V == getLoadStorePointerOperand(U)) {
2122 const Instruction *I = cast<Instruction>(U);
2123 if (!NullPointerIsDefined(I->getFunction(),
2124 V->getType()->getPointerAddressSpace()) &&
2125 DT->dominates(I, CtxI))
2126 return true;
2127 }
2128
2129 // Consider only compare instructions uniquely controlling a branch
2130 Value *RHS;
2131 CmpInst::Predicate Pred;
2132 if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2133 continue;
2134
2135 bool NonNullIfTrue;
2136 if (cmpExcludesZero(Pred, RHS))
2137 NonNullIfTrue = true;
2138 else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2139 NonNullIfTrue = false;
2140 else
2141 continue;
2142
2143 SmallVector<const User *, 4> WorkList;
2144 SmallPtrSet<const User *, 4> Visited;
2145 for (auto *CmpU : U->users()) {
2146 assert(WorkList.empty() && "Should be!");
2147 if (Visited.insert(CmpU).second)
2148 WorkList.push_back(CmpU);
2149
2150 while (!WorkList.empty()) {
2151 auto *Curr = WorkList.pop_back_val();
2152
2153 // If a user is an AND, add all its users to the work list. We only
2154 // propagate the "pred != null" condition through an AND because it is
2155 // only in the true branch that all of the AND's conditions are known to
2156 // hold. TODO: Support similar logic for OR and the EQ predicate?
2157 if (NonNullIfTrue)
2158 if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2159 for (auto *CurrU : Curr->users())
2160 if (Visited.insert(CurrU).second)
2161 WorkList.push_back(CurrU);
2162 continue;
2163 }
2164
2165 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2166 assert(BI->isConditional() && "uses a comparison!");
2167
2168 BasicBlock *NonNullSuccessor =
2169 BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2170 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2171 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2172 return true;
2173 } else if (NonNullIfTrue && isGuard(Curr) &&
2174 DT->dominates(cast<Instruction>(Curr), CtxI)) {
2175 return true;
2176 }
2177 }
2178 }
2179 }
2180
2181 return false;
2182 }
2183
2184 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2185 /// ensure that the value it's attached to is never equal to 'Value'?
2186 /// 'RangeType' is the type of the value described by the range.
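/// For example, metadata describing the single range [1, 2^31) never
/// contains 0, so it excludes a 'Value' of zero.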
2187 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2188 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2189 assert(NumRanges >= 1);
2190 for (unsigned i = 0; i < NumRanges; ++i) {
2191 ConstantInt *Lower =
2192 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2193 ConstantInt *Upper =
2194 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2195 ConstantRange Range(Lower->getValue(), Upper->getValue());
2196 if (Range.contains(Value))
2197 return false;
2198 }
2199 return true;
2200 }
2201
2202 /// Try to detect a recurrence that monotonically increases/decreases from a
2203 /// non-zero starting value. These are common as induction variables.
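/// For example (a hypothetical IR sketch):
///   %iv = phi i64 [ 1, %entry ], [ %iv.next, %loop ]
///   %iv.next = add nuw i64 %iv, 2
/// starts at a non-zero value and can never wrap back around to zero.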
2204 static bool isNonZeroRecurrence(const PHINode *PN) {
2205 BinaryOperator *BO = nullptr;
2206 Value *Start = nullptr, *Step = nullptr;
2207 const APInt *StartC, *StepC;
2208 if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
2209 !match(Start, m_APInt(StartC)) || StartC->isNullValue())
2210 return false;
2211
2212 switch (BO->getOpcode()) {
2213 case Instruction::Add:
2214 // Starting from non-zero and stepping away from zero can never wrap back
2215 // to zero.
2216 return BO->hasNoUnsignedWrap() ||
2217 (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) &&
2218 StartC->isNegative() == StepC->isNegative());
2219 case Instruction::Mul:
2220 return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
2221 match(Step, m_APInt(StepC)) && !StepC->isNullValue();
2222 case Instruction::Shl:
2223 return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
2224 case Instruction::AShr:
2225 case Instruction::LShr:
2226 return BO->isExact();
2227 default:
2228 return false;
2229 }
2230 }
2231
2232 /// Return true if the given value is known to be non-zero when defined. For
2233 /// vectors, return true if every demanded element is known to be non-zero when
2234 /// defined. For pointers, if the context instruction and dominator tree are
2235 /// specified, perform context-sensitive analysis and return true if the
2236 /// pointer couldn't possibly be null at the specified instruction.
2237 /// Supports values with integer or pointer type and vectors of integers.
2238 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2239 const Query &Q) {
2240 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2241 // vector
2242 if (isa<ScalableVectorType>(V->getType()))
2243 return false;
2244
2245 if (auto *C = dyn_cast<Constant>(V)) {
2246 if (C->isNullValue())
2247 return false;
2248 if (isa<ConstantInt>(C))
2249 // Must be non-zero due to null test above.
2250 return true;
2251
2252 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2253 // See the comment for IntToPtr/PtrToInt instructions below.
2254 if (CE->getOpcode() == Instruction::IntToPtr ||
2255 CE->getOpcode() == Instruction::PtrToInt)
2256 if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType())
2257 .getFixedSize() <=
2258 Q.DL.getTypeSizeInBits(CE->getType()).getFixedSize())
2259 return isKnownNonZero(CE->getOperand(0), Depth, Q);
2260 }
2261
2262 // For constant vectors, check that all elements are undefined or known
2263 // non-zero to determine that the whole vector is known non-zero.
2264 if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2265 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2266 if (!DemandedElts[i])
2267 continue;
2268 Constant *Elt = C->getAggregateElement(i);
2269 if (!Elt || Elt->isNullValue())
2270 return false;
2271 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2272 return false;
2273 }
2274 return true;
2275 }
2276
2277 // A global variable in address space 0 is non-null unless it is extern weak
2278 // or an absolute symbol reference. Other address spaces may have null as a
2279 // valid address for a global, so we can't assume anything.
2280 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2281 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2282 GV->getType()->getAddressSpace() == 0)
2283 return true;
2284 } else
2285 return false;
2286 }
2287
2288 if (auto *I = dyn_cast<Instruction>(V)) {
2289 if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2290 // If the possible ranges don't contain zero, then the value is
2291 // definitely non-zero.
2292 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2293 const APInt ZeroValue(Ty->getBitWidth(), 0);
2294 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2295 return true;
2296 }
2297 }
2298 }
2299
2300 if (isKnownNonZeroFromAssume(V, Q))
2301 return true;
2302
2303 // Some of the tests below are recursive, so bail out if we hit the limit.
2304 if (Depth++ >= MaxAnalysisRecursionDepth)
2305 return false;
2306
2307 // Check for pointer simplifications.
2308
2309 if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2310 // Alloca never returns null, malloc might.
2311 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2312 return true;
2313
2314 // A byval or inalloca argument may not be null in a non-default address
2315 // space. A nonnull argument is assumed to never be null.
2316 if (const Argument *A = dyn_cast<Argument>(V)) {
2317 if (((A->hasPassPointeeByValueCopyAttr() &&
2318 !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2319 A->hasNonNullAttr()))
2320 return true;
2321 }
2322
2323 // A Load tagged with nonnull metadata is never null.
2324 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2325 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2326 return true;
2327
2328 if (const auto *Call = dyn_cast<CallBase>(V)) {
2329 if (Call->isReturnNonNull())
2330 return true;
2331 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2332 return isKnownNonZero(RP, Depth, Q);
2333 }
2334 }
2335
2336 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2337 return true;
2338
2339 // Check for recursive pointer simplifications.
2340 if (V->getType()->isPointerTy()) {
2341 // Look through bitcast operations, GEPs, and int2ptr instructions as they
2342 // do not alter the value, or at least not the nullness property of the
2343 // value, e.g., int2ptr is allowed to zero/sign extend the value.
2344 //
2345 // Note that we have to take special care to avoid looking through
2346 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2347 // as casts that can alter the value, e.g., AddrSpaceCasts.
2348 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2349 return isGEPKnownNonNull(GEP, Depth, Q);
2350
2351 if (auto *BCO = dyn_cast<BitCastOperator>(V))
2352 return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2353
2354 if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2355 if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()).getFixedSize() <=
2356 Q.DL.getTypeSizeInBits(I2P->getDestTy()).getFixedSize())
2357 return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2358 }
2359
2360 // Similar to int2ptr above, we can look through ptr2int here if the cast
2361 // is a no-op or an extend and not a truncate.
2362 if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2363 if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()).getFixedSize() <=
2364 Q.DL.getTypeSizeInBits(P2I->getDestTy()).getFixedSize())
2365 return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2366
2367 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2368
2369 // X | Y != 0 if X != 0 or Y != 0.
2370 Value *X = nullptr, *Y = nullptr;
2371 if (match(V, m_Or(m_Value(X), m_Value(Y))))
2372 return isKnownNonZero(X, DemandedElts, Depth, Q) ||
2373 isKnownNonZero(Y, DemandedElts, Depth, Q);
2374
2375 // ext X != 0 if X != 0.
2376 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2377 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2378
2379 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2380 // if the lowest bit is shifted off the end.
2381 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2382 // shl nuw can't remove any non-zero bits.
2383 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2384 if (Q.IIQ.hasNoUnsignedWrap(BO))
2385 return isKnownNonZero(X, Depth, Q);
2386
2387 KnownBits Known(BitWidth);
2388 computeKnownBits(X, DemandedElts, Known, Depth, Q);
2389 if (Known.One[0])
2390 return true;
2391 }
2392 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2393 // defined if the sign bit is shifted off the end.
2394 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2395 // shr exact can only shift out zero bits.
2396 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2397 if (BO->isExact())
2398 return isKnownNonZero(X, Depth, Q);
2399
2400 KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
2401 if (Known.isNegative())
2402 return true;
2403
2404 // If the shifter operand is a constant, and all of the bits shifted
2405 // out are known to be zero, and X is known non-zero then at least one
2406 // non-zero bit must remain.
2407 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2408 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2409 // Is there a known one in the portion not shifted out?
2410 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2411 return true;
2412 // Are all the bits to be shifted out known zero?
2413 if (Known.countMinTrailingZeros() >= ShiftVal)
2414 return isKnownNonZero(X, DemandedElts, Depth, Q);
2415 }
2416 }
2417 // div exact can only produce a zero if the dividend is zero.
2418 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2419 return isKnownNonZero(X, DemandedElts, Depth, Q);
2420 }
2421 // X + Y.
2422 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2423 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2424 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2425
2426 // If X and Y are both non-negative (as signed values) then their sum is not
2427 // zero unless both X and Y are zero.
2428 if (XKnown.isNonNegative() && YKnown.isNonNegative())
2429 if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2430 isKnownNonZero(Y, DemandedElts, Depth, Q))
2431 return true;
2432
2433 // If X and Y are both negative (as signed values) then their sum is not
2434 // zero unless both X and Y equal INT_MIN.
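// E.g. for i8, (-128) + (-128) == -256 == 0 (mod 256) is the only way two
// negative values can sum to zero; any known-one bit other than the sign
// bit rules out INT_MIN.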
2435 if (XKnown.isNegative() && YKnown.isNegative()) {
2436 APInt Mask = APInt::getSignedMaxValue(BitWidth);
2437 // The sign bit of X is set. If some other bit is set then X is not equal
2438 // to INT_MIN.
2439 if (XKnown.One.intersects(Mask))
2440 return true;
2441 // The sign bit of Y is set. If some other bit is set then Y is not equal
2442 // to INT_MIN.
2443 if (YKnown.One.intersects(Mask))
2444 return true;
2445 }
2446
2447 // The sum of a non-negative number and a power of two is not zero.
2448 if (XKnown.isNonNegative() &&
2449 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2450 return true;
2451 if (YKnown.isNonNegative() &&
2452 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2453 return true;
2454 }
2455 // X * Y.
2456 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2457 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2458 // If X and Y are non-zero then so is X * Y as long as the multiplication
2459 // does not overflow.
2460 if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2461 isKnownNonZero(X, DemandedElts, Depth, Q) &&
2462 isKnownNonZero(Y, DemandedElts, Depth, Q))
2463 return true;
2464 }
2465 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2466 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2467 if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2468 isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2469 return true;
2470 }
2471 // PHI
2472 else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2473 if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN))
2474 return true;
2475
2476 // Check if all incoming values are non-zero using recursion.
2477 Query RecQ = Q;
2478 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
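// Starting at one below the recursion limit leaves room for exactly one
// level of recursion into each incoming value, which keeps cyclic PHI
// chains from exploding the search.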
2479 return llvm::all_of(PN->operands(), [&](const Use &U) {
2480 if (U.get() == PN)
2481 return true;
2482 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2483 return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2484 });
2485 }
2486 // ExtractElement
2487 else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2488 const Value *Vec = EEI->getVectorOperand();
2489 const Value *Idx = EEI->getIndexOperand();
2490 auto *CIdx = dyn_cast<ConstantInt>(Idx);
2491 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2492 unsigned NumElts = VecTy->getNumElements();
2493 APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
2494 if (CIdx && CIdx->getValue().ult(NumElts))
2495 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2496 return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2497 }
2498 }
2499 // Freeze
2500 else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) {
2501 auto *Op = FI->getOperand(0);
2502 if (isKnownNonZero(Op, Depth, Q) &&
2503 isGuaranteedNotToBePoison(Op, Q.AC, Q.CxtI, Q.DT, Depth))
2504 return true;
2505 }
2506
2507 KnownBits Known(BitWidth);
2508 computeKnownBits(V, DemandedElts, Known, Depth, Q);
2509 return Known.One != 0;
2510 }
2511
2512 bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
2513 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2514 // vector
2515 if (isa<ScalableVectorType>(V->getType()))
2516 return false;
2517
2518 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2519 APInt DemandedElts =
2520 FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
2521 return isKnownNonZero(V, DemandedElts, Depth, Q);
2522 }
2523
2524 /// If the pair of operators are the same invertible function, return the
2525 /// operands of the function corresponding to each input. Otherwise,
2526 /// return None. An invertible function is one that is 1-to-1 and maps
2527 /// every input value to exactly one output value. This is equivalent to
2528 /// saying that Op1 and Op2 are equal exactly when the specified pair of
2529 /// operands are equal (except that Op1 and Op2 may be poison more often).
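/// For example, hypothetical '%a = add i32 %X, %C' and '%b = add i32 %Y, %C'
/// satisfy %a == %b exactly when %X == %Y, so the pair (%X, %Y) is returned.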
2530 static Optional<std::pair<Value*, Value*>>
2531 getInvertibleOperands(const Operator *Op1,
2532 const Operator *Op2) {
2533 if (Op1->getOpcode() != Op2->getOpcode())
2534 return None;
2535
2536 auto getOperands = [&](unsigned OpNum) -> auto {
2537 return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
2538 };
2539
2540 switch (Op1->getOpcode()) {
2541 default:
2542 break;
2543 case Instruction::Add:
2544 case Instruction::Sub:
2545 if (Op1->getOperand(0) == Op2->getOperand(0))
2546 return getOperands(1);
2547 if (Op1->getOperand(1) == Op2->getOperand(1))
2548 return getOperands(0);
2549 break;
2550 case Instruction::Mul: {
2551 // invertible if A * B == (A * B) mod 2^N where A and B are integers
2552 // and N is the bitwidth. The nsw case is non-obvious, but proven by
2553 // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
2554 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2555 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2556 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2557 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2558 break;
2559
2560 // Assume operand order has been canonicalized
2561 if (Op1->getOperand(1) == Op2->getOperand(1) &&
2562 isa<ConstantInt>(Op1->getOperand(1)) &&
2563 !cast<ConstantInt>(Op1->getOperand(1))->isZero())
2564 return getOperands(0);
2565 break;
2566 }
2567 case Instruction::Shl: {
2568 // Same as multiplies, with the difference that we don't need to check
2569 // for a non-zero multiply. Shifts always multiply by non-zero.
2570 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2571 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2572 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2573 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2574 break;
2575
2576 if (Op1->getOperand(1) == Op2->getOperand(1))
2577 return getOperands(0);
2578 break;
2579 }
2580 case Instruction::AShr:
2581 case Instruction::LShr: {
2582 auto *PEO1 = cast<PossiblyExactOperator>(Op1);
2583 auto *PEO2 = cast<PossiblyExactOperator>(Op2);
2584 if (!PEO1->isExact() || !PEO2->isExact())
2585 break;
2586
2587 if (Op1->getOperand(1) == Op2->getOperand(1))
2588 return getOperands(0);
2589 break;
2590 }
2591 case Instruction::SExt:
2592 case Instruction::ZExt:
2593 if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType())
2594 return getOperands(0);
2595 break;
2596 case Instruction::PHI: {
2597 const PHINode *PN1 = cast<PHINode>(Op1);
2598 const PHINode *PN2 = cast<PHINode>(Op2);
2599
2600 // If PN1 and PN2 are both recurrences, can we prove the entire recurrences
2601 // are a single invertible function of the start values? Note that repeated
2602 // application of an invertible function is also invertible.
2603 BinaryOperator *BO1 = nullptr;
2604 Value *Start1 = nullptr, *Step1 = nullptr;
2605 BinaryOperator *BO2 = nullptr;
2606 Value *Start2 = nullptr, *Step2 = nullptr;
2607 if (PN1->getParent() != PN2->getParent() ||
2608 !matchSimpleRecurrence(PN1, BO1, Start1, Step1) ||
2609 !matchSimpleRecurrence(PN2, BO2, Start2, Step2))
2610 break;
2611
2612 auto Values = getInvertibleOperands(cast<Operator>(BO1),
2613 cast<Operator>(BO2));
2614 if (!Values)
2615 break;
2616
2617 // We have to be careful of mutually defined recurrences here. Ex:
2618 // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V
2619 // * X_i = Y_i = X_(i-1) OP Y_(i-1)
2620 // The invertibility of these is complicated, and not worth reasoning
2621 // about (yet?).
2622 if (Values->first != PN1 || Values->second != PN2)
2623 break;
2624
2625 return std::make_pair(Start1, Start2);
2626 }
2627 }
2628 return None;
2629 }
2630
2631 /// Return true if V2 == V1 + X, where X is known non-zero.
2632 static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
2633 const Query &Q) {
2634 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2635 if (!BO || BO->getOpcode() != Instruction::Add)
2636 return false;
2637 Value *Op = nullptr;
2638 if (V2 == BO->getOperand(0))
2639 Op = BO->getOperand(1);
2640 else if (V2 == BO->getOperand(1))
2641 Op = BO->getOperand(0);
2642 else
2643 return false;
2644 return isKnownNonZero(Op, Depth + 1, Q);
2645 }
2646
2647 /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
2648 /// the multiplication is nuw or nsw.
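/// E.g. a hypothetical '%v2 = mul nuw i32 %v1, 3' can never equal a non-zero
/// %v1, since that would require the multiplication to wrap.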
2649 static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
2650 const Query &Q) {
2651 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2652 const APInt *C;
2653 return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
2654 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2655 !C->isNullValue() && !C->isOneValue() &&
2656 isKnownNonZero(V1, Depth + 1, Q);
2657 }
2658 return false;
2659 }
2660
2661 /// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
2662 /// the shift is nuw or nsw.
2663 static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
2664 const Query &Q) {
2665 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2666 const APInt *C;
2667 return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
2668 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2669 !C->isNullValue() && isKnownNonZero(V1, Depth + 1, Q);
2670 }
2671 return false;
2672 }
2673
2674 static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
2675 unsigned Depth, const Query &Q) {
2676 // Check that the two PHIs are in the same block.
2677 if (PN1->getParent() != PN2->getParent())
2678 return false;
2679
2680 SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
2681 bool UsedFullRecursion = false;
2682 for (const BasicBlock *IncomBB : PN1->blocks()) {
2683 if (!VisitedBBs.insert(IncomBB).second)
2684 continue; // Don't reprocess blocks that we have dealt with already.
2685 const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
2686 const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
2687 const APInt *C1, *C2;
2688 if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
2689 continue;
2690
2691 // Only one pair of phi operands is allowed for full recursion.
2692 if (UsedFullRecursion)
2693 return false;
2694
2695 Query RecQ = Q;
2696 RecQ.CxtI = IncomBB->getTerminator();
2697 if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
2698 return false;
2699 UsedFullRecursion = true;
2700 }
2701 return true;
2702 }
2703
2704 /// Return true if it is known that V1 != V2.
2705 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
2706 const Query &Q) {
2707 if (V1 == V2)
2708 return false;
2709 if (V1->getType() != V2->getType())
2710 // We can't look through casts yet.
2711 return false;
2712
2713 if (Depth >= MaxAnalysisRecursionDepth)
2714 return false;
2715
2716 // See if we can recurse through (exactly one of) our operands. This
2717 // requires our operation be 1-to-1 and map every input value to exactly
2718 // one output value. Such an operation is invertible.
2719 auto *O1 = dyn_cast<Operator>(V1);
2720 auto *O2 = dyn_cast<Operator>(V2);
2721 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
2722 if (auto Values = getInvertibleOperands(O1, O2))
2723 return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q);
2724
2725 if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
2726 const PHINode *PN2 = cast<PHINode>(V2);
2727 // FIXME: This is missing a generalization to handle the case where one is
2728 // a PHI and the other isn't.
2729 if (isNonEqualPHIs(PN1, PN2, Depth, Q))
2730 return true;
2731 }
2732 }
2733
2734 if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
2735 return true;
2736
2737 if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
2738 return true;
2739
2740 if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q))
2741 return true;
2742
2743 if (V1->getType()->isIntOrIntVectorTy()) {
2744 // Are any known bits in V1 contradictory to known bits in V2? If V1
2745 // has a known zero where V2 has a known one, they must not be equal.
2746 KnownBits Known1 = computeKnownBits(V1, Depth, Q);
2747 KnownBits Known2 = computeKnownBits(V2, Depth, Q);
2748
2749 if (Known1.Zero.intersects(Known2.One) ||
2750 Known2.Zero.intersects(Known1.One))
2751 return true;
2752 }
2753 return false;
2754 }
2755
2756 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
2757 /// simplify operations downstream. Mask is known to be zero for bits that V
2758 /// cannot have.
2759 ///
2760 /// This function is defined on values with integer type, values with pointer
2761 /// type, and vectors of integers. In the case
2762 /// where V is a vector, the mask, known zero, and known one values are the
2763 /// same width as the vector element, and the bit is set only if it is true
2764 /// for all of the elements in the vector.
2765 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2766 const Query &Q) {
2767 KnownBits Known(Mask.getBitWidth());
2768 computeKnownBits(V, Known, Depth, Q);
2769 return Mask.isSubsetOf(Known.Zero);
2770 }
2771
2772 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2773 // Returns the input and lower/upper bounds.
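// E.g. smax(smin(%x, 127), -128) clamps %x to [-128, 127] and yields
// In == %x, CLow == -128, CHigh == 127.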
2774 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2775 const APInt *&CLow, const APInt *&CHigh) {
2776 assert(isa<Operator>(Select) &&
2777 cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2778 "Input should be a Select!");
2779
2780 const Value *LHS = nullptr, *RHS = nullptr;
2781 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2782 if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2783 return false;
2784
2785 if (!match(RHS, m_APInt(CLow)))
2786 return false;
2787
2788 const Value *LHS2 = nullptr, *RHS2 = nullptr;
2789 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2790 if (getInverseMinMaxFlavor(SPF) != SPF2)
2791 return false;
2792
2793 if (!match(RHS2, m_APInt(CHigh)))
2794 return false;
2795
2796 if (SPF == SPF_SMIN)
2797 std::swap(CLow, CHigh);
2798
2799 In = LHS2;
2800 return CLow->sle(*CHigh);
2801 }
2802
2803 /// For vector constants, loop over the elements and find the constant with the
2804 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2805 /// or if any element was not analyzed; otherwise, return the count for the
2806 /// element with the minimum number of sign bits.
2807 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2808 const APInt &DemandedElts,
2809 unsigned TyBits) {
2810 const auto *CV = dyn_cast<Constant>(V);
2811 if (!CV || !isa<FixedVectorType>(CV->getType()))
2812 return 0;
2813
2814 unsigned MinSignBits = TyBits;
2815 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
2816 for (unsigned i = 0; i != NumElts; ++i) {
2817 if (!DemandedElts[i])
2818 continue;
2819 // If we find a non-ConstantInt, bail out.
2820 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2821 if (!Elt)
2822 return 0;
2823
2824 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2825 }
2826
2827 return MinSignBits;
2828 }
2829
2830 static unsigned ComputeNumSignBitsImpl(const Value *V,
2831 const APInt &DemandedElts,
2832 unsigned Depth, const Query &Q);
2833
2834 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
2835 unsigned Depth, const Query &Q) {
2836 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
2837 assert(Result > 0 && "At least one sign bit needs to be present!");
2838 return Result;
2839 }
2840
2841 /// Return the number of times the sign bit of the register is replicated into
2842 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2843 /// (itself), but other cases can give us information. For example, immediately
2844 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2845 /// other, so we return 3. For vectors, return the number of sign bits for the
2846 /// vector element with the minimum number of known sign bits of the demanded
2847 /// elements in the vector specified by DemandedElts.
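///
/// For example, for <2 x i32> <i32 -1, i32 100> the elements have 32 and 25
/// sign bits respectively, so demanding both elements yields 25 while
/// demanding only the first yields 32.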
2848 static unsigned ComputeNumSignBitsImpl(const Value *V,
2849 const APInt &DemandedElts,
2850 unsigned Depth, const Query &Q) {
2851 Type *Ty = V->getType();
2852
2853 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2854 // vector
2855 if (isa<ScalableVectorType>(Ty))
2856 return 1;
2857
2858 #ifndef NDEBUG
2859 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2860
2861 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2862 assert(
2863 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2864 "DemandedElt width should equal the fixed vector number of elements");
2865 } else {
2866 assert(DemandedElts == APInt(1, 1) &&
2867 "DemandedElt width should be 1 for scalars");
2868 }
2869 #endif
2870
2871 // We return the minimum number of sign bits that are guaranteed to be present
2872 // in V, so for undef we have to conservatively return 1. We don't have the
2873 // same behavior for poison though -- that's a FIXME today.
2874
2875 Type *ScalarTy = Ty->getScalarType();
2876 unsigned TyBits = ScalarTy->isPointerTy() ?
2877 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
2878 Q.DL.getTypeSizeInBits(ScalarTy);
2879
2880 unsigned Tmp, Tmp2;
2881 unsigned FirstAnswer = 1;
2882
2883 // Note that ConstantInt is handled by the general computeKnownBits case
2884 // below.
2885
2886 if (Depth == MaxAnalysisRecursionDepth)
2887 return 1;
2888
2889 if (auto *U = dyn_cast<Operator>(V)) {
2890 switch (Operator::getOpcode(V)) {
2891 default: break;
2892 case Instruction::SExt:
2893 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2894 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2895
2896 case Instruction::SDiv: {
2897 const APInt *Denominator;
2898 // sdiv X, C -> adds log(C) sign bits.
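// For example, "sdiv i32 %x, 16" adds floor(log2(16)) = 4 sign bits: a
// numerator with at least 5 sign bits gives a quotient with at least 9.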
2899 if (match(U->getOperand(1), m_APInt(Denominator))) {
2900
2901 // Ignore non-positive denominator.
2902 if (!Denominator->isStrictlyPositive())
2903 break;
2904
2905 // Calculate the incoming numerator bits.
2906 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2907
2908 // Add floor(log(C)) bits to the numerator bits.
2909 return std::min(TyBits, NumBits + Denominator->logBase2());
2910 }
2911 break;
2912 }
2913
2914 case Instruction::SRem: {
2915 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2916
2917 const APInt *Denominator;
2918 // srem X, C -> we know that the result is within [-C+1,C) when C is a
2919 // positive constant. This lets us put a lower bound on the number of sign
2920 // bits.
2921 if (match(U->getOperand(1), m_APInt(Denominator))) {
2922
2923 // Ignore non-positive denominator.
2924 if (Denominator->isStrictlyPositive()) {
2925 // Calculate the leading sign bit constraints by examining the
2926 // denominator. Given that the denominator is positive, there are two
2927 // cases:
2928 //
2929 // 1. The numerator is positive. The result range is [0,C) and
2930 // [0,C) u< (1 << ceilLogBase2(C)).
2931 //
2932 // 2. The numerator is negative. Then the result range is (-C,0] and
2933 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2934 //
2935 // Thus a lower bound on the number of sign bits is `TyBits -
2936 // ceilLogBase2(C)`.
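//
// For example, with TyBits = 32 and C = 100, ceilLogBase2(100) = 7, so the
// result of the srem has at least 25 sign bits.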
2937
2938 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2939 Tmp = std::max(Tmp, ResBits);
2940 }
2941 }
2942 return Tmp;
2943 }
2944
2945 case Instruction::AShr: {
2946 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2947 // ashr X, C -> adds C sign bits. Vectors too.
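// For example, if %x has at least 3 sign bits, "ashr i32 %x, 4" has at
// least 7.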
2948 const APInt *ShAmt;
2949 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2950 if (ShAmt->uge(TyBits))
2951 break; // Bad shift.
2952 unsigned ShAmtLimited = ShAmt->getZExtValue();
2953 Tmp += ShAmtLimited;
2954 if (Tmp > TyBits) Tmp = TyBits;
2955 }
2956 return Tmp;
2957 }
2958 case Instruction::Shl: {
2959 const APInt *ShAmt;
2960 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2961 // shl destroys sign bits.
2962 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2963 if (ShAmt->uge(TyBits) || // Bad shift.
2964 ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2965 Tmp2 = ShAmt->getZExtValue();
2966 return Tmp - Tmp2;
2967 }
2968 break;
2969 }
2970 case Instruction::And:
2971 case Instruction::Or:
2972 case Instruction::Xor: // NOT is handled here.
2973 // Logical binary ops preserve the number of sign bits at worst.
2974 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2975 if (Tmp != 1) {
2976 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2977 FirstAnswer = std::min(Tmp, Tmp2);
2978 // We computed what we know about the sign bits as our first
2979 // answer. Now proceed to the generic code that uses
2980 // computeKnownBits, and pick whichever answer is better.
2981 }
2982 break;
2983
2984 case Instruction::Select: {
2985 // If we have a clamp pattern, we know that the number of sign bits will
2986 // be the minimum of the clamp min/max range.
2987 const Value *X;
2988 const APInt *CLow, *CHigh;
2989 if (isSignedMinMaxClamp(U, X, CLow, CHigh))
2990 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
2991
2992 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2993 if (Tmp == 1) break;
2994 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2995 return std::min(Tmp, Tmp2);
2996 }
2997
2998 case Instruction::Add:
2999 // Add can have at most one carry bit. Thus we know that the output
3000 // is, at worst, one more bit than the inputs.
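// For example, adding two i32 values that each have at least 5 sign bits
// yields a result with at least min(5, 5) - 1 = 4 sign bits.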
3001 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3002 if (Tmp == 1) break;
3003
3004 // Special case decrementing a value (ADD X, -1):
3005 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
3006 if (CRHS->isAllOnesValue()) {
3007 KnownBits Known(TyBits);
3008 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
3009
3010 // If the input is known to be 0 or 1, the output is 0/-1, which is
3011 // all sign bits set.
3012 if ((Known.Zero | 1).isAllOnesValue())
3013 return TyBits;
3014
3015 // If we are subtracting one from a positive number, there is no carry
3016 // out of the result.
3017 if (Known.isNonNegative())
3018 return Tmp;
3019 }
3020
3021 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3022 if (Tmp2 == 1) break;
3023 return std::min(Tmp, Tmp2) - 1;
3024
3025 case Instruction::Sub:
3026 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3027 if (Tmp2 == 1) break;
3028
3029 // Handle NEG.
3030 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
3031 if (CLHS->isNullValue()) {
3032 KnownBits Known(TyBits);
3033 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
3034 // If the input is known to be 0 or 1, the output is 0/-1, which is
3035 // all sign bits set.
3036 if ((Known.Zero | 1).isAllOnesValue())
3037 return TyBits;
3038
3039 // If the input is known to be positive (the sign bit is known clear),
3040 // the output of the NEG has the same number of sign bits as the
3041 // input.
3042 if (Known.isNonNegative())
3043 return Tmp2;
3044
3045 // Otherwise, we treat this like a SUB.
3046 }
3047
3048 // Sub can have at most one carry bit. Thus we know that the output
3049 // is, at worst, one more bit than the inputs.
3050 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3051 if (Tmp == 1) break;
3052 return std::min(Tmp, Tmp2) - 1;
3053
3054 case Instruction::Mul: {
3055 // The output of the Mul can be at most twice the valid bits in the
3056 // inputs.
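// For example, with TyBits = 32 and 20 sign bits in each operand, each
// input occupies at most 13 value bits, the product occupies at most 26,
// and the result has at least 32 - 26 + 1 = 7 sign bits.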
3057 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3058 if (SignBitsOp0 == 1) break;
3059 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3060 if (SignBitsOp1 == 1) break;
3061 unsigned OutValidBits =
3062 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
3063 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
3064 }
3065
3066 case Instruction::PHI: {
3067 const PHINode *PN = cast<PHINode>(U);
3068 unsigned NumIncomingValues = PN->getNumIncomingValues();
3069 // Don't analyze large in-degree PHIs.
3070 if (NumIncomingValues > 4) break;
3071 // Unreachable blocks may have zero-operand PHI nodes.
3072 if (NumIncomingValues == 0) break;
3073
3074 // Take the minimum of all incoming values. This can't infinitely loop
3075 // because of our depth threshold.
3076 Query RecQ = Q;
3077 Tmp = TyBits;
3078 for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
3079 if (Tmp == 1) return Tmp;
3080 RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
3081 Tmp = std::min(
3082 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
3083 }
3084 return Tmp;
3085 }
3086
3087 case Instruction::Trunc:
3088 // FIXME: it's tricky to do anything useful for this, but it is an
3089 // important case for targets like X86.
3090 break;
3091
3092 case Instruction::ExtractElement:
3093 // Look through extract element. At the moment we keep this simple and
3094 // skip tracking the specific element. But at least we might find
3095 // information valid for all elements of the vector (for example if the
3096 // vector is sign extended, shifted, etc.).
3097 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3098
3099 case Instruction::ShuffleVector: {
3100 // Collect the minimum number of sign bits that are shared by every vector
3101 // element referenced by the shuffle.
3102 auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
3103 if (!Shuf) {
3104 // FIXME: Add support for shufflevector constant expressions.
3105 return 1;
3106 }
3107 APInt DemandedLHS, DemandedRHS;
3108 // For undef elements, we don't know anything about the common state of
3109 // the shuffle result.
3110 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
3111 return 1;
3112 Tmp = std::numeric_limits<unsigned>::max();
3113 if (!!DemandedLHS) {
3114 const Value *LHS = Shuf->getOperand(0);
3115 Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
3116 }
3117 // If we don't know anything, early out and try computeKnownBits
3118 // fall-back.
3119 if (Tmp == 1)
3120 break;
3121 if (!!DemandedRHS) {
3122 const Value *RHS = Shuf->getOperand(1);
3123 Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
3124 Tmp = std::min(Tmp, Tmp2);
3125 }
3126 // If we don't know anything, early out and try computeKnownBits
3127 // fall-back.
3128 if (Tmp == 1)
3129 break;
3130 assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
3131 return Tmp;
3132 }
3133 case Instruction::Call: {
3134 if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
3135 switch (II->getIntrinsicID()) {
3136 default: break;
3137 case Intrinsic::abs:
3138 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3139 if (Tmp == 1) break;
3140
3141 // Absolute value reduces number of sign bits by at most 1.
3142 return Tmp - 1;
3143 }
3144 }
3145 }
3146 }
3147 }
3148
3149 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3150 // use this information.
3151
3152 // If we can examine all elements of a vector constant successfully, we're
3153 // done (we can't do any better than that). If not, keep trying.
3154 if (unsigned VecSignBits =
3155 computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3156 return VecSignBits;
3157
3158 KnownBits Known(TyBits);
3159 computeKnownBits(V, DemandedElts, Known, Depth, Q);
3160
3161 // If we know that the sign bit is either zero or one, determine the number of
3162 // identical bits in the top of the input value.
3163 return std::max(FirstAnswer, Known.countMinSignBits());
3164 }
3165
3166 /// This function computes the integer multiple of Base that equals V. If
3167 /// successful, it returns true and stores the multiple in Multiple. If
3168 /// unsuccessful, it returns false. It looks through SExt instructions only
3169 /// if LookThroughSExt is true.
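///
/// For example, with Base = 6 and V = "mul i32 %x, 6" this returns true and
/// sets Multiple to %x; likewise with Base = 4 and V = "shl i32 %x, 2".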
3170 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
3171 bool LookThroughSExt, unsigned Depth) {
3172 assert(V && "No Value?");
3173 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3174 assert(V->getType()->isIntegerTy() && "Not integer type!");
3175
3176 Type *T = V->getType();
3177
3178 ConstantInt *CI = dyn_cast<ConstantInt>(V);
3179
3180 if (Base == 0)
3181 return false;
3182
3183 if (Base == 1) {
3184 Multiple = V;
3185 return true;
3186 }
3187
3188 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
3189 Constant *BaseVal = ConstantInt::get(T, Base);
3190 if (CO && CO == BaseVal) {
3191 // Multiple is 1.
3192 Multiple = ConstantInt::get(T, 1);
3193 return true;
3194 }
3195
3196 if (CI && CI->getZExtValue() % Base == 0) {
3197 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
3198 return true;
3199 }
3200
3201 if (Depth == MaxAnalysisRecursionDepth) return false;
3202
3203 Operator *I = dyn_cast<Operator>(V);
3204 if (!I) return false;
3205
3206 switch (I->getOpcode()) {
3207 default: break;
3208 case Instruction::SExt:
3209 if (!LookThroughSExt) return false;
3210 // otherwise fall through to ZExt
3211 LLVM_FALLTHROUGH;
3212 case Instruction::ZExt:
3213 return ComputeMultiple(I->getOperand(0), Base, Multiple,
3214 LookThroughSExt, Depth+1);
3215 case Instruction::Shl:
3216 case Instruction::Mul: {
3217 Value *Op0 = I->getOperand(0);
3218 Value *Op1 = I->getOperand(1);
3219
3220 if (I->getOpcode() == Instruction::Shl) {
3221 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
3222 if (!Op1CI) return false;
3223 // Turn Op0 << Op1 into Op0 * 2^Op1
3224 APInt Op1Int = Op1CI->getValue();
3225 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
3226 APInt API(Op1Int.getBitWidth(), 0);
3227 API.setBit(BitToSet);
3228 Op1 = ConstantInt::get(V->getContext(), API);
3229 }
3230
3231 Value *Mul0 = nullptr;
3232 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
3233 if (Constant *Op1C = dyn_cast<Constant>(Op1))
3234 if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
3235 if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3236 MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3237 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
3238 if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3239 MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3240 MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
3241
3242 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
3243 Multiple = ConstantExpr::getMul(MulC, Op1C);
3244 return true;
3245 }
3246
3247 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
3248 if (Mul0CI->getValue() == 1) {
3249 // V == Base * Op1, so return Op1
3250 Multiple = Op1;
3251 return true;
3252 }
3253 }
3254
3255 Value *Mul1 = nullptr;
3256 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
3257 if (Constant *Op0C = dyn_cast<Constant>(Op0))
3258 if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
3259 if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3260 MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3261 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
3262 if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3263 MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3264 MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
3265
3266 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
3267 Multiple = ConstantExpr::getMul(MulC, Op0C);
3268 return true;
3269 }
3270
3271 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
3272 if (Mul1CI->getValue() == 1) {
3273 // V == Base * Op0, so return Op0
3274 Multiple = Op0;
3275 return true;
3276 }
3277 }
3278 }
3279 }
3280
3281 // We could not determine if V is a multiple of Base.
3282 return false;
3283 }
3284
3285 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3286 const TargetLibraryInfo *TLI) {
3287 const Function *F = CB.getCalledFunction();
3288 if (!F)
3289 return Intrinsic::not_intrinsic;
3290
3291 if (F->isIntrinsic())
3292 return F->getIntrinsicID();
3293
3294 // We are going to infer the semantics of a library function by mapping it
3295 // to an LLVM intrinsic. Check that the library function is available at
3296 // this call site and in this environment.
3297 LibFunc Func;
3298 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3299 !CB.onlyReadsMemory())
3300 return Intrinsic::not_intrinsic;
3301
3302 switch (Func) {
3303 default:
3304 break;
3305 case LibFunc_sin:
3306 case LibFunc_sinf:
3307 case LibFunc_sinl:
3308 return Intrinsic::sin;
3309 case LibFunc_cos:
3310 case LibFunc_cosf:
3311 case LibFunc_cosl:
3312 return Intrinsic::cos;
3313 case LibFunc_exp:
3314 case LibFunc_expf:
3315 case LibFunc_expl:
3316 return Intrinsic::exp;
3317 case LibFunc_exp2:
3318 case LibFunc_exp2f:
3319 case LibFunc_exp2l:
3320 return Intrinsic::exp2;
3321 case LibFunc_log:
3322 case LibFunc_logf:
3323 case LibFunc_logl:
3324 return Intrinsic::log;
3325 case LibFunc_log10:
3326 case LibFunc_log10f:
3327 case LibFunc_log10l:
3328 return Intrinsic::log10;
3329 case LibFunc_log2:
3330 case LibFunc_log2f:
3331 case LibFunc_log2l:
3332 return Intrinsic::log2;
3333 case LibFunc_fabs:
3334 case LibFunc_fabsf:
3335 case LibFunc_fabsl:
3336 return Intrinsic::fabs;
3337 case LibFunc_fmin:
3338 case LibFunc_fminf:
3339 case LibFunc_fminl:
3340 return Intrinsic::minnum;
3341 case LibFunc_fmax:
3342 case LibFunc_fmaxf:
3343 case LibFunc_fmaxl:
3344 return Intrinsic::maxnum;
3345 case LibFunc_copysign:
3346 case LibFunc_copysignf:
3347 case LibFunc_copysignl:
3348 return Intrinsic::copysign;
3349 case LibFunc_floor:
3350 case LibFunc_floorf:
3351 case LibFunc_floorl:
3352 return Intrinsic::floor;
3353 case LibFunc_ceil:
3354 case LibFunc_ceilf:
3355 case LibFunc_ceill:
3356 return Intrinsic::ceil;
3357 case LibFunc_trunc:
3358 case LibFunc_truncf:
3359 case LibFunc_truncl:
3360 return Intrinsic::trunc;
3361 case LibFunc_rint:
3362 case LibFunc_rintf:
3363 case LibFunc_rintl:
3364 return Intrinsic::rint;
3365 case LibFunc_nearbyint:
3366 case LibFunc_nearbyintf:
3367 case LibFunc_nearbyintl:
3368 return Intrinsic::nearbyint;
3369 case LibFunc_round:
3370 case LibFunc_roundf:
3371 case LibFunc_roundl:
3372 return Intrinsic::round;
3373 case LibFunc_roundeven:
3374 case LibFunc_roundevenf:
3375 case LibFunc_roundevenl:
3376 return Intrinsic::roundeven;
3377 case LibFunc_pow:
3378 case LibFunc_powf:
3379 case LibFunc_powl:
3380 return Intrinsic::pow;
3381 case LibFunc_sqrt:
3382 case LibFunc_sqrtf:
3383 case LibFunc_sqrtl:
3384 return Intrinsic::sqrt;
3385 }
3386
3387 return Intrinsic::not_intrinsic;
3388 }
3389
3390 /// Return true if we can prove that the specified FP value is never equal to
3391 /// -0.0.
3392 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3393 /// that a value is not -0.0. It only guarantees that -0.0 may be treated
3394 /// the same as +0.0 in floating-point ops.
3395 ///
3396 /// NOTE: this function will need to be revisited when we support non-default
3397 /// rounding modes!
3398 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3399 unsigned Depth) {
3400 if (auto *CFP = dyn_cast<ConstantFP>(V))
3401 return !CFP->getValueAPF().isNegZero();
3402
3403 if (Depth == MaxAnalysisRecursionDepth)
3404 return false;
3405
3406 auto *Op = dyn_cast<Operator>(V);
3407 if (!Op)
3408 return false;
3409
3410 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3411 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3412 return true;
3413
3414 // sitofp and uitofp turn into +0.0 for zero.
3415 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3416 return true;
3417
3418 if (auto *Call = dyn_cast<CallInst>(Op)) {
3419 Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3420 switch (IID) {
3421 default:
3422 break;
3423 // sqrt(-0.0) = -0.0, no other negative results are possible.
3424 case Intrinsic::sqrt:
3425 case Intrinsic::canonicalize:
3426 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3427 // fabs(x) != -0.0
3428 case Intrinsic::fabs:
3429 return true;
3430 }
3431 }
3432
3433 return false;
3434 }
3435
3436 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3437 /// standard ordered compare. E.g. -0.0 is considered less than 0.0 because
3438 /// of its sign bit, even though the two compare equal.
3439 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3440 const TargetLibraryInfo *TLI,
3441 bool SignBitOnly,
3442 unsigned Depth) {
3443 // TODO: This function does not do the right thing when SignBitOnly is true
3444 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3445 // which flips the sign bits of NaNs. See
3446 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3447
3448 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3449 return !CFP->getValueAPF().isNegative() ||
3450 (!SignBitOnly && CFP->getValueAPF().isZero());
3451 }
3452
3453 // Handle vector of constants.
3454 if (auto *CV = dyn_cast<Constant>(V)) {
3455 if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3456 unsigned NumElts = CVFVTy->getNumElements();
3457 for (unsigned i = 0; i != NumElts; ++i) {
3458 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3459 if (!CFP)
3460 return false;
3461 if (CFP->getValueAPF().isNegative() &&
3462 (SignBitOnly || !CFP->getValueAPF().isZero()))
3463 return false;
3464 }
3465
3466 // All non-negative ConstantFPs.
3467 return true;
3468 }
3469 }
3470
3471 if (Depth == MaxAnalysisRecursionDepth)
3472 return false;
3473
3474 const Operator *I = dyn_cast<Operator>(V);
3475 if (!I)
3476 return false;
3477
3478 switch (I->getOpcode()) {
3479 default:
3480 break;
3481 // Unsigned integers are always nonnegative.
3482 case Instruction::UIToFP:
3483 return true;
3484 case Instruction::FMul:
3485 case Instruction::FDiv:
3486 // X * X is always non-negative or a NaN.
3487 // X / X is always exactly 1.0 or a NaN.
3488 if (I->getOperand(0) == I->getOperand(1) &&
3489 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3490 return true;
3491
3492 LLVM_FALLTHROUGH;
3493 case Instruction::FAdd:
3494 case Instruction::FRem:
3495 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3496 Depth + 1) &&
3497 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3498 Depth + 1);
3499 case Instruction::Select:
3500 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3501 Depth + 1) &&
3502 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3503 Depth + 1);
3504 case Instruction::FPExt:
3505 case Instruction::FPTrunc:
3506 // Widening/narrowing never change sign.
3507 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3508 Depth + 1);
3509 case Instruction::ExtractElement:
3510 // Look through extract element. At the moment we keep this simple and skip
3511 // tracking the specific element. But at least we might find information
3512 // valid for all elements of the vector.
3513 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3514 Depth + 1);
3515 case Instruction::Call:
3516 const auto *CI = cast<CallInst>(I);
3517 Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3518 switch (IID) {
3519 default:
3520 break;
3521 case Intrinsic::maxnum: {
3522 Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3523 auto isPositiveNum = [&](Value *V) {
3524 if (SignBitOnly) {
3525 // With SignBitOnly, this is tricky because the result of
3526 // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3527 // a constant strictly greater than 0.0.
3528 const APFloat *C;
3529 return match(V, m_APFloat(C)) &&
3530 *C > APFloat::getZero(C->getSemantics());
3531 }
3532
3533 // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3534 // maxnum can't be ordered-less-than-zero.
3535 return isKnownNeverNaN(V, TLI) &&
3536 cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3537 };
3538
3539 // TODO: This could be improved. We could also check that neither operand
3540 // has its sign bit set (and at least 1 is not-NAN?).
3541 return isPositiveNum(V0) || isPositiveNum(V1);
3542 }
3543
3544 case Intrinsic::maximum:
3545 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3546 Depth + 1) ||
3547 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3548 Depth + 1);
3549 case Intrinsic::minnum:
3550 case Intrinsic::minimum:
3551 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3552 Depth + 1) &&
3553 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3554 Depth + 1);
3555 case Intrinsic::exp:
3556 case Intrinsic::exp2:
3557 case Intrinsic::fabs:
3558 return true;
3559
3560 case Intrinsic::sqrt:
3561 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
3562 if (!SignBitOnly)
3563 return true;
3564 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3565 CannotBeNegativeZero(CI->getOperand(0), TLI));
3566
3567 case Intrinsic::powi:
3568 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3569 // powi(x,n) is non-negative if n is even.
3570 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3571 return true;
3572 }
3573 // TODO: This is not correct. Given that exp is an integer, here are the
3574 // ways that pow can return a negative value:
3575 //
3576 // pow(x, exp) --> negative if exp is odd and x is negative.
3577 // pow(-0, exp) --> -inf if exp is negative odd.
3578 // pow(-0, exp) --> -0 if exp is positive odd.
3579 // pow(-inf, exp) --> -0 if exp is negative odd.
3580 // pow(-inf, exp) --> -inf if exp is positive odd.
3581 //
3582 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3583 // but we must return false if x == -0. Unfortunately we do not currently
3584 // have a way of expressing this constraint. See details in
3585 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3586 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3587 Depth + 1);
3588
3589 case Intrinsic::fma:
3590 case Intrinsic::fmuladd:
3591 // x*x+y is non-negative if y is non-negative.
3592 return I->getOperand(0) == I->getOperand(1) &&
3593 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3594 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3595 Depth + 1);
3596 }
3597 break;
3598 }
3599 return false;
3600 }
3601
3602 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3603 const TargetLibraryInfo *TLI) {
3604 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3605 }
3606
3607 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3608 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3609 }
3610
3611 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3612 unsigned Depth) {
3613 assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3614
3615 // If we're told that infinities won't happen, assume they won't.
3616 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3617 if (FPMathOp->hasNoInfs())
3618 return true;
3619
3620 // Handle scalar constants.
3621 if (auto *CFP = dyn_cast<ConstantFP>(V))
3622 return !CFP->isInfinity();
3623
3624 if (Depth == MaxAnalysisRecursionDepth)
3625 return false;
3626
3627 if (auto *Inst = dyn_cast<Instruction>(V)) {
3628 switch (Inst->getOpcode()) {
3629 case Instruction::Select: {
3630 return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3631 isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3632 }
3633 case Instruction::SIToFP:
3634 case Instruction::UIToFP: {
3635 // Get width of largest magnitude integer (remove a bit if signed).
3636 // This still works for a signed minimum value because the largest FP
3637 // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
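// For example, sitofp i16 -> half can never be infinity (every i16 value
// fits below the largest finite half, 65504), but uitofp i16 -> half can
// round up to +infinity (e.g. for 65535).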
3638 int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
3639 if (Inst->getOpcode() == Instruction::SIToFP)
3640 --IntSize;
3641
3642 // If the exponent of the largest finite FP value can hold the largest
3643 // integer, the result of the cast must be finite.
3644 Type *FPTy = Inst->getType()->getScalarType();
3645 return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
3646 }
3647 default:
3648 break;
3649 }
3650 }
3651
3652 // Try to handle fixed width vector constants
3653 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3654 if (VFVTy && isa<Constant>(V)) {
3655 // For vectors, verify that each element is not infinity.
3656 unsigned NumElts = VFVTy->getNumElements();
3657 for (unsigned i = 0; i != NumElts; ++i) {
3658 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3659 if (!Elt)
3660 return false;
3661 if (isa<UndefValue>(Elt))
3662 continue;
3663 auto *CElt = dyn_cast<ConstantFP>(Elt);
3664 if (!CElt || CElt->isInfinity())
3665 return false;
3666 }
3667 // All elements were confirmed non-infinity or undefined.
3668 return true;
3669 }
3670
3671 // Was not able to prove that V never contains infinity
3672 return false;
3673 }
3674
3675 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3676 unsigned Depth) {
3677 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3678
3679 // If we're told that NaNs won't happen, assume they won't.
3680 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3681 if (FPMathOp->hasNoNaNs())
3682 return true;
3683
3684 // Handle scalar constants.
3685 if (auto *CFP = dyn_cast<ConstantFP>(V))
3686 return !CFP->isNaN();
3687
3688 if (Depth == MaxAnalysisRecursionDepth)
3689 return false;
3690
3691 if (auto *Inst = dyn_cast<Instruction>(V)) {
3692 switch (Inst->getOpcode()) {
3693 case Instruction::FAdd:
3694 case Instruction::FSub:
3695 // Adding positive and negative infinity produces NaN.
3696 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3697 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3698 (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3699 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3700
3701 case Instruction::FMul:
3702 // Zero multiplied with infinity produces NaN.
3703 // FIXME: If neither side can be zero fmul never produces NaN.
3704 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3705 isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3706 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3707 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3708
3709 case Instruction::FDiv:
3710 case Instruction::FRem:
3711 // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3712 return false;
3713
3714 case Instruction::Select: {
3715 return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3716 isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3717 }
3718 case Instruction::SIToFP:
3719 case Instruction::UIToFP:
3720 return true;
3721 case Instruction::FPTrunc:
3722 case Instruction::FPExt:
3723 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3724 default:
3725 break;
3726 }
3727 }
3728
3729 if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3730 switch (II->getIntrinsicID()) {
3731 case Intrinsic::canonicalize:
3732 case Intrinsic::fabs:
3733 case Intrinsic::copysign:
3734 case Intrinsic::exp:
3735 case Intrinsic::exp2:
3736 case Intrinsic::floor:
3737 case Intrinsic::ceil:
3738 case Intrinsic::trunc:
3739 case Intrinsic::rint:
3740 case Intrinsic::nearbyint:
3741 case Intrinsic::round:
3742 case Intrinsic::roundeven:
3743 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3744 case Intrinsic::sqrt:
3745 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3746 CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3747 case Intrinsic::minnum:
3748 case Intrinsic::maxnum:
3749 // If either operand is not NaN, the result is not NaN.
3750 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3751 isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3752 default:
3753 return false;
3754 }
3755 }
3756
3757 // Try to handle fixed width vector constants
3758 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3759 if (VFVTy && isa<Constant>(V)) {
3760 // For vectors, verify that each element is not NaN.
3761 unsigned NumElts = VFVTy->getNumElements();
3762 for (unsigned i = 0; i != NumElts; ++i) {
3763 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3764 if (!Elt)
3765 return false;
3766 if (isa<UndefValue>(Elt))
3767 continue;
3768 auto *CElt = dyn_cast<ConstantFP>(Elt);
3769 if (!CElt || CElt->isNaN())
3770 return false;
3771 }
3772 // All elements were confirmed not-NaN or undefined.
3773 return true;
3774 }
3775
3776 // Was not able to prove that V never contains NaN
3777 return false;
3778 }
3779
3780 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3781
3782 // All byte-wide stores are splatable, even of arbitrary variables.
3783 if (V->getType()->isIntegerTy(8))
3784 return V;
3785
3786 LLVMContext &Ctx = V->getContext();
3787
3788 // Undef values are "don't care".
3789 auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3790 if (isa<UndefValue>(V))
3791 return UndefInt8;
3792
3793 // Return Undef for zero-sized type.
3794 if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3795 return UndefInt8;
3796
3797 Constant *C = dyn_cast<Constant>(V);
3798 if (!C) {
3799 // Conceptually, we could handle things like:
3800 // %a = zext i8 %X to i16
3801 // %b = shl i16 %a, 8
3802 // %c = or i16 %a, %b
3803 // but until there is an example that actually needs this, it doesn't seem
3804 // worth worrying about.
3805 return nullptr;
3806 }
3807
3808 // Handle 'null' ConstantAggregateZero etc.
3809 if (C->isNullValue())
3810 return Constant::getNullValue(Type::getInt8Ty(Ctx));
3811
3812 // Constant floating-point values can be handled as integer values if the
3813 // corresponding integer value is "byteable". An important case is 0.0.
3814 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3815 Type *Ty = nullptr;
3816 if (CFP->getType()->isHalfTy())
3817 Ty = Type::getInt16Ty(Ctx);
3818 else if (CFP->getType()->isFloatTy())
3819 Ty = Type::getInt32Ty(Ctx);
3820 else if (CFP->getType()->isDoubleTy())
3821 Ty = Type::getInt64Ty(Ctx);
3822 // Don't handle long double formats, which have strange constraints.
3823 return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3824 : nullptr;
3825 }
3826
3827 // We can handle constant integers whose width is a multiple of 8 bits.
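// For example, i32 0xABABABAB splats to the byte 0xAB, while i32 0xAABBAABB
// is not a repeated byte pattern and yields nullptr.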
3828 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3829 if (CI->getBitWidth() % 8 == 0) {
3830 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3831 if (!CI->getValue().isSplat(8))
3832 return nullptr;
3833 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3834 }
3835 }
3836
3837 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3838 if (CE->getOpcode() == Instruction::IntToPtr) {
3839 if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
3840 unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
3841 return isBytewiseValue(
3842 ConstantExpr::getIntegerCast(CE->getOperand(0),
3843 Type::getIntNTy(Ctx, BitWidth), false),
3844 DL);
3845 }
3846 }
3847 }
3848
3849 auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3850 if (LHS == RHS)
3851 return LHS;
3852 if (!LHS || !RHS)
3853 return nullptr;
3854 if (LHS == UndefInt8)
3855 return RHS;
3856 if (RHS == UndefInt8)
3857 return LHS;
3858 return nullptr;
3859 };
3860
3861 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3862 Value *Val = UndefInt8;
3863 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3864 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3865 return nullptr;
3866 return Val;
3867 }
3868
3869 if (isa<ConstantAggregate>(C)) {
3870 Value *Val = UndefInt8;
3871 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3872 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3873 return nullptr;
3874 return Val;
3875 }
3876
3877 // Don't try to handle the handful of other constants.
3878 return nullptr;
3879 }
3880
3881 // This is the recursive version of BuildSubAggregate. It takes a few different
3882 // arguments. Idxs is the index within the nested struct From that we are
3883 // looking at now (which is of type IndexedType). IdxSkip is the number of
3884 // indices from Idxs that should be left out when inserting into the resulting
3885 // struct. To is the result struct built so far, new insertvalue instructions
3886 // build on that.
3887 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3888 SmallVectorImpl<unsigned> &Idxs,
3889 unsigned IdxSkip,
3890 Instruction *InsertBefore) {
3891 StructType *STy = dyn_cast<StructType>(IndexedType);
3892 if (STy) {
3893 // Save the original To argument so we can modify it
3894 Value *OrigTo = To;
3895 // General case, the type indexed by Idxs is a struct
3896 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3897 // Process each struct element recursively
3898 Idxs.push_back(i);
3899 Value *PrevTo = To;
3900 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3901 InsertBefore);
3902 Idxs.pop_back();
3903 if (!To) {
3904 // Couldn't find any inserted value for this index? Cleanup
3905 while (PrevTo != OrigTo) {
3906 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3907 PrevTo = Del->getAggregateOperand();
3908 Del->eraseFromParent();
3909 }
3910 // Stop processing elements
3911 break;
3912 }
3913 }
3914 // If we successfully found a value for each of our subaggregates
3915 if (To)
3916 return To;
3917 }
3918 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3919 // the struct's elements had a value that was inserted directly. In the latter
3920 // case, perhaps we can't determine each of the subelements individually, but
3921 // we might be able to find the complete struct somewhere.
3922
3923 // Find the value that is at that particular spot
3924 Value *V = FindInsertedValue(From, Idxs);
3925
3926 if (!V)
3927 return nullptr;
3928
3929 // Insert the value in the new (sub) aggregate
3930 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3931 "tmp", InsertBefore);
3932 }
3933
3934 // This helper takes a nested struct and extracts a part of it (which is again a
3935 // struct) into a new value. For example, given the struct:
3936 // { a, { b, { c, d }, e } }
3937 // and the indices "1, 1" this returns
3938 // { c, d }.
3939 //
3940 // It does this by inserting an insertvalue for each element in the resulting
3941 // struct, as opposed to just inserting a single struct. This will only work if
3942 // each of the elements of the substruct is known (i.e., inserted into From by
3943 // an insertvalue instruction somewhere).
3944 //
3945 // All inserted insertvalue instructions are inserted before InsertBefore
3946 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3947 Instruction *InsertBefore) {
3948 assert(InsertBefore && "Must have someplace to insert!");
3949 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3950 idx_range);
3951 Value *To = UndefValue::get(IndexedType);
3952 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3953 unsigned IdxSkip = Idxs.size();
3954
3955 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3956 }
3957
3958 /// Given an aggregate and a sequence of indices, see if the scalar value
3959 /// indexed is already around as a register, for example if it was inserted
3960 /// directly into the aggregate.
3961 ///
3962 /// If InsertBefore is not null, this function will duplicate (modified)
3963 /// insertvalues when a part of a nested struct is extracted.
3964 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3965 Instruction *InsertBefore) {
3966 // Nothing to index? Just return V then (this is useful at the end of our
3967 // recursion).
3968 if (idx_range.empty())
3969 return V;
3970 // We have indices, so V should have an indexable type.
3971 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3972 "Not looking at a struct or array?");
3973 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3974 "Invalid indices for type?");
3975
3976 if (Constant *C = dyn_cast<Constant>(V)) {
3977 C = C->getAggregateElement(idx_range[0]);
3978 if (!C) return nullptr;
3979 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3980 }
3981
3982 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3983 // Loop the indices for the insertvalue instruction in parallel with the
3984 // requested indices
3985 const unsigned *req_idx = idx_range.begin();
3986 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3987 i != e; ++i, ++req_idx) {
3988 if (req_idx == idx_range.end()) {
3989 // We can't handle this without inserting insertvalues
3990 if (!InsertBefore)
3991 return nullptr;
3992
3993 // The requested index identifies a part of a nested aggregate. Handle
3994 // this specially. For example,
3995 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3996 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3997 // %C = extractvalue {i32, { i32, i32 } } %B, 1
3998 // This can be changed into
3999 // %A = insertvalue {i32, i32 } undef, i32 10, 0
4000 // %C = insertvalue {i32, i32 } %A, i32 11, 1
4001 // which allows the unused 0,0 element from the nested struct to be
4002 // removed.
4003 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
4004 InsertBefore);
4005 }
4006
4007 // This insertvalue inserts something other than what we are looking for.
4008 // In that case, see if the (aggregate) value it inserts into has the
4009 // value we are looking for.
4010 if (*req_idx != *i)
4011 return FindInsertedValue(I->getAggregateOperand(), idx_range,
4012 InsertBefore);
4013 }
4014 // If we end up here, the indices of the insertvalue match with those
4015 // requested (though possibly only partially). Now we recursively look at
4016 // the inserted value, passing any remaining indices.
4017 return FindInsertedValue(I->getInsertedValueOperand(),
4018 makeArrayRef(req_idx, idx_range.end()),
4019 InsertBefore);
4020 }
4021
4022 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
4023 // If we're extracting a value from an aggregate that was extracted from
4024 // something else, we can extract from that something else directly instead.
4025 // However, we will need to chain I's indices with the requested indices.
4026
4027 // Calculate the number of indices required
4028 unsigned size = I->getNumIndices() + idx_range.size();
4029 // Allocate some space to put the new indices in
4030 SmallVector<unsigned, 5> Idxs;
4031 Idxs.reserve(size);
4032 // Add indices from the extract value instruction
4033 Idxs.append(I->idx_begin(), I->idx_end());
4034
4035 // Add requested indices
4036 Idxs.append(idx_range.begin(), idx_range.end());
4037
4038 assert(Idxs.size() == size
4039 && "Number of indices added not correct?");
4040
4041 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
4042 }
4043 // Otherwise, we don't know (such as, extracting from a function return value
4044 // or load instruction)
4045 return nullptr;
4046 }
4047
4048 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
4049 unsigned CharSize) {
4050 // Make sure the GEP has exactly three arguments.
4051 if (GEP->getNumOperands() != 3)
4052 return false;
4053
4054 // Make sure the GEP indexes into an array of \p CharSize-bit integers.
4056 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
4057 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
4058 return false;
4059
4060 // Check to make sure that the first operand of the GEP is an integer and
4061 // has value 0 so that we are sure we're indexing into the initializer.
4062 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
4063 if (!FirstIdx || !FirstIdx->isZero())
4064 return false;
4065
4066 return true;
4067 }
4068
4069 bool llvm::getConstantDataArrayInfo(const Value *V,
4070 ConstantDataArraySlice &Slice,
4071 unsigned ElementSize, uint64_t Offset) {
4072 assert(V);
4073
4074 // Look through bitcast instructions and geps.
4075 V = V->stripPointerCasts();
4076
4077 // If the value is a GEP instruction or constant expression, treat it as an
4078 // offset.
4079 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
4080 // The GEP operator should be based on a pointer to string constant, and is
4081 // indexing into the string constant.
4082 if (!isGEPBasedOnPointerToString(GEP, ElementSize))
4083 return false;
4084
4085 // If the second index isn't a ConstantInt, then this is a variable index
4086 // into the array. If this occurs, we can't say anything meaningful about
4087 // the string.
4088 uint64_t StartIdx = 0;
4089 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
4090 StartIdx = CI->getZExtValue();
4091 else
4092 return false;
4093 return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
4094 StartIdx + Offset);
4095 }
4096
4097 // The GEP, whether a constant expression or an instruction, must reference
4098 // a global variable that is a constant and is initialized. The referenced
4099 // constant initializer is the array that we'll use for optimization.
4100 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
4101 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
4102 return false;
4103
4104 const ConstantDataArray *Array;
4105 ArrayType *ArrayTy;
4106 if (GV->getInitializer()->isNullValue()) {
4107 Type *GVTy = GV->getValueType();
4108 if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
4109 // A zeroinitializer for the array; there is no ConstantDataArray.
4110 Array = nullptr;
4111 } else {
4112 const DataLayout &DL = GV->getParent()->getDataLayout();
4113 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
4114 uint64_t Length = SizeInBytes / (ElementSize / 8);
4115 if (Length <= Offset)
4116 return false;
4117
4118 Slice.Array = nullptr;
4119 Slice.Offset = 0;
4120 Slice.Length = Length - Offset;
4121 return true;
4122 }
4123 } else {
4124 // This must be a ConstantDataArray.
4125 Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
4126 if (!Array)
4127 return false;
4128 ArrayTy = Array->getType();
4129 }
4130 if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
4131 return false;
4132
4133 uint64_t NumElts = ArrayTy->getArrayNumElements();
4134 if (Offset > NumElts)
4135 return false;
4136
4137 Slice.Array = Array;
4138 Slice.Offset = Offset;
4139 Slice.Length = NumElts - Offset;
4140 return true;
4141 }
4142
4143 /// This function computes the length of a null-terminated C string pointed to
4144 /// by V. If successful, it returns true and stores the string in Str.
4145 /// If unsuccessful, it returns false.
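/// For example, if V points to the constant c"abc\00" at offset 0 and
/// TrimAtNul is set, this returns true and sets Str to "abc".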
4146 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
4147 uint64_t Offset, bool TrimAtNul) {
4148 ConstantDataArraySlice Slice;
4149 if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
4150 return false;
4151
4152 if (Slice.Array == nullptr) {
4153 if (TrimAtNul) {
4154 Str = StringRef();
4155 return true;
4156 }
4157 if (Slice.Length == 1) {
4158 Str = StringRef("", 1);
4159 return true;
4160 }
4161 // We cannot instantiate a StringRef as we do not have an appropriate string
4162 // of 0s at hand.
4163 return false;
4164 }
4165
4166 // Start out with the entire array in the StringRef.
4167 Str = Slice.Array->getAsString();
4168 // Skip over 'offset' bytes.
4169 Str = Str.substr(Slice.Offset);
4170
4171 if (TrimAtNul) {
4172 // Trim off the \0 and anything after it. If the array is not nul
4173 // terminated, we just return the remainder of the string. The client may
4174 // know some other way that the string is length-bound.
4175 Str = Str.substr(0, Str.find('\0'));
4176 }
4177 return true;
4178 }
4179
4180 // These next two are very similar to the above, but also look through PHI
4181 // nodes.
4182 // TODO: See if we can integrate these two together.
4183
4184 /// If we can compute the length of the string pointed to by
4185 /// the specified pointer, return 'len+1'. If we can't, return 0.
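/// For example, a pointer to the constant c"abc\00" yields 4 (three
/// characters plus the terminating nul).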
4186 static uint64_t GetStringLengthH(const Value *V,
4187 SmallPtrSetImpl<const PHINode*> &PHIs,
4188 unsigned CharSize) {
4189 // Look through noop bitcast instructions.
4190 V = V->stripPointerCasts();
4191
4192 // If this is a PHI node, there are two cases: either we have already seen it
4193 // or we haven't.
4194 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
4195 if (!PHIs.insert(PN).second)
4196 return ~0ULL; // already in the set.
4197
4198 // If it was new, see if all the input strings are the same length.
4199 uint64_t LenSoFar = ~0ULL;
4200 for (Value *IncValue : PN->incoming_values()) {
4201 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
4202 if (Len == 0) return 0; // Unknown length -> unknown.
4203
4204 if (Len == ~0ULL) continue;
4205
4206 if (Len != LenSoFar && LenSoFar != ~0ULL)
4207 return 0; // Disagree -> unknown.
4208 LenSoFar = Len;
4209 }
4210
4211 // Success, all agree.
4212 return LenSoFar;
4213 }
4214
4215 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
4216 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4217 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4218 if (Len1 == 0) return 0;
4219 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4220 if (Len2 == 0) return 0;
4221 if (Len1 == ~0ULL) return Len2;
4222 if (Len2 == ~0ULL) return Len1;
4223 if (Len1 != Len2) return 0;
4224 return Len1;
4225 }
4226
4227 // Otherwise, see if we can read the string.
4228 ConstantDataArraySlice Slice;
4229 if (!getConstantDataArrayInfo(V, Slice, CharSize))
4230 return 0;
4231
4232 if (Slice.Array == nullptr)
4233 return 1;
4234
4235 // Search for nul characters
4236 unsigned NullIndex = 0;
4237 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4238 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4239 break;
4240 }
4241
4242 return NullIndex + 1;
4243 }
4244
4245 /// If we can compute the length of the string pointed to by
4246 /// the specified pointer, return 'len+1'. If we can't, return 0.
4247 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4248 if (!V->getType()->isPointerTy())
4249 return 0;
4250
4251 SmallPtrSet<const PHINode*, 32> PHIs;
4252 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4253 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
4254 // return the length of an empty string (1, counting the nul terminator).
4255 return Len == ~0ULL ? 1 : Len;
4256 }
4257
4258 const Value *
4259 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4260 bool MustPreserveNullness) {
4261 assert(Call &&
4262 "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4263 if (const Value *RV = Call->getReturnedArgOperand())
4264 return RV;
4265 // This can be used only as an aliasing property.
4266 if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4267 Call, MustPreserveNullness))
4268 return Call->getArgOperand(0);
4269 return nullptr;
4270 }
4271
4272 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4273 const CallBase *Call, bool MustPreserveNullness) {
4274 switch (Call->getIntrinsicID()) {
4275 case Intrinsic::launder_invariant_group:
4276 case Intrinsic::strip_invariant_group:
4277 case Intrinsic::aarch64_irg:
4278 case Intrinsic::aarch64_tagp:
4279 return true;
4280 case Intrinsic::ptrmask:
4281 return !MustPreserveNullness;
4282 default:
4283 return false;
4284 }
4285 }
4286
4287 /// \p PN defines a loop-variant pointer to an object. Check if the
4288 /// previous iteration of the loop was referring to the same object as \p PN.
4289 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4290 const LoopInfo *LI) {
4291 // Find the loop-defined value.
4292 Loop *L = LI->getLoopFor(PN->getParent());
4293 if (PN->getNumIncomingValues() != 2)
4294 return true;
4295
4296 // Find the value from previous iteration.
4297 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4298 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4299 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4300 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4301 return true;
4302
4303 // If a new pointer is loaded in the loop, the pointer references a different
4304 // object in every iteration. E.g.:
4305 // for (i)
4306 // int *p = a[i];
4307 // ...
4308 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4309 if (!L->isLoopInvariant(Load->getPointerOperand()))
4310 return false;
4311 return true;
4312 }
4313
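// For example, given
//   %p = getelementptr inbounds [4 x i32], [4 x i32]* @g, i64 0, i64 2
// this strips the GEP and returns the global @g.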
4314 const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
4315 if (!V->getType()->isPointerTy())
4316 return V;
4317 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4318 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
4319 V = GEP->getPointerOperand();
4320 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4321 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4322 V = cast<Operator>(V)->getOperand(0);
4323 if (!V->getType()->isPointerTy())
4324 return V;
4325 } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
4326 if (GA->isInterposable())
4327 return V;
4328 V = GA->getAliasee();
4329 } else {
4330 if (auto *PHI = dyn_cast<PHINode>(V)) {
4331 // Look through single-arg phi nodes created by LCSSA.
4332 if (PHI->getNumIncomingValues() == 1) {
4333 V = PHI->getIncomingValue(0);
4334 continue;
4335 }
4336 } else if (auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking knows about special capturing properties of some
        // intrinsics, such as launder.invariant.group, that cannot be
        // expressed with attributes but still return a pointer that aliases
        // their argument. Because an analysis may assume that a nocapture
        // pointer is not returned from such an intrinsic (the function would
        // otherwise have to be marked with the returned attribute), it is
        // crucial to use this helper so that we stay in sync with
        // CaptureTracking. Not using it may cause subtle miscompilations
        // where two aliasing pointers are assumed not to alias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      return V;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  }
  return V;
}

void llvm::getUnderlyingObjects(const Value *V,
                                SmallVectorImpl<const Value *> &Objects,
                                LoopInfo *LI, unsigned MaxLookup) {
  SmallPtrSet<const Value *, 4> Visited;
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    const Value *P = Worklist.pop_back_val();
    P = getUnderlyingObject(P, MaxLookup);

    if (!Visited.insert(P).second)
      continue;

    if (auto *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (auto *PN = dyn_cast<PHINode>(P)) {
      // If this PHI changes the underlying object in every iteration of the
      // loop, don't look through it.  Consider:
      //   int **A;
      //   for (i) {
      //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
      //     Curr = A[i];
      //     *Prev, *Curr;
      //   }
      //
      // Prev is tracking Curr one iteration behind so they refer to different
      // underlying objects.
      if (!LI || !LI->isLoopHeader(PN->getParent()) ||
          isSameUnderlyingObjectInLoop(PN, LI))
        append_range(Worklist, PN->incoming_values());
      continue;
    }

    Objects.push_back(P);
  } while (!Worklist.empty());
}

/// This is the function that does the work of looking through basic
/// ptrtoint+arithmetic+inttoptr sequences.
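/// For example (illustrative IR), in
///   %i = ptrtoint i8* %p to i64
///   %j = add i64 %i, 8
///   %q = inttoptr i64 %j to i8*
/// starting from %j this walks the add back to the ptrtoint and returns %p.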
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular pointer-based getUnderlyingObjects.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant, a multiplied value, or a phi, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
           !isa<PHINode>(U->getOperand(1))))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (true);
}

/// This is a wrapper around getUnderlyingObjects that adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
/// It returns false if an unidentified object is found by
/// getUnderlyingObjects.
bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
                                          SmallVectorImpl<Value *> &Objects) {
  SmallPtrSet<const Value *, 16> Visited;
  SmallVector<const Value *, 4> Working(1, V);
  do {
    V = Working.pop_back_val();

    SmallVector<const Value *, 4> Objs;
    getUnderlyingObjects(V, Objs);

    for (const Value *V : Objs) {
      if (!Visited.insert(V).second)
        continue;
      if (Operator::getOpcode(V) == Instruction::IntToPtr) {
        const Value *O =
            getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
        if (O->getType()->isPointerTy()) {
          Working.push_back(O);
          continue;
        }
      }
      // If getUnderlyingObjects fails to find an identifiable object,
      // getUnderlyingObjectsForCodeGen also fails for safety.
      if (!isIdentifiedObject(V)) {
        Objects.clear();
        return false;
      }
      Objects.push_back(const_cast<Value *>(V));
    }
  } while (!Working.empty());
  return true;
}

AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
  AllocaInst *Result = nullptr;
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist;

  auto AddWork = [&](Value *V) {
    if (Visited.insert(V).second)
      Worklist.push_back(V);
  };

  AddWork(V);
  do {
    V = Worklist.pop_back_val();
    assert(Visited.count(V));

    if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
      if (Result && Result != AI)
        return nullptr;
      Result = AI;
    } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
      AddWork(CI->getOperand(0));
    } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
      for (Value *IncValue : PN->incoming_values())
        AddWork(IncValue);
    } else if (auto *SI = dyn_cast<SelectInst>(V)) {
      AddWork(SI->getTrueValue());
      AddWork(SI->getFalseValue());
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
      if (OffsetZero && !GEP->hasAllZeroIndices())
        return nullptr;
      AddWork(GEP->getPointerOperand());
    } else {
      return nullptr;
    }
  } while (!Worklist.empty());

  return Result;
}

static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
    const Value *V, bool AllowLifetime, bool AllowDroppable) {
  for (const User *U : V->users()) {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II)
      return false;

    if (AllowLifetime && II->isLifetimeStartOrEnd())
      continue;

    if (AllowDroppable && II->isDroppable())
      continue;

    return false;
  }
  return true;
}

bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
  return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
      V, /* AllowLifetime */ true, /* AllowDroppable */ false);
}
bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
  return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
      V, /* AllowLifetime */ true, /* AllowDroppable */ true);
}

bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
  if (!LI.isUnordered())
    return true;
  const Function &F = *LI.getFunction();
  // Speculative load may create a race that did not exist in the source.
  return F.hasFnAttribute(Attribute::SanitizeThread) ||
         // Speculative load may load data from dirty regions.
         F.hasFnAttribute(Attribute::SanitizeAddress) ||
         F.hasFnAttribute(Attribute::SanitizeHWAddress);
}

bool llvm::isSafeToSpeculativelyExecute(const Value *V,
                                        const Instruction *CtxI,
                                        const DominatorTree *DT,
                                        const TargetLibraryInfo *TLI) {
  const Operator *Inst = dyn_cast<Operator>(V);
  if (!Inst)
    return false;

  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
    if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
      if (C->canTrap())
        return false;

  switch (Inst->getOpcode()) {
  default:
    return true;
  case Instruction::UDiv:
  case Instruction::URem: {
    // x / y is undefined if y == 0.
    const APInt *V;
    if (match(Inst->getOperand(1), m_APInt(V)))
      return *V != 0;
    return false;
  }
  case Instruction::SDiv:
  case Instruction::SRem: {
    // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
    const APInt *Numerator, *Denominator;
    if (!match(Inst->getOperand(1), m_APInt(Denominator)))
      return false;
    // We cannot hoist this division if the denominator is 0.
    if (*Denominator == 0)
      return false;
    // It's safe to hoist if the denominator is not 0 or -1.
    if (!Denominator->isAllOnesValue())
      return true;
    // At this point we know that the denominator is -1. It is safe to hoist as
    // long as we know that the numerator is not INT_MIN.
    if (match(Inst->getOperand(0), m_APInt(Numerator)))
      return !Numerator->isMinSignedValue();
    // The numerator *might* be MinSignedValue.
    return false;
  }
  case Instruction::Load: {
    const LoadInst *LI = cast<LoadInst>(Inst);
    if (mustSuppressSpeculation(*LI))
      return false;
    const DataLayout &DL = LI->getModule()->getDataLayout();
    return isDereferenceableAndAlignedPointer(
        LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
        DL, CtxI, DT, TLI);
  }
  case Instruction::Call: {
    auto *CI = cast<const CallInst>(Inst);
    const Function *Callee = CI->getCalledFunction();

    // The called function could have undefined behavior or side-effects, even
    // if marked readnone nounwind.
    return Callee && Callee->isSpeculatable();
  }
  case Instruction::VAArg:
  case Instruction::Alloca:
  case Instruction::Invoke:
  case Instruction::CallBr:
  case Instruction::PHI:
  case Instruction::Store:
  case Instruction::Ret:
  case Instruction::Br:
  case Instruction::IndirectBr:
  case Instruction::Switch:
  case Instruction::Unreachable:
  case Instruction::Fence:
  case Instruction::AtomicRMW:
  case Instruction::AtomicCmpXchg:
  case Instruction::LandingPad:
  case Instruction::Resume:
  case Instruction::CatchSwitch:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
  case Instruction::CleanupPad:
  case Instruction::CleanupRet:
    return false; // Misc instructions which have effects
  }
}
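
// Typical use (illustrative sketch, not from this file): a hoisting pass
// guards code motion with this query, e.g.
//   if (isSafeToSpeculativelyExecute(&I))
//     I.moveBefore(PreheaderTerminator); // hypothetical insertion point
// so an instruction only runs unconditionally when that cannot introduce UB.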

bool llvm::mayBeMemoryDependent(const Instruction &I) {
  return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
}

/// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
  switch (OR) {
  case ConstantRange::OverflowResult::MayOverflow:
    return OverflowResult::MayOverflow;
  case ConstantRange::OverflowResult::AlwaysOverflowsLow:
    return OverflowResult::AlwaysOverflowsLow;
  case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
    return OverflowResult::AlwaysOverflowsHigh;
  case ConstantRange::OverflowResult::NeverOverflows:
    return OverflowResult::NeverOverflows;
  }
  llvm_unreachable("Unknown OverflowResult");
}

/// Combine constant ranges from computeConstantRange() and computeKnownBits().
static ConstantRange computeConstantRangeIncludingKnownBits(
    const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
    OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
  KnownBits Known = computeKnownBits(
      V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
  ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
  ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
  ConstantRange::PreferredRangeType RangeType =
      ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
  return CR1.intersectWith(CR2, RangeType);
}

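// Worked example (illustrative): for i8 operands whose known bits bound them
// to the unsigned range [0, 16), the largest possible product is
// 15 * 15 = 225 < 256, so unsignedMulMayOverflow below reports
// NeverOverflows.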
OverflowResult llvm::computeOverflowForUnsignedMul(
    const Value *LHS, const Value *RHS, const DataLayout &DL,
    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
    bool UseInstrInfo) {
  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                        nullptr, UseInstrInfo);
  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                        nullptr, UseInstrInfo);
  ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
  ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
  return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
}

OverflowResult
llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                  const DataLayout &DL, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  // Multiplying n * m significant bits yields a result of n + m significant
  // bits. If the total number of significant bits does not exceed the
  // result bit width (minus 1), there is no overflow.
  // This means if we have enough leading sign bits in the operands
  // we can guarantee that the result does not overflow.
  // Ref: "Hacker's Delight" by Henry Warren
  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();

  // Note that underestimating the number of sign bits gives a more
  // conservative answer.
  unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
                      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);

  // First handle the easy case: if we have enough sign bits there's
  // definitely no overflow.
  if (SignBits > BitWidth + 1)
    return OverflowResult::NeverOverflows;

  // There are two ambiguous cases where there can be no overflow:
  //   SignBits == BitWidth + 1    and
  //   SignBits == BitWidth
  // The second case is difficult to check, therefore we only handle the
  // first case.
  if (SignBits == BitWidth + 1) {
    // It overflows only when both arguments are negative and the true
    // product is exactly the minimum negative number.
    // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
    // For simplicity we just check if at least one side is not negative.
    KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                          nullptr, UseInstrInfo);
    KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                          nullptr, UseInstrInfo);
    if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
      return OverflowResult::NeverOverflows;
  }
  return OverflowResult::MayOverflow;
}

OverflowResult llvm::computeOverflowForUnsignedAdd(
    const Value *LHS, const Value *RHS, const DataLayout &DL,
    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
    bool UseInstrInfo) {
  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
      LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
      nullptr, UseInstrInfo);
  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
      RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
      nullptr, UseInstrInfo);
  return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
}

static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
                                                  const Value *RHS,
                                                  const AddOperator *Add,
                                                  const DataLayout &DL,
                                                  AssumptionCache *AC,
                                                  const Instruction *CxtI,
                                                  const DominatorTree *DT) {
  if (Add && Add->hasNoSignedWrap()) {
    return OverflowResult::NeverOverflows;
  }

  // If LHS and RHS each have at least two sign bits, the addition will look
  // like
  //
  //   XX..... +
  //   YY.....
  //
  // If the carry into the most significant position is 0, X and Y can't both
  // be 1 and therefore the carry out of the addition is also 0.
  //
  // If the carry into the most significant position is 1, X and Y can't both
  // be 0 and therefore the carry out of the addition is also 1.
  //
  // Since the carry into the most significant position is always equal to
  // the carry out of the addition, there is no signed overflow.
  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
    return OverflowResult::NeverOverflows;

  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
      LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
      RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
  OverflowResult OR =
      mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
  if (OR != OverflowResult::MayOverflow)
    return OR;

  // The remaining code needs Add to be available. Return early if it is not.
  if (!Add)
    return OverflowResult::MayOverflow;

  // If the sign of Add is the same as at least one of the operands, this add
  // CANNOT overflow. If this can be determined from the known bits of the
  // operands the above signedAddMayOverflow() check will have already done so.
  // The only other way to improve on the known bits is from an assumption, so
  // call computeKnownBitsFromAssume() directly.
  bool LHSOrRHSKnownNonNegative =
      (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
  bool LHSOrRHSKnownNegative =
      (LHSRange.isAllNegative() || RHSRange.isAllNegative());
  if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
    KnownBits AddKnown(LHSRange.getBitWidth());
    computeKnownBitsFromAssume(
        Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
    if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
        (AddKnown.isNegative() && LHSOrRHSKnownNegative))
      return OverflowResult::NeverOverflows;
  }

  return OverflowResult::MayOverflow;
}

OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
                                                   const Value *RHS,
                                                   const DataLayout &DL,
                                                   AssumptionCache *AC,
                                                   const Instruction *CxtI,
                                                   const DominatorTree *DT) {
  // Checking for conditions implied by dominating conditions may be expensive.
  // Limit it to usub_with_overflow calls for now.
  if (match(CxtI,
            m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
    if (auto C =
            isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
      if (*C)
        return OverflowResult::NeverOverflows;
      return OverflowResult::AlwaysOverflowsLow;
    }
  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
      LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
      RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
  return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
}

OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
                                                 const Value *RHS,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  // If LHS and RHS each have at least two sign bits, the subtraction
  // cannot overflow.
  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
    return OverflowResult::NeverOverflows;

  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
      LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
      RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
  return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
}

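// Shape of the pattern recognized below (illustrative IR):
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %sum = extractvalue { i32, i1 } %res, 0
//   %ovf = extractvalue { i32, i1 } %res, 1
//   br i1 %ovf, label %trap, label %cont
// If every use of %sum is dominated by the no-overflow edge to %cont, the
// math result is known not to have wrapped.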
bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                                     const DominatorTree &DT) {
  SmallVector<const BranchInst *, 2> GuardingBranches;
  SmallVector<const ExtractValueInst *, 2> Results;

  for (const User *U : WO->users()) {
    if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
      assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");

      if (EVI->getIndices()[0] == 0)
        Results.push_back(EVI);
      else {
        assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");

        for (const auto *U : EVI->users())
          if (const auto *B = dyn_cast<BranchInst>(U)) {
            assert(B->isConditional() && "How else is it using an i1?");
            GuardingBranches.push_back(B);
          }
      }
    } else {
      // We are using the aggregate directly in a way we don't want to analyze
      // here (storing it to a global, say).
      return false;
    }
  }

  auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
    BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
    if (!NoWrapEdge.isSingleEdge())
      return false;

    // Check if all users of the add are provably no-wrap.
    for (const auto *Result : Results) {
      // If the extractvalue itself is not executed on overflow, then we don't
      // need to check each use separately, since domination is transitive.
      if (DT.dominates(NoWrapEdge, Result->getParent()))
        continue;

      for (auto &RU : Result->uses())
        if (!DT.dominates(NoWrapEdge, RU))
          return false;
    }

    return true;
  };

  return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
}

static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly) {
  // See whether Op has flags that may create poison.
  if (const auto *OvOp = dyn_cast<OverflowingBinaryOperator>(Op)) {
    if (OvOp->hasNoSignedWrap() || OvOp->hasNoUnsignedWrap())
      return true;
  }
  if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(Op))
    if (ExactOp->isExact())
      return true;
  if (const auto *FP = dyn_cast<FPMathOperator>(Op)) {
    auto FMF = FP->getFastMathFlags();
    if (FMF.noNaNs() || FMF.noInfs())
      return true;
  }

  unsigned Opcode = Op->getOpcode();

  // Check whether the opcode is a poison/undef-generating operation.
  switch (Opcode) {
  case Instruction::Shl:
  case Instruction::AShr:
  case Instruction::LShr: {
    // Shifts return poison if the shift amount is equal to or larger than
    // the bitwidth.
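    // E.g. (illustrative) 'shl i8 %x, 8' is poison for any %x.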
    if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) {
      SmallVector<Constant *, 4> ShiftAmounts;
      if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
        unsigned NumElts = FVTy->getNumElements();
        for (unsigned i = 0; i < NumElts; ++i)
          ShiftAmounts.push_back(C->getAggregateElement(i));
      } else if (isa<ScalableVectorType>(C->getType()))
        return true; // Can't tell, just return true to be safe
      else
        ShiftAmounts.push_back(C);

      bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) {
        auto *CI = dyn_cast_or_null<ConstantInt>(C);
        return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
      });
      return !Safe;
    }
    return true;
  }
  case Instruction::FPToSI:
  case Instruction::FPToUI:
    // fptosi/ui yields poison if the resulting value does not fit in the
    // destination type.
    return true;
  case Instruction::Call:
    if (auto *II = dyn_cast<IntrinsicInst>(Op)) {
      switch (II->getIntrinsicID()) {
      // TODO: Add more intrinsics.
      case Intrinsic::ctpop:
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::smul_with_overflow:
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::usub_with_overflow:
      case Intrinsic::umul_with_overflow:
        return false;
      }
    }
    LLVM_FALLTHROUGH;
  case Instruction::CallBr:
  case Instruction::Invoke: {
    const auto *CB = cast<CallBase>(Op);
    return !CB->hasRetAttr(Attribute::NoUndef);
  }
  case Instruction::InsertElement:
  case Instruction::ExtractElement: {
    // If the index exceeds the length of the vector, the result is poison.
    auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
    unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
    auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
    if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
      return true;
    return false;
  }
  case Instruction::ShuffleVector: {
    // shufflevector may return undef.
    if (PoisonOnly)
      return false;
    ArrayRef<int> Mask = isa<ConstantExpr>(Op)
                             ? cast<ConstantExpr>(Op)->getShuffleMask()
                             : cast<ShuffleVectorInst>(Op)->getShuffleMask();
    return is_contained(Mask, UndefMaskElem);
  }
  case Instruction::FNeg:
  case Instruction::PHI:
  case Instruction::Select:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::ExtractValue:
  case Instruction::InsertValue:
  case Instruction::Freeze:
  case Instruction::ICmp:
  case Instruction::FCmp:
    return false;
  case Instruction::GetElementPtr: {
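    // With inbounds, an out-of-bounds result address is poison (per LangRef),
    // so the inbounds flag itself is what can create poison here.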
    const auto *GEP = cast<GEPOperator>(Op);
    return GEP->isInBounds();
  }
  default: {
    const auto *CE = dyn_cast<ConstantExpr>(Op);
    if (isa<CastInst>(Op) || (CE && CE->isCast()))
      return false;
    else if (Instruction::isBinaryOp(Opcode))
      return false;
    // Be conservative and return true.
    return true;
  }
  }
}

bool llvm::canCreateUndefOrPoison(const Operator *Op) {
  return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false);
}

bool llvm::canCreatePoison(const Operator *Op) {
  return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true);
}

static bool directlyImpliesPoison(const Value *ValAssumedPoison,
                                  const Value *V, unsigned Depth) {
  if (ValAssumedPoison == V)
    return true;

  const unsigned MaxDepth = 2;
  if (Depth >= MaxDepth)
    return false;

  if (const auto *I = dyn_cast<Instruction>(V)) {
    if (propagatesPoison(cast<Operator>(I)))
      return any_of(I->operands(), [=](const Value *Op) {
        return directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
      });

    // 'select ValAssumedPoison, _, _' is poison.
    if (const auto *SI = dyn_cast<SelectInst>(I))
      return directlyImpliesPoison(ValAssumedPoison, SI->getCondition(),
                                   Depth + 1);
    // V  = extractvalue V0, idx
    // V2 = extractvalue V0, idx2
    // V0's elements are all poison or none are (e.g., add_with_overflow).
    const WithOverflowInst *II;
    if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
        (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
         llvm::is_contained(II->arg_operands(), ValAssumedPoison)))
      return true;
  }
  return false;
}

static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
                          unsigned Depth) {
  if (isGuaranteedNotToBeUndefOrPoison(ValAssumedPoison))
    return true;

  if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
    return true;

  const unsigned MaxDepth = 2;
  if (Depth >= MaxDepth)
    return false;

  const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
  if (I && !canCreatePoison(cast<Operator>(I))) {
    return all_of(I->operands(), [=](const Value *Op) {
      return impliesPoison(Op, V, Depth + 1);
    });
  }
  return false;
}

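// Example (illustrative): impliesPoison(%y, %x) holds for
//   %y = add i32 %x, 1
// because a plain add cannot create poison and the constant operand is
// never poison, so %y being poison implies %x was poison.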
bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
  return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
}

static bool programUndefinedIfUndefOrPoison(const Value *V,
                                            bool PoisonOnly);

static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
                                             AssumptionCache *AC,
                                             const Instruction *CtxI,
                                             const DominatorTree *DT,
                                             unsigned Depth, bool PoisonOnly) {
  if (Depth >= MaxAnalysisRecursionDepth)
    return false;

  if (isa<MetadataAsValue>(V))
    return false;

  if (const auto *A = dyn_cast<Argument>(V)) {
    if (A->hasAttribute(Attribute::NoUndef))
      return true;
  }

  if (auto *C = dyn_cast<Constant>(V)) {
    if (isa<UndefValue>(C))
      return PoisonOnly && !isa<PoisonValue>(C);

    if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
        isa<ConstantPointerNull>(C) || isa<Function>(C))
      return true;

    if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
      return (PoisonOnly ? !C->containsPoisonElement()
                         : !C->containsUndefOrPoisonElement()) &&
             !C->containsConstantExpression();
  }

  // Strip cast operations from a pointer value.
  // Note that stripPointerCastsSameRepresentation can strip off getelementptr
  // inbounds with zero offset. To guarantee that the result isn't poison, the
  // stripped pointer is checked as it has to point into an allocated object
  // or be null, so that an inbounds getelementptr with a zero offset could
  // not produce poison.
  // It can also strip off addrspacecasts that do not change the bit
  // representation; we consider such an addrspacecast equivalent to a no-op.
  auto *StrippedV = V->stripPointerCastsSameRepresentation();
  if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
      isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
    return true;

  auto OpCheck = [&](const Value *V) {
    return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
                                            PoisonOnly);
  };

  if (auto *Opr = dyn_cast<Operator>(V)) {
    // If the value is a freeze instruction, then it can never
    // be undef or poison.
    if (isa<FreezeInst>(V))
      return true;

    if (const auto *CB = dyn_cast<CallBase>(V)) {
      if (CB->hasRetAttr(Attribute::NoUndef))
        return true;
    }

    if (const auto *PN = dyn_cast<PHINode>(V)) {
      unsigned Num = PN->getNumIncomingValues();
      bool IsWellDefined = true;
      for (unsigned i = 0; i < Num; ++i) {
        auto *TI = PN->getIncomingBlock(i)->getTerminator();
        if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
                                              DT, Depth + 1, PoisonOnly)) {
          IsWellDefined = false;
          break;
        }
      }
      if (IsWellDefined)
        return true;
    } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
      return true;
  }

  if (auto *I = dyn_cast<LoadInst>(V))
    if (I->getMetadata(LLVMContext::MD_noundef))
      return true;

  if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
    return true;

  // CtxI may be null or a cloned instruction.
  if (!CtxI || !CtxI->getParent() || !DT)
    return false;

  auto *DNode = DT->getNode(CtxI->getParent());
  if (!DNode)
    // Unreachable block
    return false;

  // If V is used as a branch condition before reaching CtxI, V cannot be
  // undef or poison.
  //   br V, BB1, BB2
  // BB1:
  //   CtxI ; V cannot be undef or poison here
  auto *Dominator = DNode->getIDom();
  while (Dominator) {
    auto *TI = Dominator->getBlock()->getTerminator();

    Value *Cond = nullptr;
    if (auto BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional())
        Cond = BI->getCondition();
    } else if (auto SI = dyn_cast<SwitchInst>(TI)) {
      Cond = SI->getCondition();
    }

    if (Cond) {
      if (Cond == V)
        return true;
      else if (PoisonOnly && isa<Operator>(Cond)) {
        // For poison, we can analyze further.
        auto *Opr = cast<Operator>(Cond);
        if (propagatesPoison(Opr) && is_contained(Opr->operand_values(), V))
          return true;
      }
    }

    Dominator = Dominator->getIDom();
  }

  SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NoUndef};
  if (getKnowledgeValidInContext(V, AttrKinds, CtxI, DT, AC))
    return true;

  return false;
}

bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
                                            const Instruction *CtxI,
                                            const DominatorTree *DT,
                                            unsigned Depth) {
  return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
}

bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
                                     const Instruction *CtxI,
                                     const DominatorTree *DT, unsigned Depth) {
  return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
}

OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
                                       Add, DL, AC, CxtI, DT);
}

OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
                                                 const Value *RHS,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
}

bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
  // Note: An atomic operation isn't guaranteed to return in a reasonable
  // amount of time because it's possible for another thread to interfere with
  // it for an arbitrary length of time, but programs aren't allowed to rely
  // on that.

  // If there is no successor, then execution can't transfer to it.
  if (isa<ReturnInst>(I))
    return false;
  if (isa<UnreachableInst>(I))
    return false;

  // An instruction that returns without throwing must transfer control flow
  // to a successor.
  return !I->mayThrow() && I->willReturn();
}

bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
  // TODO: This is slightly conservative for invoke instructions, since
  // exiting via an exception *is* normal control flow for them.
  for (const Instruction &I : *BB)
    if (!isGuaranteedToTransferExecutionToSuccessor(&I))
      return false;
  return true;
}

bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                                  const Loop *L) {
  // The loop header is guaranteed to be executed for every iteration.
  //
  // FIXME: Relax this constraint to cover all basic blocks that are
  // guaranteed to be executed at every iteration.
  if (I->getParent() != L->getHeader()) return false;

  for (const Instruction &LI : *L->getHeader()) {
    if (&LI == I) return true;
    if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
  }
  llvm_unreachable("Instruction not contained in its own parent basic block.");
}

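// Example (illustrative): 'add i32 %p, 1' propagates poison from %p, whereas
// 'select i1 %c, i32 %p, i32 0' need not: when %c is false the result is 0
// even if %p is poison, which is why Select returns false below.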
bool llvm::propagatesPoison(const Operator *I) {
  switch (I->getOpcode()) {
  case Instruction::Freeze:
  case Instruction::Select:
  case Instruction::PHI:
  case Instruction::Invoke:
    return false;
  case Instruction::Call:
    if (auto *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      // TODO: Add more intrinsics.
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::smul_with_overflow:
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::usub_with_overflow:
      case Intrinsic::umul_with_overflow:
        // If an input is a vector containing a poison element, the
        // corresponding lanes of both output vectors (the calculated
        // results and the overflow bits) are poison.
        return true;
      case Intrinsic::ctpop:
        return true;
      }
    }
    return false;
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::GetElementPtr:
    return true;
  default:
    if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
      return true;

    // Be conservative and return false.
    return false;
  }
}

void llvm::getGuaranteedWellDefinedOps(
    const Instruction *I, SmallPtrSetImpl<const Value *> &Operands) {
  switch (I->getOpcode()) {
  case Instruction::Store:
    Operands.insert(cast<StoreInst>(I)->getPointerOperand());
    break;

  case Instruction::Load:
    Operands.insert(cast<LoadInst>(I)->getPointerOperand());
    break;

  // Since the dereferenceable attribute implies noundef, atomic operations
  // also implicitly have noundef pointers.
  case Instruction::AtomicCmpXchg:
    Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
    break;

  case Instruction::AtomicRMW:
    Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand());
    break;

  case Instruction::Call:
  case Instruction::Invoke: {
    const CallBase *CB = cast<CallBase>(I);
    if (CB->isIndirectCall())
      Operands.insert(CB->getCalledOperand());
    for (unsigned i = 0; i < CB->arg_size(); ++i) {
      if (CB->paramHasAttr(i, Attribute::NoUndef) ||
          CB->paramHasAttr(i, Attribute::Dereferenceable))
        Operands.insert(CB->getArgOperand(i));
    }
    break;
  }

  default:
    break;
  }
}

void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
                                     SmallPtrSetImpl<const Value *> &Operands) {
  getGuaranteedWellDefinedOps(I, Operands);
  switch (I->getOpcode()) {
  // The divisor operand of these operations must not be poison; unlike the
  // well-defined operands collected above, it is allowed to be partially
  // undef.
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    Operands.insert(I->getOperand(1));
    break;

  default:
    break;
  }
}

bool llvm::mustTriggerUB(const Instruction *I,
                         const SmallSet<const Value *, 16>& KnownPoison) {
  SmallPtrSet<const Value *, 4> NonPoisonOps;
  getGuaranteedNonPoisonOps(I, NonPoisonOps);

  for (const auto *V : NonPoisonOps)
    if (KnownPoison.count(V))
      return true;

  return false;
}

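// Example (illustrative): if V feeds the pointer operand of
//   store i32 0, i32* %V
// later in the same block, then V being undef or poison would make the
// program undefined, so callers may treat V as well-defined.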
static bool programUndefinedIfUndefOrPoison(const Value *V,
                                            bool PoisonOnly) {
  // We currently only look for uses of values within the same basic
  // block, as that makes it easier to guarantee that the uses will be
  // executed given that Inst is executed.
  //
  // FIXME: Expand this to consider uses beyond the same basic block. To do
  // this, look out for the distinction between post-dominance and strong
  // post-dominance.
  const BasicBlock *BB = nullptr;
  BasicBlock::const_iterator Begin;
  if (const auto *Inst = dyn_cast<Instruction>(V)) {
    BB = Inst->getParent();
    Begin = Inst->getIterator();
    Begin++;
  } else if (const auto *Arg = dyn_cast<Argument>(V)) {
    BB = &Arg->getParent()->getEntryBlock();
    Begin = BB->begin();
  } else {
    return false;
  }

  // Limit number of instructions we look at, to avoid scanning through large
  // blocks. The current limit is chosen arbitrarily.
  unsigned ScanLimit = 32;
  BasicBlock::const_iterator End = BB->end();

  if (!PoisonOnly) {
    // Since undef does not propagate eagerly, be conservative & just check
    // whether a value is directly passed to an instruction that must take
    // well-defined operands.

    for (auto &I : make_range(Begin, End)) {
      if (isa<DbgInfoIntrinsic>(I))
        continue;
      if (--ScanLimit == 0)
        break;

      SmallPtrSet<const Value *, 4> WellDefinedOps;
      getGuaranteedWellDefinedOps(&I, WellDefinedOps);
      if (WellDefinedOps.contains(V))
        return true;

      if (!isGuaranteedToTransferExecutionToSuccessor(&I))
        break;
    }
    return false;
  }

  // Set of values that we have proved will yield poison if V does.
  SmallSet<const Value *, 16> YieldsPoison;
  SmallSet<const BasicBlock *, 4> Visited;

  YieldsPoison.insert(V);
  auto Propagate = [&](const User *User) {
    if (propagatesPoison(cast<Operator>(User)))
      YieldsPoison.insert(User);
  };
  for_each(V->users(), Propagate);
  Visited.insert(BB);

  while (true) {
    for (auto &I : make_range(Begin, End)) {
      if (isa<DbgInfoIntrinsic>(I))
        continue;
      if (--ScanLimit == 0)
        return false;
      if (mustTriggerUB(&I, YieldsPoison))
        return true;
      if (!isGuaranteedToTransferExecutionToSuccessor(&I))
        return false;

      // Mark poison that propagates from I through uses of I.
      if (YieldsPoison.count(&I))
        for_each(I.users(), Propagate);
    }

    BB = BB->getSingleSuccessor();
    if (!BB || !Visited.insert(BB).second)
      break;

    Begin = BB->getFirstNonPHI()->getIterator();
    End = BB->end();
  }
  return false;
}

bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
  return ::programUndefinedIfUndefOrPoison(Inst, false);
}

bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
  return ::programUndefinedIfUndefOrPoison(Inst, true);
}

static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
  if (FMF.noNaNs())
    return true;

  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isNaN();

  if (auto *C = dyn_cast<ConstantDataVector>(V)) {
    if (!C->getElementType()->isFloatingPointTy())
      return false;
    for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
      if (C->getElementAsAPFloat(I).isNaN())
        return false;
    }
    return true;
  }

  if (isa<ConstantAggregateZero>(V))
    return true;

  return false;
}

static bool isKnownNonZero(const Value *V) {
  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isZero();

  if (auto *C = dyn_cast<ConstantDataVector>(V)) {
    if (!C->getElementType()->isFloatingPointTy())
      return false;
    for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
      if (C->getElementAsAPFloat(I).isZero())
        return false;
    }
    return true;
  }

  return false;
}

/// Match clamp pattern for float types without caring about NaNs or signed
/// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this
/// function recognizes if it can be substituted by a "canonical" min/max
/// pattern.
static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
                                               Value *CmpLHS, Value *CmpRHS,
                                               Value *TrueVal, Value *FalseVal,
                                               Value *&LHS, Value *&RHS) {
  // Try to match
  //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
  //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
  // and return the description of the outer Max/Min.

  // First, check if select has inverse order:
  if (CmpRHS == FalseVal) {
    std::swap(TrueVal, FalseVal);
    Pred = CmpInst::getInversePredicate(Pred);
  }

  // Assume success now. If there's no match, callers should not use these
  // anyway.
  LHS = TrueVal;
  RHS = FalseVal;

  const APFloat *FC1;
  if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
    return {SPF_UNKNOWN, SPNB_NA, false};

  const APFloat *FC2;
  switch (Pred) {
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_ULT:
  case CmpInst::FCMP_ULE:
    if (match(FalseVal,
              m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
                          m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
        *FC1 < *FC2)
      return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
    break;
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_OGE:
  case CmpInst::FCMP_UGT:
  case CmpInst::FCMP_UGE:
    if (match(FalseVal,
              m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
                          m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
        *FC1 > *FC2)
      return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
    break;
  default:
    break;
  }

  return {SPF_UNKNOWN, SPNB_NA, false};
}

/// Recognize variations of:
///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
                                      Value *CmpLHS, Value *CmpRHS,
                                      Value *TrueVal, Value *FalseVal) {
  // Swap the select operands and predicate to match the patterns below.
  if (CmpRHS != TrueVal) {
    Pred = ICmpInst::getSwappedPredicate(Pred);
    std::swap(TrueVal, FalseVal);
  }
  const APInt *C1;
  if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
    const APInt *C2;
    // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
    if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
      return {SPF_SMAX, SPNB_NA, false};

    // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
    if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
      return {SPF_SMIN, SPNB_NA, false};

    // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
    if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
      return {SPF_UMAX, SPNB_NA, false};

    // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
    if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
      return {SPF_UMIN, SPNB_NA, false};
  }
  return {SPF_UNKNOWN, SPNB_NA, false};
}

/// Recognize variations of:
///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
                                               Value *CmpLHS, Value *CmpRHS,
                                               Value *TVal, Value *FVal,
                                               unsigned Depth) {
  // TODO: Allow FP min/max with nnan/nsz.
  assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");

  Value *A = nullptr, *B = nullptr;
  SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
  if (!SelectPatternResult::isMinOrMax(L.Flavor))
    return {SPF_UNKNOWN, SPNB_NA, false};

  Value *C = nullptr, *D = nullptr;
  SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
  if (L.Flavor != R.Flavor)
    return {SPF_UNKNOWN, SPNB_NA, false};

  // We have something like: x Pred y ? min(a, b) : min(c, d).
  // Try to match the compare to the min/max operations of the select operands.
  // First, make sure we have the right compare predicate.
  switch (L.Flavor) {
  case SPF_SMIN:
    if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_SMAX:
    if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_UMIN:
    if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_UMAX:
    if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  default:
    return {SPF_UNKNOWN, SPNB_NA, false};
  }

  // If there is a common operand in the already matched min/max and the other
  // min/max operands match the compare operands (either directly or inverted),
  // then this is min/max of the same flavor.

  // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
  // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
  if (D == B) {
    if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
                                         match(A, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
  // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
  if (C == B) {
    if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
                                         match(A, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
  // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
  if (D == A) {
    if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
                                         match(B, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
  // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
  if (C == A) {
    if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
                                         match(B, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }

  return {SPF_UNKNOWN, SPNB_NA, false};
}

/// If the input value is the result of a 'not' op, constant integer, or vector
/// splat of a constant integer, return the bitwise-not source value.
/// TODO: This could be extended to handle non-splat vector integer constants.
static Value *getNotValue(Value *V) {
  Value *NotV;
  if (match(V, m_Not(m_Value(NotV))))
    return NotV;

  const APInt *C;
  if (match(V, m_APInt(C)))
    return ConstantInt::get(V->getType(), ~(*C));

  return nullptr;
}

5709 /// Match non-obvious integer minimum and maximum sequences.
matchMinMax(CmpInst::Predicate Pred,Value * CmpLHS,Value * CmpRHS,Value * TrueVal,Value * FalseVal,Value * & LHS,Value * & RHS,unsigned Depth)5710 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
5711 Value *CmpLHS, Value *CmpRHS,
5712 Value *TrueVal, Value *FalseVal,
5713 Value *&LHS, Value *&RHS,
5714 unsigned Depth) {
5715 // Assume success. If there's no match, callers should not use these anyway.
5716 LHS = TrueVal;
5717 RHS = FalseVal;
5718
5719 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
5720 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5721 return SPR;
5722
5723 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
5724 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5725 return SPR;
5726
5727 // Look through 'not' ops to find disguised min/max.
5728 // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
5729 // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
5730 if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
5731 switch (Pred) {
5732 case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
5733 case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
5734 case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
5735 case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
5736 default: break;
5737 }
5738 }
5739
5740 // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
5741 // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
5742 if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
5743 switch (Pred) {
5744 case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
5745 case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
5746 case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
5747 case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
5748 default: break;
5749 }
5750 }
5751
5752 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
5753 return {SPF_UNKNOWN, SPNB_NA, false};
5754
5755 // Z = X -nsw Y
5756 // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
5757 // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
5758 if (match(TrueVal, m_Zero()) &&
5759 match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5760 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
5761
5762 // Z = X -nsw Y
5763 // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
5764 // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
5765 if (match(FalseVal, m_Zero()) &&
5766 match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5767 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
5768
5769 const APInt *C1;
5770 if (!match(CmpRHS, m_APInt(C1)))
5771 return {SPF_UNKNOWN, SPNB_NA, false};
5772
5773 // An unsigned min/max can be written with a signed compare.
5774 const APInt *C2;
5775 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
5776 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
5777 // Is the sign bit set?
5778 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
5779 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
5780 if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
5781 C2->isMaxSignedValue())
5782 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5783
5784 // Is the sign bit clear?
5785 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
5786 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
5787 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
5788 C2->isMinSignedValue())
5789 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5790 }
5791
5792 return {SPF_UNKNOWN, SPNB_NA, false};
5793 }
5794
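/// A sketch of what this matches (illustrative IR):
///   %y.neg = sub nsw i32 0, %y    ; isKnownNegation(%y.neg, %y, true)
///   %a = sub i32 %p, %q
///   %b = sub i32 %q, %p           ; isKnownNegation(%a, %b)
/// Both calls return true.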
5795 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
5796 assert(X && Y && "Invalid operand");
5797
5798 // X = sub (0, Y) || X = sub nsw (0, Y)
5799 if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
5800 (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
5801 return true;
5802
5803 // Y = sub (0, X) || Y = sub nsw (0, X)
5804 if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
5805 (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
5806 return true;
5807
5808 // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
5809 Value *A, *B;
5810 return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
5811 match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
5812 (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
5813 match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
5814 }
5815
5816 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
5817 FastMathFlags FMF,
5818 Value *CmpLHS, Value *CmpRHS,
5819 Value *TrueVal, Value *FalseVal,
5820 Value *&LHS, Value *&RHS,
5821 unsigned Depth) {
5822 if (CmpInst::isFPPredicate(Pred)) {
5823 // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
5824 // 0.0 operand, set the compare's 0.0 operands to that same value for the
5825 // purpose of identifying min/max. Disregard vector constants with undefined
5826 // elements because those cannot be back-propagated for analysis.
5827 Value *OutputZeroVal = nullptr;
5828 if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
5829 !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
5830 OutputZeroVal = TrueVal;
5831 else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
5832 !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
5833 OutputZeroVal = FalseVal;
5834
5835 if (OutputZeroVal) {
5836 if (match(CmpLHS, m_AnyZeroFP()))
5837 CmpLHS = OutputZeroVal;
5838 if (match(CmpRHS, m_AnyZeroFP()))
5839 CmpRHS = OutputZeroVal;
5840 }
5841 }
5842
5843 LHS = CmpLHS;
5844 RHS = CmpRHS;
5845
5846 // Signed zero may return inconsistent results between implementations.
5847 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
5848 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
5849 // Therefore, we behave conservatively and only proceed if at least one of the
5850 // operands is known to not be zero or if we don't care about signed zero.
5851 switch (Pred) {
5852 default: break;
5853 // FIXME: Include OGT/OLT/UGT/ULT.
5854 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
5855 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
5856 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5857 !isKnownNonZero(CmpRHS))
5858 return {SPF_UNKNOWN, SPNB_NA, false};
5859 }
5860
5861 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
5862 bool Ordered = false;
5863
5864 // When given one NaN and one non-NaN input:
5865 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
5866 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
5867 // ordered comparison fails), which could be NaN or non-NaN.
5868 // So here we discover exactly what NaN behavior is required/accepted.
5869 if (CmpInst::isFPPredicate(Pred)) {
5870 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
5871 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
5872
5873 if (LHSSafe && RHSSafe) {
5874 // Both operands are known non-NaN.
5875 NaNBehavior = SPNB_RETURNS_ANY;
5876 } else if (CmpInst::isOrdered(Pred)) {
5877 // An ordered comparison will return false when given a NaN, so it
5878 // returns the RHS.
5879 Ordered = true;
5880 if (LHSSafe)
5881 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
5882 NaNBehavior = SPNB_RETURNS_NAN;
5883 else if (RHSSafe)
5884 NaNBehavior = SPNB_RETURNS_OTHER;
5885 else
5886 // Completely unsafe.
5887 return {SPF_UNKNOWN, SPNB_NA, false};
5888 } else {
5889 Ordered = false;
5890 // An unordered comparison will return true when given a NaN, so it
5891 // returns the LHS.
5892 if (LHSSafe)
5893 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
5894 NaNBehavior = SPNB_RETURNS_OTHER;
5895 else if (RHSSafe)
5896 NaNBehavior = SPNB_RETURNS_NAN;
5897 else
5898 // Completely unsafe.
5899 return {SPF_UNKNOWN, SPNB_NA, false};
5900 }
5901 }
5902
5903 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
5904 std::swap(CmpLHS, CmpRHS);
5905 Pred = CmpInst::getSwappedPredicate(Pred);
5906 if (NaNBehavior == SPNB_RETURNS_NAN)
5907 NaNBehavior = SPNB_RETURNS_OTHER;
5908 else if (NaNBehavior == SPNB_RETURNS_OTHER)
5909 NaNBehavior = SPNB_RETURNS_NAN;
5910 Ordered = !Ordered;
5911 }
5912
5913 // ([if]cmp X, Y) ? X : Y
5914 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
5915 switch (Pred) {
5916 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
5917 case ICmpInst::ICMP_UGT:
5918 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
5919 case ICmpInst::ICMP_SGT:
5920 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
5921 case ICmpInst::ICMP_ULT:
5922 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
5923 case ICmpInst::ICMP_SLT:
5924 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
5925 case FCmpInst::FCMP_UGT:
5926 case FCmpInst::FCMP_UGE:
5927 case FCmpInst::FCMP_OGT:
5928 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
5929 case FCmpInst::FCMP_ULT:
5930 case FCmpInst::FCMP_ULE:
5931 case FCmpInst::FCMP_OLT:
5932 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
5933 }
5934 }
5935
5936 if (isKnownNegation(TrueVal, FalseVal)) {
5937 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
5938 // match against either LHS or sext(LHS).
5939 auto MaybeSExtCmpLHS =
5940 m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
5941 auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
5942 auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
5943 if (match(TrueVal, MaybeSExtCmpLHS)) {
5944 // Set the return values. If the compare uses the negated value (-X >s 0),
5945 // swap the return values because the negated value is always 'RHS'.
5946 LHS = TrueVal;
5947 RHS = FalseVal;
5948 if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
5949 std::swap(LHS, RHS);
5950
5951 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
5952 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
5953 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5954 return {SPF_ABS, SPNB_NA, false};
5955
5956 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
5957 if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
5958 return {SPF_ABS, SPNB_NA, false};
5959
5960 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
5961 // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
5962 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5963 return {SPF_NABS, SPNB_NA, false};
5964 }
5965 else if (match(FalseVal, MaybeSExtCmpLHS)) {
5966 // Set the return values. If the compare uses the negated value (-X >s 0),
5967 // swap the return values because the negated value is always 'RHS'.
5968 LHS = FalseVal;
5969 RHS = TrueVal;
5970 if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
5971 std::swap(LHS, RHS);
5972
5973 // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
5974 // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
5975 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5976 return {SPF_NABS, SPNB_NA, false};
5977
5978 // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
5979 // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
5980 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5981 return {SPF_ABS, SPNB_NA, false};
5982 }
5983 }
5984
5985 if (CmpInst::isIntPredicate(Pred))
5986 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
5987
5988 // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
5989 // may return either -0.0 or 0.0, so fcmp/select pair has stricter
5990 // semantics than minNum. Be conservative in such case.
5991 if (NaNBehavior != SPNB_RETURNS_ANY ||
5992 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5993 !isKnownNonZero(CmpRHS)))
5994 return {SPF_UNKNOWN, SPNB_NA, false};
5995
5996 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
5997 }
5998
5999 /// Helps to match a select pattern in case of a type mismatch.
6000 ///
6001 /// The function handles the case when the type of the true and false values
6002 /// of a select instruction differs from the type of the cmp instruction
6003 /// operands because of a cast instruction. It checks whether it is legal to
6004 /// move the cast operation after the select. If so, it returns the new second
6005 /// value of the select (with the assumption that the cast has been moved):
6006 /// 1. As the operand of the cast instruction when both values of the select
6007 /// are the same cast instruction.
6008 /// 2. As a restored constant (by applying the reverse cast operation) when the
6009 /// first value of the select is a cast operation and the second value is a
6010 /// constant.
6011 /// NOTE: We return only the new second value because the first value can be
6012 /// accessed as the operand of the cast instruction.
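/// For example (illustrative, case 1): with
///   %cond = icmp ult i8 %x, %y
///   %wx = zext i8 %x to i32
///   %wy = zext i8 %y to i32
///   %sel = select i1 %cond, i32 %wx, i32 %wy
/// lookThroughCast(%cond, %wx, %wy, &CastOp) returns %y, so the min/max can
/// be matched on the narrow i8 operands and the zext moved after the select.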
6013 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
6014 Instruction::CastOps *CastOp) {
6015 auto *Cast1 = dyn_cast<CastInst>(V1);
6016 if (!Cast1)
6017 return nullptr;
6018
6019 *CastOp = Cast1->getOpcode();
6020 Type *SrcTy = Cast1->getSrcTy();
6021 if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
6022 // If V1 and V2 are both the same cast from the same type, look through V1.
6023 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
6024 return Cast2->getOperand(0);
6025 return nullptr;
6026 }
6027
6028 auto *C = dyn_cast<Constant>(V2);
6029 if (!C)
6030 return nullptr;
6031
6032 Constant *CastedTo = nullptr;
6033 switch (*CastOp) {
6034 case Instruction::ZExt:
6035 if (CmpI->isUnsigned())
6036 CastedTo = ConstantExpr::getTrunc(C, SrcTy);
6037 break;
6038 case Instruction::SExt:
6039 if (CmpI->isSigned())
6040 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
6041 break;
6042 case Instruction::Trunc:
6043 Constant *CmpConst;
6044 if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
6045 CmpConst->getType() == SrcTy) {
6046 // Here we have the following case:
6047 //
6048 // %cond = cmp iN %x, CmpConst
6049 // %tr = trunc iN %x to iK
6050 // %narrowsel = select i1 %cond, iK %tr, iK C
6051 //
6052 // We can always move trunc after select operation:
6053 //
6054 // %cond = cmp iN %x, CmpConst
6055 // %widesel = select i1 %cond, iN %x, iN CmpConst
6056 // %tr = trunc iN %widesel to iK
6057 //
6058 // Note that C could be extended in any way because we don't care about
6059 // the upper bits after truncation. This can't be an abs pattern, because
6060 // that would look like:
6061 //
6062 // select i1 %cond, x, -x.
6063 //
6064 // So only a min/max pattern can be matched. Such a match requires the
6065 // widened C == CmpConst. That is why we set the widened C = CmpConst; the
6066 // condition trunc(CmpConst) == C is checked below.
6067 CastedTo = CmpConst;
6068 } else {
6069 CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
6070 }
6071 break;
6072 case Instruction::FPTrunc:
6073 CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
6074 break;
6075 case Instruction::FPExt:
6076 CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
6077 break;
6078 case Instruction::FPToUI:
6079 CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
6080 break;
6081 case Instruction::FPToSI:
6082 CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
6083 break;
6084 case Instruction::UIToFP:
6085 CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
6086 break;
6087 case Instruction::SIToFP:
6088 CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
6089 break;
6090 default:
6091 break;
6092 }
6093
6094 if (!CastedTo)
6095 return nullptr;
6096
6097 // Make sure the cast doesn't lose any information.
6098 Constant *CastedBack =
6099 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
6100 if (CastedBack != C)
6101 return nullptr;
6102
6103 return CastedTo;
6104 }
6105
6106 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
6107 Instruction::CastOps *CastOp,
6108 unsigned Depth) {
6109 if (Depth >= MaxAnalysisRecursionDepth)
6110 return {SPF_UNKNOWN, SPNB_NA, false};
6111
6112 SelectInst *SI = dyn_cast<SelectInst>(V);
6113 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
6114
6115 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
6116 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
6117
6118 Value *TrueVal = SI->getTrueValue();
6119 Value *FalseVal = SI->getFalseValue();
6120
6121 return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
6122 CastOp, Depth);
6123 }
6124
6125 SelectPatternResult llvm::matchDecomposedSelectPattern(
6126 CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
6127 Instruction::CastOps *CastOp, unsigned Depth) {
6128 CmpInst::Predicate Pred = CmpI->getPredicate();
6129 Value *CmpLHS = CmpI->getOperand(0);
6130 Value *CmpRHS = CmpI->getOperand(1);
6131 FastMathFlags FMF;
6132 if (isa<FPMathOperator>(CmpI))
6133 FMF = CmpI->getFastMathFlags();
6134
6135 // Bail out early.
6136 if (CmpI->isEquality())
6137 return {SPF_UNKNOWN, SPNB_NA, false};
6138
6139 // Deal with type mismatches.
6140 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
6141 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
6142 // If this is a potential fmin/fmax with a cast to integer, then ignore
6143 // -0.0 because there is no corresponding integer value.
6144 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
6145 FMF.setNoSignedZeros();
6146 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
6147 cast<CastInst>(TrueVal)->getOperand(0), C,
6148 LHS, RHS, Depth);
6149 }
6150 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
6151 // If this is a potential fmin/fmax with a cast to integer, then ignore
6152 // -0.0 because there is no corresponding integer value.
6153 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
6154 FMF.setNoSignedZeros();
6155 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
6156 C, cast<CastInst>(FalseVal)->getOperand(0),
6157 LHS, RHS, Depth);
6158 }
6159 }
6160 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
6161 LHS, RHS, Depth);
6162 }
6163
6164 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
6165 if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
6166 if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
6167 if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
6168 if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
6169 if (SPF == SPF_FMINNUM)
6170 return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
6171 if (SPF == SPF_FMAXNUM)
6172 return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
6173 llvm_unreachable("unhandled!");
6174 }
6175
6176 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
6177 if (SPF == SPF_SMIN) return SPF_SMAX;
6178 if (SPF == SPF_UMIN) return SPF_UMAX;
6179 if (SPF == SPF_SMAX) return SPF_SMIN;
6180 if (SPF == SPF_UMAX) return SPF_UMIN;
6181 llvm_unreachable("unhandled!");
6182 }
6183
6184 Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {
6185 switch (MinMaxID) {
6186 case Intrinsic::smax: return Intrinsic::smin;
6187 case Intrinsic::smin: return Intrinsic::smax;
6188 case Intrinsic::umax: return Intrinsic::umin;
6189 case Intrinsic::umin: return Intrinsic::umax;
6190 default: llvm_unreachable("Unexpected intrinsic");
6191 }
6192 }
6193
6194 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
6195 return getMinMaxPred(getInverseMinMaxFlavor(SPF));
6196 }
6197
6198 std::pair<Intrinsic::ID, bool>
6199 llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
6200 // Check if VL contains select instructions that can be folded into a min/max
6201 // vector intrinsic and return the intrinsic if it is possible.
6202 // TODO: Support floating point min/max.
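// For example (illustrative): if every I in VL has the shape
//   %c = icmp slt i32 %a, %b
//   %s = select i1 %c, i32 %a, i32 %b
// then each element matches SPF_SMIN and we return {Intrinsic::smin, true},
// where the bool is true if each compare is used only by its select.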
6203 bool AllCmpSingleUse = true;
6204 SelectPatternResult SelectPattern;
6205 SelectPattern.Flavor = SPF_UNKNOWN;
6206 if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
6207 Value *LHS, *RHS;
6208 auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
6209 if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) ||
6210 CurrentPattern.Flavor == SPF_FMINNUM ||
6211 CurrentPattern.Flavor == SPF_FMAXNUM ||
6212 !I->getType()->isIntOrIntVectorTy())
6213 return false;
6214 if (SelectPattern.Flavor != SPF_UNKNOWN &&
6215 SelectPattern.Flavor != CurrentPattern.Flavor)
6216 return false;
6217 SelectPattern = CurrentPattern;
6218 AllCmpSingleUse &=
6219 match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
6220 return true;
6221 })) {
6222 switch (SelectPattern.Flavor) {
6223 case SPF_SMIN:
6224 return {Intrinsic::smin, AllCmpSingleUse};
6225 case SPF_UMIN:
6226 return {Intrinsic::umin, AllCmpSingleUse};
6227 case SPF_SMAX:
6228 return {Intrinsic::smax, AllCmpSingleUse};
6229 case SPF_UMAX:
6230 return {Intrinsic::umax, AllCmpSingleUse};
6231 default:
6232 llvm_unreachable("unexpected select pattern flavor");
6233 }
6234 }
6235 return {Intrinsic::not_intrinsic, false};
6236 }
6237
6238 bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
6239 Value *&Start, Value *&Step) {
6240 // Handle the case of a simple two-predecessor recurrence PHI.
6241 // There's a lot more that could theoretically be done here, but
6242 // this is sufficient to catch some interesting cases.
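// For example (illustrative):
//   %iv = phi i32 [ 0, %entry ], [ %iv.next, %backedge ]
//   %iv.next = add i32 %iv, 1
// matches with BO = %iv.next, Start = 0, Step = 1.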
6243 if (P->getNumIncomingValues() != 2)
6244 return false;
6245
6246 for (unsigned i = 0; i != 2; ++i) {
6247 Value *L = P->getIncomingValue(i);
6248 Value *R = P->getIncomingValue(!i);
6249 Operator *LU = dyn_cast<Operator>(L);
6250 if (!LU)
6251 continue;
6252 unsigned Opcode = LU->getOpcode();
6253
6254 switch (Opcode) {
6255 default:
6256 continue;
6257 // TODO: Expand list -- xor, div, gep, uaddo, etc.
6258 case Instruction::LShr:
6259 case Instruction::AShr:
6260 case Instruction::Shl:
6261 case Instruction::Add:
6262 case Instruction::Sub:
6263 case Instruction::And:
6264 case Instruction::Or:
6265 case Instruction::Mul: {
6266 Value *LL = LU->getOperand(0);
6267 Value *LR = LU->getOperand(1);
6268 // Find a recurrence.
6269 if (LL == P)
6270 L = LR;
6271 else if (LR == P)
6272 L = LL;
6273 else
6274 continue; // Check for recurrence with L and R flipped.
6275
6276 break; // Match!
6277 }
6278 };
6279
6280 // We have matched a recurrence of the form:
6281 // %iv = [R, %entry], [%iv.next, %backedge]
6282 // %iv.next = binop %iv, L
6283 // OR
6284 // %iv = [R, %entry], [%iv.next, %backedge]
6285 // %iv.next = binop L, %iv
6286 BO = cast<BinaryOperator>(LU);
6287 Start = R;
6288 Step = L;
6289 return true;
6290 }
6291 return false;
6292 }
6293
6294 bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
6295 Value *&Start, Value *&Step) {
6296 BinaryOperator *BO = nullptr;
6297 P = dyn_cast<PHINode>(I->getOperand(0));
6298 if (!P)
6299 P = dyn_cast<PHINode>(I->getOperand(1));
6300 return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
6301 }
6302
6303 /// Return true if "icmp Pred LHS RHS" is always true.
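/// For example (illustrative): "icmp ule %x, (add nuw %x, 5)" is always true,
/// whereas "icmp sle %x, (add nsw %x, -1)" is not provable here because the
/// added constant is negative, so false is returned for it.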
6304 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
6305 const Value *RHS, const DataLayout &DL,
6306 unsigned Depth) {
6307 assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
6308 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
6309 return true;
6310
6311 switch (Pred) {
6312 default:
6313 return false;
6314
6315 case CmpInst::ICMP_SLE: {
6316 const APInt *C;
6317
6318 // LHS s<= LHS +_{nsw} C if C >= 0
6319 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
6320 return !C->isNegative();
6321 return false;
6322 }
6323
6324 case CmpInst::ICMP_ULE: {
6325 const APInt *C;
6326
6327 // LHS u<= LHS +_{nuw} C for any C
6328 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
6329 return true;
6330
6331 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
6332 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
6333 const Value *&X,
6334 const APInt *&CA, const APInt *&CB) {
6335 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
6336 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
6337 return true;
6338
6339 // If X & C == 0 then (X | C) == X +_{nuw} C
6340 if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
6341 match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
6342 KnownBits Known(CA->getBitWidth());
6343 computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
6344 /*CxtI*/ nullptr, /*DT*/ nullptr);
6345 if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
6346 return true;
6347 }
6348
6349 return false;
6350 };
6351
6352 const Value *X;
6353 const APInt *CLHS, *CRHS;
6354 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
6355 return CLHS->ule(*CRHS);
6356
6357 return false;
6358 }
6359 }
6360 }
6361
6362 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
6363 /// ALHS ARHS" is true. Otherwise, return None.
6364 static Optional<bool>
6365 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
6366 const Value *ARHS, const Value *BLHS, const Value *BRHS,
6367 const DataLayout &DL, unsigned Depth) {
6368 switch (Pred) {
6369 default:
6370 return None;
6371
6372 case CmpInst::ICMP_SLT:
6373 case CmpInst::ICMP_SLE:
6374 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
6375 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
6376 return true;
6377 return None;
6378
6379 case CmpInst::ICMP_ULT:
6380 case CmpInst::ICMP_ULE:
6381 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
6382 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
6383 return true;
6384 return None;
6385 }
6386 }
6387
6388 /// Return true if the operands of the two compares match. IsSwappedOps is true
6389 /// when the operands match, but are swapped.
6390 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
6391 const Value *BLHS, const Value *BRHS,
6392 bool &IsSwappedOps) {
6393
6394 bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
6395 IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
6396 return IsMatchingOps || IsSwappedOps;
6397 }
6398
6399 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
6400 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
6401 /// Otherwise, return None if we can't infer anything.
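/// For example (illustrative), with identical operands X and Y:
///   (X s< Y) being true implies (X s<= Y) is true,
///   (X s< Y) being true implies (X s> Y) is false, and
///   (X s< Y) says nothing about (X u< Y), so None is returned.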
6402 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
6403 CmpInst::Predicate BPred,
6404 bool AreSwappedOps) {
6405 // Canonicalize the predicate as if the operands were not commuted.
6406 if (AreSwappedOps)
6407 BPred = ICmpInst::getSwappedPredicate(BPred);
6408
6409 if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
6410 return true;
6411 if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
6412 return false;
6413
6414 return None;
6415 }
6416
6417 /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
6418 /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
6419 /// Otherwise, return None if we can't infer anything.
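/// For example (illustrative): (X u< 5) implies (X u< 10) is true because the
/// exact region [0, 5) lies entirely inside the allowed region [0, 10), i.e.
/// the difference of the two ranges is empty.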
6420 static Optional<bool>
6421 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
6422 const ConstantInt *C1,
6423 CmpInst::Predicate BPred,
6424 const ConstantInt *C2) {
6425 ConstantRange DomCR =
6426 ConstantRange::makeExactICmpRegion(APred, C1->getValue());
6427 ConstantRange CR =
6428 ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
6429 ConstantRange Intersection = DomCR.intersectWith(CR);
6430 ConstantRange Difference = DomCR.difference(CR);
6431 if (Intersection.isEmptySet())
6432 return false;
6433 if (Difference.isEmptySet())
6434 return true;
6435 return None;
6436 }
6437
6438 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
6439 /// false. Otherwise, return None if we can't infer anything.
6440 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
6441 CmpInst::Predicate BPred,
6442 const Value *BLHS, const Value *BRHS,
6443 const DataLayout &DL, bool LHSIsTrue,
6444 unsigned Depth) {
6445 Value *ALHS = LHS->getOperand(0);
6446 Value *ARHS = LHS->getOperand(1);
6447
6448 // The rest of the logic assumes the LHS condition is true. If that's not the
6449 // case, invert the predicate to make it so.
6450 CmpInst::Predicate APred =
6451 LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
6452
6453 // Can we infer anything when the two compares have matching operands?
6454 bool AreSwappedOps;
6455 if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
6456 if (Optional<bool> Implication = isImpliedCondMatchingOperands(
6457 APred, BPred, AreSwappedOps))
6458 return Implication;
6459 // No amount of additional analysis will infer the second condition, so
6460 // early exit.
6461 return None;
6462 }
6463
6464 // Can we infer anything when the LHS operands match and the RHS operands are
6465 // constants (not necessarily matching)?
6466 if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
6467 if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
6468 APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
6469 return Implication;
6470 // No amount of additional analysis will infer the second condition, so
6471 // early exit.
6472 return None;
6473 }
6474
6475 if (APred == BPred)
6476 return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
6477 return None;
6478 }
6479
6480 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
6481 /// false. Otherwise, return None if we can't infer anything. We expect the
6482 /// RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select' instruction.
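/// For example (illustrative): if LHS is (or %a, %b) and LHSIsTrue is false,
/// both %a and %b must be false, so each leg is checked for an implication of
/// the RHS compare; the dual holds for a known-true (and %a, %b).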
6483 static Optional<bool>
6484 isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
6485 const Value *RHSOp0, const Value *RHSOp1,
6486 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6487 // The LHS must be an 'or', 'and', or a 'select' instruction.
6488 assert((LHS->getOpcode() == Instruction::And ||
6489 LHS->getOpcode() == Instruction::Or ||
6490 LHS->getOpcode() == Instruction::Select) &&
6491 "Expected LHS to be 'and', 'or', or 'select'.");
6492
6493 assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
6494
6495 // If the result of an 'or' is false, then we know both legs of the 'or' are
6496 // false. Similarly, if the result of an 'and' is true, then we know both
6497 // legs of the 'and' are true.
6498 const Value *ALHS, *ARHS;
6499 if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
6500 (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
6501 // FIXME: Make this non-recursive.
6502 if (Optional<bool> Implication = isImpliedCondition(
6503 ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6504 return Implication;
6505 if (Optional<bool> Implication = isImpliedCondition(
6506 ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6507 return Implication;
6508 return None;
6509 }
6510 return None;
6511 }
6512
6513 Optional<bool>
6514 llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
6515 const Value *RHSOp0, const Value *RHSOp1,
6516 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6517 // Bail out when we hit the limit.
6518 if (Depth == MaxAnalysisRecursionDepth)
6519 return None;
6520
6521 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
6522 // example.
6523 if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
6524 return None;
6525
6526 Type *OpTy = LHS->getType();
6527 assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
6528
6529 // FIXME: Extend the code below to handle vectors.
6530 if (OpTy->isVectorTy())
6531 return None;
6532
6533 assert(OpTy->isIntegerTy(1) && "implied by above");
6534
6535 // Both LHS and RHS are icmps.
6536 const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
6537 if (LHSCmp)
6538 return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6539 Depth);
6540
6541 // The LHS should be an 'or', 'and', or a 'select' instruction. We expect
6542 // the RHS to be an icmp.
6543 // FIXME: Add support for and/or/select on the RHS.
6544 if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
6545 if ((LHSI->getOpcode() == Instruction::And ||
6546 LHSI->getOpcode() == Instruction::Or ||
6547 LHSI->getOpcode() == Instruction::Select))
6548 return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6549 Depth);
6550 }
6551 return None;
6552 }
6553
6554 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
6555 const DataLayout &DL, bool LHSIsTrue,
6556 unsigned Depth) {
6557 // LHS ==> RHS by definition
6558 if (LHS == RHS)
6559 return LHSIsTrue;
6560
6561 const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
6562 if (RHSCmp)
6563 return isImpliedCondition(LHS, RHSCmp->getPredicate(),
6564 RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
6565 LHSIsTrue, Depth);
6566 return None;
6567 }
6568
6569 // Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
6570 // condition dominating ContextI or nullptr, if no condition is found.
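// For example (illustrative): if ContextI's block has a single predecessor
// ending in "br i1 %c, label %ctx, label %other", this returns {%c, true};
// if the false edge targets ContextI's block instead, it returns {%c, false}.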
6571 static std::pair<Value *, bool>
6572 getDomPredecessorCondition(const Instruction *ContextI) {
6573 if (!ContextI || !ContextI->getParent())
6574 return {nullptr, false};
6575
6576 // TODO: This is a poor/cheap way to determine dominance. Should we use a
6577 // dominator tree (e.g., from a SimplifyQuery) instead?
6578 const BasicBlock *ContextBB = ContextI->getParent();
6579 const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
6580 if (!PredBB)
6581 return {nullptr, false};
6582
6583 // We need a conditional branch in the predecessor.
6584 Value *PredCond;
6585 BasicBlock *TrueBB, *FalseBB;
6586 if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
6587 return {nullptr, false};
6588
6589 // The branch should get simplified. Don't bother simplifying this condition.
6590 if (TrueBB == FalseBB)
6591 return {nullptr, false};
6592
6593 assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
6594 "Predecessor block does not point to successor?");
6595
6596 // Is this condition implied by the predecessor condition?
6597 return {PredCond, TrueBB == ContextBB};
6598 }
6599
6600 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
6601 const Instruction *ContextI,
6602 const DataLayout &DL) {
6603 assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
6604 auto PredCond = getDomPredecessorCondition(ContextI);
6605 if (PredCond.first)
6606 return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
6607 return None;
6608 }
6609
6610 Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
6611 const Value *LHS, const Value *RHS,
6612 const Instruction *ContextI,
6613 const DataLayout &DL) {
6614 auto PredCond = getDomPredecessorCondition(ContextI);
6615 if (PredCond.first)
6616 return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
6617 PredCond.second);
6618 return None;
6619 }
6620
6621 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
6622 APInt &Upper, const InstrInfoQuery &IIQ) {
6623 unsigned Width = Lower.getBitWidth();
6624 const APInt *C;
6625 switch (BO.getOpcode()) {
6626 case Instruction::Add:
6627 if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6628 // FIXME: If we have both nuw and nsw, we should reduce the range further.
6629 if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6630 // 'add nuw x, C' produces [C, UINT_MAX].
6631 Lower = *C;
6632 } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6633 if (C->isNegative()) {
6634 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
6635 Lower = APInt::getSignedMinValue(Width);
6636 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6637 } else {
6638 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
6639 Lower = APInt::getSignedMinValue(Width) + *C;
6640 Upper = APInt::getSignedMaxValue(Width) + 1;
6641 }
6642 }
6643 }
6644 break;
6645
6646 case Instruction::And:
6647 if (match(BO.getOperand(1), m_APInt(C)))
6648 // 'and x, C' produces [0, C].
6649 Upper = *C + 1;
6650 break;
6651
6652 case Instruction::Or:
6653 if (match(BO.getOperand(1), m_APInt(C)))
6654 // 'or x, C' produces [C, UINT_MAX].
6655 Lower = *C;
6656 break;
6657
6658 case Instruction::AShr:
6659 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6660 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
6661 Lower = APInt::getSignedMinValue(Width).ashr(*C);
6662 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
6663 } else if (match(BO.getOperand(0), m_APInt(C))) {
6664 unsigned ShiftAmount = Width - 1;
6665 if (!C->isNullValue() && IIQ.isExact(&BO))
6666 ShiftAmount = C->countTrailingZeros();
6667 if (C->isNegative()) {
6668 // 'ashr C, x' produces [C, C >> (Width-1)]
6669 Lower = *C;
6670 Upper = C->ashr(ShiftAmount) + 1;
6671 } else {
6672 // 'ashr C, x' produces [C >> (Width-1), C]
6673 Lower = C->ashr(ShiftAmount);
6674 Upper = *C + 1;
6675 }
6676 }
6677 break;
6678
6679 case Instruction::LShr:
6680 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6681 // 'lshr x, C' produces [0, UINT_MAX >> C].
6682 Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
6683 } else if (match(BO.getOperand(0), m_APInt(C))) {
6684 // 'lshr C, x' produces [C >> (Width-1), C].
6685 unsigned ShiftAmount = Width - 1;
6686 if (!C->isNullValue() && IIQ.isExact(&BO))
6687 ShiftAmount = C->countTrailingZeros();
6688 Lower = C->lshr(ShiftAmount);
6689 Upper = *C + 1;
6690 }
6691 break;
6692
6693 case Instruction::Shl:
6694 if (match(BO.getOperand(0), m_APInt(C))) {
6695 if (IIQ.hasNoUnsignedWrap(&BO)) {
6696 // 'shl nuw C, x' produces [C, C << CLZ(C)]
6697 Lower = *C;
6698 Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
6699 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
6700 if (C->isNegative()) {
6701 // 'shl nsw C, x' produces [C << CLO(C)-1, C]
6702 unsigned ShiftAmount = C->countLeadingOnes() - 1;
6703 Lower = C->shl(ShiftAmount);
6704 Upper = *C + 1;
6705 } else {
6706 // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
6707 unsigned ShiftAmount = C->countLeadingZeros() - 1;
6708 Lower = *C;
6709 Upper = C->shl(ShiftAmount) + 1;
6710 }
6711 }
6712 }
6713 break;
6714
6715 case Instruction::SDiv:
6716 if (match(BO.getOperand(1), m_APInt(C))) {
6717 APInt IntMin = APInt::getSignedMinValue(Width);
6718 APInt IntMax = APInt::getSignedMaxValue(Width);
6719 if (C->isAllOnesValue()) {
6720 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
6721 // (INT_MIN is excluded because INT_MIN / -1 overflows).
6722 Lower = IntMin + 1;
6723 Upper = IntMax + 1;
6724 } else if (C->countLeadingZeros() < Width - 1) {
6725 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
6726 // where C != -1 and C != 0 and C != 1
6727 Lower = IntMin.sdiv(*C);
6728 Upper = IntMax.sdiv(*C);
6729 if (Lower.sgt(Upper))
6730 std::swap(Lower, Upper);
6731 Upper = Upper + 1;
6732 assert(Upper != Lower && "Upper part of range has wrapped!");
6733 }
6734 } else if (match(BO.getOperand(0), m_APInt(C))) {
6735 if (C->isMinSignedValue()) {
6736 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
6737 Lower = *C;
6738 Upper = Lower.lshr(1) + 1;
6739 } else {
6740 // 'sdiv C, x' produces [-|C|, |C|].
6741 Upper = C->abs() + 1;
6742 Lower = (-Upper) + 1;
6743 }
6744 }
6745 break;
6746
6747 case Instruction::UDiv:
6748 if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6749 // 'udiv x, C' produces [0, UINT_MAX / C].
6750 Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
6751 } else if (match(BO.getOperand(0), m_APInt(C))) {
6752 // 'udiv C, x' produces [0, C].
6753 Upper = *C + 1;
6754 }
6755 break;
6756
6757 case Instruction::SRem:
6758 if (match(BO.getOperand(1), m_APInt(C))) {
6759 // 'srem x, C' produces (-|C|, |C|).
6760 Upper = C->abs();
6761 Lower = (-Upper) + 1;
6762 }
6763 break;
6764
6765 case Instruction::URem:
6766 if (match(BO.getOperand(1), m_APInt(C)))
6767 // 'urem x, C' produces [0, C).
6768 Upper = *C;
6769 break;
6770
6771 default:
6772 break;
6773 }
6774 }
6775
6776 static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
6777 APInt &Upper) {
6778 unsigned Width = Lower.getBitWidth();
6779 const APInt *C;
6780 switch (II.getIntrinsicID()) {
6781 case Intrinsic::ctpop:
6782 case Intrinsic::ctlz:
6783 case Intrinsic::cttz:
6784 // Maximum of set/clear bits is the bit width.
6785 assert(Lower == 0 && "Expected lower bound to be zero");
6786 Upper = Width + 1;
6787 break;
6788 case Intrinsic::uadd_sat:
6789 // uadd.sat(x, C) produces [C, UINT_MAX].
6790 if (match(II.getOperand(0), m_APInt(C)) ||
6791 match(II.getOperand(1), m_APInt(C)))
6792 Lower = *C;
6793 break;
6794 case Intrinsic::sadd_sat:
6795 if (match(II.getOperand(0), m_APInt(C)) ||
6796 match(II.getOperand(1), m_APInt(C))) {
6797 if (C->isNegative()) {
6798 // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
6799 Lower = APInt::getSignedMinValue(Width);
6800 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6801 } else {
6802 // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
6803 Lower = APInt::getSignedMinValue(Width) + *C;
6804 Upper = APInt::getSignedMaxValue(Width) + 1;
6805 }
6806 }
6807 break;
6808 case Intrinsic::usub_sat:
6809 // usub.sat(C, x) produces [0, C].
6810 if (match(II.getOperand(0), m_APInt(C)))
6811 Upper = *C + 1;
6812 // usub.sat(x, C) produces [0, UINT_MAX - C].
6813 else if (match(II.getOperand(1), m_APInt(C)))
6814 Upper = APInt::getMaxValue(Width) - *C + 1;
6815 break;
6816 case Intrinsic::ssub_sat:
6817 if (match(II.getOperand(0), m_APInt(C))) {
6818 if (C->isNegative()) {
6819 // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
6820 Lower = APInt::getSignedMinValue(Width);
6821 Upper = *C - APInt::getSignedMinValue(Width) + 1;
6822 } else {
6823 // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
6824 Lower = *C - APInt::getSignedMaxValue(Width);
6825 Upper = APInt::getSignedMaxValue(Width) + 1;
6826 }
6827 } else if (match(II.getOperand(1), m_APInt(C))) {
6828 if (C->isNegative()) {
6829 // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX].
6830 Lower = APInt::getSignedMinValue(Width) - *C;
6831 Upper = APInt::getSignedMaxValue(Width) + 1;
6832 } else {
6833 // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
6834 Lower = APInt::getSignedMinValue(Width);
6835 Upper = APInt::getSignedMaxValue(Width) - *C + 1;
6836 }
6837 }
6838 break;
6839 case Intrinsic::umin:
6840 case Intrinsic::umax:
6841 case Intrinsic::smin:
6842 case Intrinsic::smax:
6843 if (!match(II.getOperand(0), m_APInt(C)) &&
6844 !match(II.getOperand(1), m_APInt(C)))
6845 break;
6846
6847 switch (II.getIntrinsicID()) {
6848 case Intrinsic::umin:
6849 Upper = *C + 1;
6850 break;
6851 case Intrinsic::umax:
6852 Lower = *C;
6853 break;
6854 case Intrinsic::smin:
6855 Lower = APInt::getSignedMinValue(Width);
6856 Upper = *C + 1;
6857 break;
6858 case Intrinsic::smax:
6859 Lower = *C;
6860 Upper = APInt::getSignedMaxValue(Width) + 1;
6861 break;
6862 default:
6863 llvm_unreachable("Must be min/max intrinsic");
6864 }
6865 break;
6866 case Intrinsic::abs:
6867 // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
6868 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6869 if (match(II.getOperand(1), m_One()))
6870 Upper = APInt::getSignedMaxValue(Width) + 1;
6871 else
6872 Upper = APInt::getSignedMinValue(Width) + 1;
6873 break;
6874 default:
6875 break;
6876 }
6877 }
6878
6879 static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
6880 APInt &Upper, const InstrInfoQuery &IIQ) {
6881 const Value *LHS = nullptr, *RHS = nullptr;
6882 SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
6883 if (R.Flavor == SPF_UNKNOWN)
6884 return;
6885
6886 unsigned BitWidth = SI.getType()->getScalarSizeInBits();
6887
6888 if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
6889 // If the negation part of the abs (in RHS) has the NSW flag,
6890 // then the result of abs(X) is [0..SIGNED_MAX],
6891 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6892 Lower = APInt::getNullValue(BitWidth);
6893 if (match(RHS, m_Neg(m_Specific(LHS))) &&
6894 IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
6895 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6896 else
6897 Upper = APInt::getSignedMinValue(BitWidth) + 1;
6898 return;
6899 }
6900
6901 if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
6902 // The result of -abs(X) is <= 0.
6903 Lower = APInt::getSignedMinValue(BitWidth);
6904 Upper = APInt(BitWidth, 1);
6905 return;
6906 }
6907
6908 const APInt *C;
6909 if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
6910 return;
6911
6912 switch (R.Flavor) {
6913 case SPF_UMIN:
6914 Upper = *C + 1;
6915 break;
6916 case SPF_UMAX:
6917 Lower = *C;
6918 break;
6919 case SPF_SMIN:
6920 Lower = APInt::getSignedMinValue(BitWidth);
6921 Upper = *C + 1;
6922 break;
6923 case SPF_SMAX:
6924 Lower = *C;
6925 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6926 break;
6927 default:
6928 break;
6929 }
6930 }
6931
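/// For example (illustrative): for "%r = urem i32 %x, 8" this returns [0, 8),
/// and for "%a = and i32 %x, 15" it returns [0, 16).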
6932 ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
6933 AssumptionCache *AC,
6934 const Instruction *CtxI,
6935 unsigned Depth) {
6936 assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
6937
6938 if (Depth == MaxAnalysisRecursionDepth)
6939 return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
6940
6941 const APInt *C;
6942 if (match(V, m_APInt(C)))
6943 return ConstantRange(*C);
6944
6945 InstrInfoQuery IIQ(UseInstrInfo);
6946 unsigned BitWidth = V->getType()->getScalarSizeInBits();
6947 APInt Lower = APInt(BitWidth, 0);
6948 APInt Upper = APInt(BitWidth, 0);
6949 if (auto *BO = dyn_cast<BinaryOperator>(V))
6950 setLimitsForBinOp(*BO, Lower, Upper, IIQ);
6951 else if (auto *II = dyn_cast<IntrinsicInst>(V))
6952 setLimitsForIntrinsic(*II, Lower, Upper);
6953 else if (auto *SI = dyn_cast<SelectInst>(V))
6954 setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
6955
6956 ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
6957
6958 if (auto *I = dyn_cast<Instruction>(V))
6959 if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
6960 CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
6961
6962 if (CtxI && AC) {
6963 // Try to restrict the range based on information from assumptions.
6964 for (auto &AssumeVH : AC->assumptionsFor(V)) {
6965 if (!AssumeVH)
6966 continue;
6967 CallInst *I = cast<CallInst>(AssumeVH);
6968 assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
6969 "Got assumption for the wrong function!");
6970 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
6971 "must be an assume intrinsic");
6972
6973 if (!isValidAssumeForContext(I, CtxI, nullptr))
6974 continue;
6975 Value *Arg = I->getArgOperand(0);
6976 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
6977 // Currently we just use information from comparisons.
6978 if (!Cmp || Cmp->getOperand(0) != V)
6979 continue;
6980 ConstantRange RHS = computeConstantRange(Cmp->getOperand(1), UseInstrInfo,
6981 AC, I, Depth + 1);
6982 CR = CR.intersectWith(
6983 ConstantRange::makeSatisfyingICmpRegion(Cmp->getPredicate(), RHS));
6984 }
6985 }
6986
6987 return CR;
6988 }
6989
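/// For example (illustrative): for
///   %p = getelementptr { i32, i32 }, { i32, i32 }* %base, i64 0, i32 1
/// getOffsetFromIndex(%p, /*Idx=*/1, DL) adds 0 for the leading array index
/// plus the field offset of element 1 (4 bytes under a typical layout) and
/// returns 4.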
6990 static Optional<int64_t>
6991 getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
6992 // Skip over the first indices.
6993 gep_type_iterator GTI = gep_type_begin(GEP);
6994 for (unsigned i = 1; i != Idx; ++i, ++GTI)
6995 /*skip along*/;
6996
6997 // Compute the offset implied by the rest of the indices.
6998 int64_t Offset = 0;
6999 for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
7000 ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
7001 if (!OpC)
7002 return None;
7003 if (OpC->isZero())
7004 continue; // No offset.
7005
7006 // Handle struct indices, which add their field offset to the pointer.
7007 if (StructType *STy = GTI.getStructTypeOrNull()) {
7008 Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
7009 continue;
7010 }
7011
7012 // Otherwise, we have a sequential type like an array or fixed-length
7013 // vector. Multiply the index by the ElementSize.
7014 TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
7015 if (Size.isScalable())
7016 return None;
7017 Offset += Size.getFixedSize() * OpC->getSExtValue();
7018 }
7019
7020 return Offset;
7021 }
7022
7023 Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
7024 const DataLayout &DL) {
7025 Ptr1 = Ptr1->stripPointerCasts();
7026 Ptr2 = Ptr2->stripPointerCasts();
7027
7028 // Handle the trivial case first.
7029 if (Ptr1 == Ptr2) {
7030 return 0;
7031 }
7032
7033 const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
7034 const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
7035
7036 // If one pointer is a GEP, see if the GEP is a constant offset from the
7037 // base, as in "P" and "gep P, 1".
7038 // Also do this iteratively to handle the following case:
7039 // Ptr_t1 = GEP Ptr1, c1
7040 // Ptr_t2 = GEP Ptr_t1, c2
7041 // Ptr2 = GEP Ptr_t2, c3
7042 // where we will return c1+c2+c3.
7043 // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base
7044 // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases
7045 // are the same, and return the difference between offsets.
7046 auto getOffsetFromBase = [&DL](const GEPOperator *GEP,
7047 const Value *Ptr) -> Optional<int64_t> {
7048 const GEPOperator *GEP_T = GEP;
7049 int64_t OffsetVal = 0;
7050 bool HasSameBase = false;
7051 while (GEP_T) {
7052 auto Offset = getOffsetFromIndex(GEP_T, 1, DL);
7053 if (!Offset)
7054 return None;
7055 OffsetVal += *Offset;
7056 auto Op0 = GEP_T->getOperand(0)->stripPointerCasts();
7057 if (Op0 == Ptr) {
7058 HasSameBase = true;
7059 break;
7060 }
7061 GEP_T = dyn_cast<GEPOperator>(Op0);
7062 }
7063 if (!HasSameBase)
7064 return None;
7065 return OffsetVal;
7066 };
7067
7068 if (GEP1) {
7069 auto Offset = getOffsetFromBase(GEP1, Ptr2);
7070 if (Offset)
7071 return -*Offset;
7072 }
7073 if (GEP2) {
7074 auto Offset = getOffsetFromBase(GEP2, Ptr1);
7075 if (Offset)
7076 return Offset;
7077 }
7078
7079 // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
7080 // base. After that base, they may have some number of common (and
7081 // potentially variable) indices. After those, each may have a constant
7082 // offset, which determines their offset from each other. At this point, we
7083 // handle no other case.
7084 if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
7085 return None;
7086
7087 // Skip any common indices and track the GEP types.
7088 unsigned Idx = 1;
7089 for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
7090 if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
7091 break;
7092
7093 auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);
7094 auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);
7095 if (!Offset1 || !Offset2)
7096 return None;
7097 return *Offset2 - *Offset1;
7098 }
7099