1 //===-- ConstraintElimination.cpp - Eliminate conds using constraints. ----===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Eliminate conditions based on constraints collected from dominating 10 // conditions. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Transforms/Scalar/ConstraintElimination.h" 15 #include "llvm/ADT/STLExtras.h" 16 #include "llvm/ADT/ScopeExit.h" 17 #include "llvm/ADT/SmallVector.h" 18 #include "llvm/ADT/Statistic.h" 19 #include "llvm/Analysis/ConstraintSystem.h" 20 #include "llvm/Analysis/GlobalsModRef.h" 21 #include "llvm/Analysis/LoopInfo.h" 22 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 23 #include "llvm/Analysis/ScalarEvolution.h" 24 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 25 #include "llvm/Analysis/ValueTracking.h" 26 #include "llvm/IR/DataLayout.h" 27 #include "llvm/IR/Dominators.h" 28 #include "llvm/IR/Function.h" 29 #include "llvm/IR/IRBuilder.h" 30 #include "llvm/IR/InstrTypes.h" 31 #include "llvm/IR/Instructions.h" 32 #include "llvm/IR/PatternMatch.h" 33 #include "llvm/IR/Verifier.h" 34 #include "llvm/Pass.h" 35 #include "llvm/Support/CommandLine.h" 36 #include "llvm/Support/Debug.h" 37 #include "llvm/Support/DebugCounter.h" 38 #include "llvm/Support/MathExtras.h" 39 #include "llvm/Transforms/Utils/Cloning.h" 40 #include "llvm/Transforms/Utils/ValueMapper.h" 41 42 #include <cmath> 43 #include <optional> 44 #include <string> 45 46 using namespace llvm; 47 using namespace PatternMatch; 48 49 #define DEBUG_TYPE "constraint-elimination" 50 51 STATISTIC(NumCondsRemoved, "Number of instructions removed"); 52 DEBUG_COUNTER(EliminatedCounter, "conds-eliminated", 53 "Controls which conditions are eliminated"); 54 55 static cl::opt<unsigned> 56 MaxRows("constraint-elimination-max-rows", cl::init(500), cl::Hidden, 57 cl::desc("Maximum number of rows to keep in constraint system")); 58 59 static cl::opt<bool> DumpReproducers( 60 "constraint-elimination-dump-reproducers", cl::init(false), cl::Hidden, 61 cl::desc("Dump IR to reproduce successful transformations.")); 62 63 static int64_t MaxConstraintValue = std::numeric_limits<int64_t>::max(); 64 static int64_t MinSignedConstraintValue = std::numeric_limits<int64_t>::min(); 65 66 // A helper to multiply 2 signed integers where overflowing is allowed. 67 static int64_t multiplyWithOverflow(int64_t A, int64_t B) { 68 int64_t Result; 69 MulOverflow(A, B, Result); 70 return Result; 71 } 72 73 // A helper to add 2 signed integers where overflowing is allowed. 74 static int64_t addWithOverflow(int64_t A, int64_t B) { 75 int64_t Result; 76 AddOverflow(A, B, Result); 77 return Result; 78 } 79 80 static Instruction *getContextInstForUse(Use &U) { 81 Instruction *UserI = cast<Instruction>(U.getUser()); 82 if (auto *Phi = dyn_cast<PHINode>(UserI)) 83 UserI = Phi->getIncomingBlock(U)->getTerminator(); 84 return UserI; 85 } 86 87 namespace { 88 /// Struct to express a condition of the form %Op0 Pred %Op1. 
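/// For example (illustrative, not taken from a particular test), a dominating
/// branch on %x u< %y would be recorded as {CmpInst::ICMP_ULT, %x, %y}.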
struct ConditionTy {
  CmpInst::Predicate Pred;
  Value *Op0;
  Value *Op1;

  ConditionTy()
      : Pred(CmpInst::BAD_ICMP_PREDICATE), Op0(nullptr), Op1(nullptr) {}
  ConditionTy(CmpInst::Predicate Pred, Value *Op0, Value *Op1)
      : Pred(Pred), Op0(Op0), Op1(Op1) {}
};

/// Represents either
///  * a condition that holds on entry to a block (=condition fact)
///  * an assume (=assume fact)
///  * a use of a compare instruction to simplify.
/// It also tracks the Dominator DFS in and out numbers for each entry.
struct FactOrCheck {
  enum class EntryTy {
    ConditionFact, /// A condition that holds on entry to a block.
    InstFact,      /// A fact that holds after Inst executed (e.g. an assume or
                   /// min/max intrinsic).
    InstCheck,     /// An instruction to simplify (e.g. an overflow math
                   /// intrinsic).
    UseCheck       /// A use of a compare instruction to simplify.
  };

  union {
    Instruction *Inst;
    Use *U;
    ConditionTy Cond;
  };

  /// A pre-condition that must hold for the current fact to be added to the
  /// system.
  ConditionTy DoesHold;

  unsigned NumIn;
  unsigned NumOut;
  EntryTy Ty;

  FactOrCheck(EntryTy Ty, DomTreeNode *DTN, Instruction *Inst)
      : Inst(Inst), NumIn(DTN->getDFSNumIn()), NumOut(DTN->getDFSNumOut()),
        Ty(Ty) {}

  FactOrCheck(DomTreeNode *DTN, Use *U)
      : U(U), DoesHold(CmpInst::BAD_ICMP_PREDICATE, nullptr, nullptr),
        NumIn(DTN->getDFSNumIn()), NumOut(DTN->getDFSNumOut()),
        Ty(EntryTy::UseCheck) {}

  FactOrCheck(DomTreeNode *DTN, CmpInst::Predicate Pred, Value *Op0, Value *Op1,
              ConditionTy Precond = ConditionTy())
      : Cond(Pred, Op0, Op1), DoesHold(Precond), NumIn(DTN->getDFSNumIn()),
        NumOut(DTN->getDFSNumOut()), Ty(EntryTy::ConditionFact) {}

  static FactOrCheck getConditionFact(DomTreeNode *DTN, CmpInst::Predicate Pred,
                                      Value *Op0, Value *Op1,
                                      ConditionTy Precond = ConditionTy()) {
    return FactOrCheck(DTN, Pred, Op0, Op1, Precond);
  }

  static FactOrCheck getInstFact(DomTreeNode *DTN, Instruction *Inst) {
    return FactOrCheck(EntryTy::InstFact, DTN, Inst);
  }

  static FactOrCheck getCheck(DomTreeNode *DTN, Use *U) {
    return FactOrCheck(DTN, U);
  }

  static FactOrCheck getCheck(DomTreeNode *DTN, CallInst *CI) {
    return FactOrCheck(EntryTy::InstCheck, DTN, CI);
  }

  bool isCheck() const {
    return Ty == EntryTy::InstCheck || Ty == EntryTy::UseCheck;
  }

  Instruction *getContextInst() const {
    if (Ty == EntryTy::UseCheck)
      return getContextInstForUse(*U);
    return Inst;
  }

  Instruction *getInstructionToSimplify() const {
    assert(isCheck());
    if (Ty == EntryTy::InstCheck)
      return Inst;
    // The use may have been simplified to a constant already.
    return dyn_cast<Instruction>(*U);
  }

  bool isConditionFact() const { return Ty == EntryTy::ConditionFact; }
};

/// Keep the state required to build the worklist.
struct State {
  DominatorTree &DT;
  LoopInfo &LI;
  ScalarEvolution &SE;
  SmallVector<FactOrCheck, 64> WorkList;

  State(DominatorTree &DT, LoopInfo &LI, ScalarEvolution &SE)
      : DT(DT), LI(LI), SE(SE) {}

  /// Process block \p BB and add known facts to work-list.
  void addInfoFor(BasicBlock &BB);

  /// Try to add facts for loop inductions (AddRecs) in EQ/NE compares
  /// controlling the loop header.
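  /// For example (illustrative sketch), for a loop header that exits on
  ///   %ec = icmp ne i64 %iv, %n
  /// with %iv = {%start,+,1}, facts such as %iv uge %start and, conditional on
  /// %start ule %n, %iv ult %n can be queued for the in-loop successor.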
197 void addInfoForInductions(BasicBlock &BB); 198 199 /// Returns true if we can add a known condition from BB to its successor 200 /// block Succ. 201 bool canAddSuccessor(BasicBlock &BB, BasicBlock *Succ) const { 202 return DT.dominates(BasicBlockEdge(&BB, Succ), Succ); 203 } 204 }; 205 206 class ConstraintInfo; 207 208 struct StackEntry { 209 unsigned NumIn; 210 unsigned NumOut; 211 bool IsSigned = false; 212 /// Variables that can be removed from the system once the stack entry gets 213 /// removed. 214 SmallVector<Value *, 2> ValuesToRelease; 215 216 StackEntry(unsigned NumIn, unsigned NumOut, bool IsSigned, 217 SmallVector<Value *, 2> ValuesToRelease) 218 : NumIn(NumIn), NumOut(NumOut), IsSigned(IsSigned), 219 ValuesToRelease(ValuesToRelease) {} 220 }; 221 222 struct ConstraintTy { 223 SmallVector<int64_t, 8> Coefficients; 224 SmallVector<ConditionTy, 2> Preconditions; 225 226 SmallVector<SmallVector<int64_t, 8>> ExtraInfo; 227 228 bool IsSigned = false; 229 230 ConstraintTy() = default; 231 232 ConstraintTy(SmallVector<int64_t, 8> Coefficients, bool IsSigned, bool IsEq, 233 bool IsNe) 234 : Coefficients(std::move(Coefficients)), IsSigned(IsSigned), IsEq(IsEq), 235 IsNe(IsNe) {} 236 237 unsigned size() const { return Coefficients.size(); } 238 239 unsigned empty() const { return Coefficients.empty(); } 240 241 /// Returns true if all preconditions for this list of constraints are 242 /// satisfied given \p CS and the corresponding \p Value2Index mapping. 243 bool isValid(const ConstraintInfo &Info) const; 244 245 bool isEq() const { return IsEq; } 246 247 bool isNe() const { return IsNe; } 248 249 /// Check if the current constraint is implied by the given ConstraintSystem. 250 /// 251 /// \return true or false if the constraint is proven to be respectively true, 252 /// or false. When the constraint cannot be proven to be either true or false, 253 /// std::nullopt is returned. 254 std::optional<bool> isImpliedBy(const ConstraintSystem &CS) const; 255 256 private: 257 bool IsEq = false; 258 bool IsNe = false; 259 }; 260 261 /// Wrapper encapsulating separate constraint systems and corresponding value 262 /// mappings for both unsigned and signed information. Facts are added to and 263 /// conditions are checked against the corresponding system depending on the 264 /// signed-ness of their predicates. While the information is kept separate 265 /// based on signed-ness, certain conditions can be transferred between the two 266 /// systems. 267 class ConstraintInfo { 268 269 ConstraintSystem UnsignedCS; 270 ConstraintSystem SignedCS; 271 272 const DataLayout &DL; 273 274 public: 275 ConstraintInfo(const DataLayout &DL, ArrayRef<Value *> FunctionArgs) 276 : UnsignedCS(FunctionArgs), SignedCS(FunctionArgs), DL(DL) { 277 auto &Value2Index = getValue2Index(false); 278 // Add Arg > -1 constraints to unsigned system for all function arguments. 279 for (Value *Arg : FunctionArgs) { 280 ConstraintTy VarPos(SmallVector<int64_t, 8>(Value2Index.size() + 1, 0), 281 false, false, false); 282 VarPos.Coefficients[Value2Index[Arg]] = -1; 283 UnsignedCS.addVariableRow(VarPos.Coefficients); 284 } 285 } 286 287 DenseMap<Value *, unsigned> &getValue2Index(bool Signed) { 288 return Signed ? SignedCS.getValue2Index() : UnsignedCS.getValue2Index(); 289 } 290 const DenseMap<Value *, unsigned> &getValue2Index(bool Signed) const { 291 return Signed ? SignedCS.getValue2Index() : UnsignedCS.getValue2Index(); 292 } 293 294 ConstraintSystem &getCS(bool Signed) { 295 return Signed ? 
SignedCS : UnsignedCS; 296 } 297 const ConstraintSystem &getCS(bool Signed) const { 298 return Signed ? SignedCS : UnsignedCS; 299 } 300 301 void popLastConstraint(bool Signed) { getCS(Signed).popLastConstraint(); } 302 void popLastNVariables(bool Signed, unsigned N) { 303 getCS(Signed).popLastNVariables(N); 304 } 305 306 bool doesHold(CmpInst::Predicate Pred, Value *A, Value *B) const; 307 308 void addFact(CmpInst::Predicate Pred, Value *A, Value *B, unsigned NumIn, 309 unsigned NumOut, SmallVectorImpl<StackEntry> &DFSInStack); 310 311 /// Turn a comparison of the form \p Op0 \p Pred \p Op1 into a vector of 312 /// constraints, using indices from the corresponding constraint system. 313 /// New variables that need to be added to the system are collected in 314 /// \p NewVariables. 315 ConstraintTy getConstraint(CmpInst::Predicate Pred, Value *Op0, Value *Op1, 316 SmallVectorImpl<Value *> &NewVariables) const; 317 318 /// Turns a comparison of the form \p Op0 \p Pred \p Op1 into a vector of 319 /// constraints using getConstraint. Returns an empty constraint if the result 320 /// cannot be used to query the existing constraint system, e.g. because it 321 /// would require adding new variables. Also tries to convert signed 322 /// predicates to unsigned ones if possible to allow using the unsigned system 323 /// which increases the effectiveness of the signed <-> unsigned transfer 324 /// logic. 325 ConstraintTy getConstraintForSolving(CmpInst::Predicate Pred, Value *Op0, 326 Value *Op1) const; 327 328 /// Try to add information from \p A \p Pred \p B to the unsigned/signed 329 /// system if \p Pred is signed/unsigned. 330 void transferToOtherSystem(CmpInst::Predicate Pred, Value *A, Value *B, 331 unsigned NumIn, unsigned NumOut, 332 SmallVectorImpl<StackEntry> &DFSInStack); 333 }; 334 335 /// Represents a (Coefficient * Variable) entry after IR decomposition. 336 struct DecompEntry { 337 int64_t Coefficient; 338 Value *Variable; 339 /// True if the variable is known positive in the current constraint. 340 bool IsKnownNonNegative; 341 342 DecompEntry(int64_t Coefficient, Value *Variable, 343 bool IsKnownNonNegative = false) 344 : Coefficient(Coefficient), Variable(Variable), 345 IsKnownNonNegative(IsKnownNonNegative) {} 346 }; 347 348 /// Represents an Offset + Coefficient1 * Variable1 + ... decomposition. 349 struct Decomposition { 350 int64_t Offset = 0; 351 SmallVector<DecompEntry, 3> Vars; 352 353 Decomposition(int64_t Offset) : Offset(Offset) {} 354 Decomposition(Value *V, bool IsKnownNonNegative = false) { 355 Vars.emplace_back(1, V, IsKnownNonNegative); 356 } 357 Decomposition(int64_t Offset, ArrayRef<DecompEntry> Vars) 358 : Offset(Offset), Vars(Vars) {} 359 360 void add(int64_t OtherOffset) { 361 Offset = addWithOverflow(Offset, OtherOffset); 362 } 363 364 void add(const Decomposition &Other) { 365 add(Other.Offset); 366 append_range(Vars, Other.Vars); 367 } 368 369 void sub(const Decomposition &Other) { 370 Decomposition Tmp = Other; 371 Tmp.mul(-1); 372 add(Tmp.Offset); 373 append_range(Vars, Tmp.Vars); 374 } 375 376 void mul(int64_t Factor) { 377 Offset = multiplyWithOverflow(Offset, Factor); 378 for (auto &Var : Vars) 379 Var.Coefficient = multiplyWithOverflow(Var.Coefficient, Factor); 380 } 381 }; 382 383 // Variable and constant offsets for a chain of GEPs, with base pointer BasePtr. 
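// For example (illustrative), for %gep = getelementptr inbounds i32, ptr %A, i64 %i
// the result would be BasePtr = %A, ConstantOffset = 0, VariableOffsets = {%i -> 4}
// and AllInbounds = true.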
struct OffsetResult {
  Value *BasePtr;
  APInt ConstantOffset;
  MapVector<Value *, APInt> VariableOffsets;
  bool AllInbounds;

  OffsetResult() : BasePtr(nullptr), ConstantOffset(0, uint64_t(0)) {}

  OffsetResult(GEPOperator &GEP, const DataLayout &DL)
      : BasePtr(GEP.getPointerOperand()), AllInbounds(GEP.isInBounds()) {
    ConstantOffset = APInt(DL.getIndexTypeSizeInBits(BasePtr->getType()), 0);
  }
};
} // namespace

// Try to collect variable and constant offsets for \p GEP, partly traversing
// nested GEPs. Returns an OffsetResult with nullptr as BasePtr if collecting
// the offset fails.
static OffsetResult collectOffsets(GEPOperator &GEP, const DataLayout &DL) {
  OffsetResult Result(GEP, DL);
  unsigned BitWidth = Result.ConstantOffset.getBitWidth();
  if (!GEP.collectOffset(DL, BitWidth, Result.VariableOffsets,
                         Result.ConstantOffset))
    return {};

  // If we have a nested GEP, check if we can combine the constant offset of the
  // inner GEP with the outer GEP.
  if (auto *InnerGEP = dyn_cast<GetElementPtrInst>(Result.BasePtr)) {
    MapVector<Value *, APInt> VariableOffsets2;
    APInt ConstantOffset2(BitWidth, 0);
    bool CanCollectInner = InnerGEP->collectOffset(
        DL, BitWidth, VariableOffsets2, ConstantOffset2);
    // TODO: Support cases with more than 1 variable offset.
    if (!CanCollectInner || Result.VariableOffsets.size() > 1 ||
        VariableOffsets2.size() > 1 ||
        (Result.VariableOffsets.size() >= 1 && VariableOffsets2.size() >= 1)) {
      // More than 1 variable index, use outer result.
      return Result;
    }
    Result.BasePtr = InnerGEP->getPointerOperand();
    Result.ConstantOffset += ConstantOffset2;
    if (Result.VariableOffsets.size() == 0 && VariableOffsets2.size() == 1)
      Result.VariableOffsets = VariableOffsets2;
    Result.AllInbounds &= InnerGEP->isInBounds();
  }
  return Result;
}

static Decomposition decompose(Value *V,
                               SmallVectorImpl<ConditionTy> &Preconditions,
                               bool IsSigned, const DataLayout &DL);

static bool canUseSExt(ConstantInt *CI) {
  const APInt &Val = CI->getValue();
  return Val.sgt(MinSignedConstraintValue) && Val.slt(MaxConstraintValue);
}

static Decomposition decomposeGEP(GEPOperator &GEP,
                                  SmallVectorImpl<ConditionTy> &Preconditions,
                                  bool IsSigned, const DataLayout &DL) {
  // Do not reason about pointers where the index size is larger than 64 bits,
  // as the coefficients used to encode constraints are 64 bit integers.
  if (DL.getIndexTypeSizeInBits(GEP.getPointerOperand()->getType()) > 64)
    return &GEP;

  assert(!IsSigned && "The logic below only supports decomposition for "
                      "unsigned predicates at the moment.");
  const auto &[BasePtr, ConstantOffset, VariableOffsets, AllInbounds] =
      collectOffsets(GEP, DL);
  if (!BasePtr || !AllInbounds)
    return &GEP;

  Decomposition Result(ConstantOffset.getSExtValue(), DecompEntry(1, BasePtr));
  for (auto [Index, Scale] : VariableOffsets) {
    auto IdxResult = decompose(Index, Preconditions, IsSigned, DL);
    IdxResult.mul(Scale.getSExtValue());
    Result.add(IdxResult);

    // If the index is signed non-negative, the GEP is increasing monotonically
    // and can be decomposed.
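    // E.g. (illustrative) decomposing %gep = getelementptr inbounds i8, ptr %p, i64 %i
    // into %p + %i is only used together with the precondition %i sge 0, unless
    // %i is already known to be non-negative.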
464 if (!isKnownNonNegative(Index, DL)) 465 Preconditions.emplace_back(CmpInst::ICMP_SGE, Index, 466 ConstantInt::get(Index->getType(), 0)); 467 } 468 return Result; 469 } 470 471 // Decomposes \p V into a constant offset + list of pairs { Coefficient, 472 // Variable } where Coefficient * Variable. The sum of the constant offset and 473 // pairs equals \p V. 474 static Decomposition decompose(Value *V, 475 SmallVectorImpl<ConditionTy> &Preconditions, 476 bool IsSigned, const DataLayout &DL) { 477 478 auto MergeResults = [&Preconditions, IsSigned, &DL](Value *A, Value *B, 479 bool IsSignedB) { 480 auto ResA = decompose(A, Preconditions, IsSigned, DL); 481 auto ResB = decompose(B, Preconditions, IsSignedB, DL); 482 ResA.add(ResB); 483 return ResA; 484 }; 485 486 Type *Ty = V->getType()->getScalarType(); 487 if (Ty->isPointerTy() && !IsSigned) { 488 if (auto *GEP = dyn_cast<GEPOperator>(V)) 489 return decomposeGEP(*GEP, Preconditions, IsSigned, DL); 490 if (isa<ConstantPointerNull>(V)) 491 return int64_t(0); 492 493 return V; 494 } 495 496 // Don't handle integers > 64 bit. Our coefficients are 64-bit large, so 497 // coefficient add/mul may wrap, while the operation in the full bit width 498 // would not. 499 if (!Ty->isIntegerTy() || Ty->getIntegerBitWidth() > 64) 500 return V; 501 502 bool IsKnownNonNegative = false; 503 504 // Decompose \p V used with a signed predicate. 505 if (IsSigned) { 506 if (auto *CI = dyn_cast<ConstantInt>(V)) { 507 if (canUseSExt(CI)) 508 return CI->getSExtValue(); 509 } 510 Value *Op0; 511 Value *Op1; 512 513 if (match(V, m_SExt(m_Value(Op0)))) 514 V = Op0; 515 else if (match(V, m_NNegZExt(m_Value(Op0)))) { 516 V = Op0; 517 IsKnownNonNegative = true; 518 } 519 520 if (match(V, m_NSWAdd(m_Value(Op0), m_Value(Op1)))) 521 return MergeResults(Op0, Op1, IsSigned); 522 523 ConstantInt *CI; 524 if (match(V, m_NSWMul(m_Value(Op0), m_ConstantInt(CI))) && canUseSExt(CI)) { 525 auto Result = decompose(Op0, Preconditions, IsSigned, DL); 526 Result.mul(CI->getSExtValue()); 527 return Result; 528 } 529 530 // (shl nsw x, shift) is (mul nsw x, (1<<shift)), with the exception of 531 // shift == bw-1. 
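    // For example (i8, illustrative): shl nsw i8 -1, 7 yields -128 without
    // overflow, while the corresponding mul nsw i8 -1, -128 would overflow, so
    // the shift-by-(bitwidth - 1) case is excluded below.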
    if (match(V, m_NSWShl(m_Value(Op0), m_ConstantInt(CI)))) {
      uint64_t Shift = CI->getValue().getLimitedValue();
      if (Shift < Ty->getIntegerBitWidth() - 1) {
        assert(Shift < 64 && "Would overflow");
        auto Result = decompose(Op0, Preconditions, IsSigned, DL);
        Result.mul(int64_t(1) << Shift);
        return Result;
      }
    }

    return {V, IsKnownNonNegative};
  }

  if (auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->uge(MaxConstraintValue))
      return V;
    return int64_t(CI->getZExtValue());
  }

  Value *Op0;
  if (match(V, m_ZExt(m_Value(Op0)))) {
    IsKnownNonNegative = true;
    V = Op0;
  }

  if (match(V, m_SExt(m_Value(Op0)))) {
    V = Op0;
    Preconditions.emplace_back(CmpInst::ICMP_SGE, Op0,
                               ConstantInt::get(Op0->getType(), 0));
  }

  Value *Op1;
  ConstantInt *CI;
  if (match(V, m_NUWAdd(m_Value(Op0), m_Value(Op1)))) {
    return MergeResults(Op0, Op1, IsSigned);
  }
  if (match(V, m_NSWAdd(m_Value(Op0), m_Value(Op1)))) {
    if (!isKnownNonNegative(Op0, DL))
      Preconditions.emplace_back(CmpInst::ICMP_SGE, Op0,
                                 ConstantInt::get(Op0->getType(), 0));
    if (!isKnownNonNegative(Op1, DL))
      Preconditions.emplace_back(CmpInst::ICMP_SGE, Op1,
                                 ConstantInt::get(Op1->getType(), 0));

    return MergeResults(Op0, Op1, IsSigned);
  }

  if (match(V, m_Add(m_Value(Op0), m_ConstantInt(CI))) && CI->isNegative() &&
      canUseSExt(CI)) {
    Preconditions.emplace_back(
        CmpInst::ICMP_UGE, Op0,
        ConstantInt::get(Op0->getType(), CI->getSExtValue() * -1));
    return MergeResults(Op0, CI, true);
  }

  // Decompose 'or' as an add if there are no common bits between the operands.
  if (match(V, m_DisjointOr(m_Value(Op0), m_ConstantInt(CI))))
    return MergeResults(Op0, CI, IsSigned);

  if (match(V, m_NUWShl(m_Value(Op1), m_ConstantInt(CI))) && canUseSExt(CI)) {
    if (CI->getSExtValue() < 0 || CI->getSExtValue() >= 64)
      return {V, IsKnownNonNegative};
    auto Result = decompose(Op1, Preconditions, IsSigned, DL);
    Result.mul(int64_t{1} << CI->getSExtValue());
    return Result;
  }

  if (match(V, m_NUWMul(m_Value(Op1), m_ConstantInt(CI))) && canUseSExt(CI) &&
      (!CI->isNegative())) {
    auto Result = decompose(Op1, Preconditions, IsSigned, DL);
    Result.mul(CI->getSExtValue());
    return Result;
  }

  if (match(V, m_NUWSub(m_Value(Op0), m_Value(Op1)))) {
    auto ResA = decompose(Op0, Preconditions, IsSigned, DL);
    auto ResB = decompose(Op1, Preconditions, IsSigned, DL);
    ResA.sub(ResB);
    return ResA;
  }

  return {V, IsKnownNonNegative};
}

ConstraintTy
ConstraintInfo::getConstraint(CmpInst::Predicate Pred, Value *Op0, Value *Op1,
                              SmallVectorImpl<Value *> &NewVariables) const {
  assert(NewVariables.empty() && "NewVariables must be empty when passed in");
  bool IsEq = false;
  bool IsNe = false;

  // Try to convert Pred to one of ULE/ULT/SLE/SLT.
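  // For example, %a uge %b is handled as %b ule %a, and %a eq %b (with non-zero
  // %b) is handled as %a ule %b with the IsEq flag set, so only the four
  // predicates above need to be encoded (illustrative summary).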
624 switch (Pred) { 625 case CmpInst::ICMP_UGT: 626 case CmpInst::ICMP_UGE: 627 case CmpInst::ICMP_SGT: 628 case CmpInst::ICMP_SGE: { 629 Pred = CmpInst::getSwappedPredicate(Pred); 630 std::swap(Op0, Op1); 631 break; 632 } 633 case CmpInst::ICMP_EQ: 634 if (match(Op1, m_Zero())) { 635 Pred = CmpInst::ICMP_ULE; 636 } else { 637 IsEq = true; 638 Pred = CmpInst::ICMP_ULE; 639 } 640 break; 641 case CmpInst::ICMP_NE: 642 if (match(Op1, m_Zero())) { 643 Pred = CmpInst::getSwappedPredicate(CmpInst::ICMP_UGT); 644 std::swap(Op0, Op1); 645 } else { 646 IsNe = true; 647 Pred = CmpInst::ICMP_ULE; 648 } 649 break; 650 default: 651 break; 652 } 653 654 if (Pred != CmpInst::ICMP_ULE && Pred != CmpInst::ICMP_ULT && 655 Pred != CmpInst::ICMP_SLE && Pred != CmpInst::ICMP_SLT) 656 return {}; 657 658 SmallVector<ConditionTy, 4> Preconditions; 659 bool IsSigned = CmpInst::isSigned(Pred); 660 auto &Value2Index = getValue2Index(IsSigned); 661 auto ADec = decompose(Op0->stripPointerCastsSameRepresentation(), 662 Preconditions, IsSigned, DL); 663 auto BDec = decompose(Op1->stripPointerCastsSameRepresentation(), 664 Preconditions, IsSigned, DL); 665 int64_t Offset1 = ADec.Offset; 666 int64_t Offset2 = BDec.Offset; 667 Offset1 *= -1; 668 669 auto &VariablesA = ADec.Vars; 670 auto &VariablesB = BDec.Vars; 671 672 // First try to look up \p V in Value2Index and NewVariables. Otherwise add a 673 // new entry to NewVariables. 674 SmallDenseMap<Value *, unsigned> NewIndexMap; 675 auto GetOrAddIndex = [&Value2Index, &NewVariables, 676 &NewIndexMap](Value *V) -> unsigned { 677 auto V2I = Value2Index.find(V); 678 if (V2I != Value2Index.end()) 679 return V2I->second; 680 auto Insert = 681 NewIndexMap.insert({V, Value2Index.size() + NewVariables.size() + 1}); 682 if (Insert.second) 683 NewVariables.push_back(V); 684 return Insert.first->second; 685 }; 686 687 // Make sure all variables have entries in Value2Index or NewVariables. 688 for (const auto &KV : concat<DecompEntry>(VariablesA, VariablesB)) 689 GetOrAddIndex(KV.Variable); 690 691 // Build result constraint, by first adding all coefficients from A and then 692 // subtracting all coefficients from B. 693 ConstraintTy Res( 694 SmallVector<int64_t, 8>(Value2Index.size() + NewVariables.size() + 1, 0), 695 IsSigned, IsEq, IsNe); 696 // Collect variables that are known to be positive in all uses in the 697 // constraint. 698 SmallDenseMap<Value *, bool> KnownNonNegativeVariables; 699 auto &R = Res.Coefficients; 700 for (const auto &KV : VariablesA) { 701 R[GetOrAddIndex(KV.Variable)] += KV.Coefficient; 702 auto I = 703 KnownNonNegativeVariables.insert({KV.Variable, KV.IsKnownNonNegative}); 704 I.first->second &= KV.IsKnownNonNegative; 705 } 706 707 for (const auto &KV : VariablesB) { 708 if (SubOverflow(R[GetOrAddIndex(KV.Variable)], KV.Coefficient, 709 R[GetOrAddIndex(KV.Variable)])) 710 return {}; 711 auto I = 712 KnownNonNegativeVariables.insert({KV.Variable, KV.IsKnownNonNegative}); 713 I.first->second &= KV.IsKnownNonNegative; 714 } 715 716 int64_t OffsetSum; 717 if (AddOverflow(Offset1, Offset2, OffsetSum)) 718 return {}; 719 if (Pred == (IsSigned ? CmpInst::ICMP_SLT : CmpInst::ICMP_ULT)) 720 if (AddOverflow(OffsetSum, int64_t(-1), OffsetSum)) 721 return {}; 722 R[0] = OffsetSum; 723 Res.Preconditions = std::move(Preconditions); 724 725 // Remove any (Coefficient, Variable) entry where the Coefficient is 0 for new 726 // variables. 
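  // E.g. if a variable that is new to the system ends up with the same
  // coefficient on both sides of the compare, its entry cancels to zero and the
  // temporarily added variable can be dropped again here (illustrative note).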
727 while (!NewVariables.empty()) { 728 int64_t Last = R.back(); 729 if (Last != 0) 730 break; 731 R.pop_back(); 732 Value *RemovedV = NewVariables.pop_back_val(); 733 NewIndexMap.erase(RemovedV); 734 } 735 736 // Add extra constraints for variables that are known positive. 737 for (auto &KV : KnownNonNegativeVariables) { 738 if (!KV.second || 739 (!Value2Index.contains(KV.first) && !NewIndexMap.contains(KV.first))) 740 continue; 741 SmallVector<int64_t, 8> C(Value2Index.size() + NewVariables.size() + 1, 0); 742 C[GetOrAddIndex(KV.first)] = -1; 743 Res.ExtraInfo.push_back(C); 744 } 745 return Res; 746 } 747 748 ConstraintTy ConstraintInfo::getConstraintForSolving(CmpInst::Predicate Pred, 749 Value *Op0, 750 Value *Op1) const { 751 Constant *NullC = Constant::getNullValue(Op0->getType()); 752 // Handle trivially true compares directly to avoid adding V UGE 0 constraints 753 // for all variables in the unsigned system. 754 if ((Pred == CmpInst::ICMP_ULE && Op0 == NullC) || 755 (Pred == CmpInst::ICMP_UGE && Op1 == NullC)) { 756 auto &Value2Index = getValue2Index(false); 757 // Return constraint that's trivially true. 758 return ConstraintTy(SmallVector<int64_t, 8>(Value2Index.size(), 0), false, 759 false, false); 760 } 761 762 // If both operands are known to be non-negative, change signed predicates to 763 // unsigned ones. This increases the reasoning effectiveness in combination 764 // with the signed <-> unsigned transfer logic. 765 if (CmpInst::isSigned(Pred) && 766 isKnownNonNegative(Op0, DL, /*Depth=*/MaxAnalysisRecursionDepth - 1) && 767 isKnownNonNegative(Op1, DL, /*Depth=*/MaxAnalysisRecursionDepth - 1)) 768 Pred = CmpInst::getUnsignedPredicate(Pred); 769 770 SmallVector<Value *> NewVariables; 771 ConstraintTy R = getConstraint(Pred, Op0, Op1, NewVariables); 772 if (!NewVariables.empty()) 773 return {}; 774 return R; 775 } 776 777 bool ConstraintTy::isValid(const ConstraintInfo &Info) const { 778 return Coefficients.size() > 0 && 779 all_of(Preconditions, [&Info](const ConditionTy &C) { 780 return Info.doesHold(C.Pred, C.Op0, C.Op1); 781 }); 782 } 783 784 std::optional<bool> 785 ConstraintTy::isImpliedBy(const ConstraintSystem &CS) const { 786 bool IsConditionImplied = CS.isConditionImplied(Coefficients); 787 788 if (IsEq || IsNe) { 789 auto NegatedOrEqual = ConstraintSystem::negateOrEqual(Coefficients); 790 bool IsNegatedOrEqualImplied = 791 !NegatedOrEqual.empty() && CS.isConditionImplied(NegatedOrEqual); 792 793 // In order to check that `%a == %b` is true (equality), both conditions `%a 794 // >= %b` and `%a <= %b` must hold true. When checking for equality (`IsEq` 795 // is true), we return true if they both hold, false in the other cases. 796 if (IsConditionImplied && IsNegatedOrEqualImplied) 797 return IsEq; 798 799 auto Negated = ConstraintSystem::negate(Coefficients); 800 bool IsNegatedImplied = !Negated.empty() && CS.isConditionImplied(Negated); 801 802 auto StrictLessThan = ConstraintSystem::toStrictLessThan(Coefficients); 803 bool IsStrictLessThanImplied = 804 !StrictLessThan.empty() && CS.isConditionImplied(StrictLessThan); 805 806 // In order to check that `%a != %b` is true (non-equality), either 807 // condition `%a > %b` or `%a < %b` must hold true. When checking for 808 // non-equality (`IsNe` is true), we return true if one of the two holds, 809 // false in the other cases. 
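    // For example, if the system already implies the strict relation %a < %b,
    // then %a != %b is known to hold and %a == %b is known to be false
    // (illustrative).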
810 if (IsNegatedImplied || IsStrictLessThanImplied) 811 return IsNe; 812 813 return std::nullopt; 814 } 815 816 if (IsConditionImplied) 817 return true; 818 819 auto Negated = ConstraintSystem::negate(Coefficients); 820 auto IsNegatedImplied = !Negated.empty() && CS.isConditionImplied(Negated); 821 if (IsNegatedImplied) 822 return false; 823 824 // Neither the condition nor its negated holds, did not prove anything. 825 return std::nullopt; 826 } 827 828 bool ConstraintInfo::doesHold(CmpInst::Predicate Pred, Value *A, 829 Value *B) const { 830 auto R = getConstraintForSolving(Pred, A, B); 831 return R.isValid(*this) && 832 getCS(R.IsSigned).isConditionImplied(R.Coefficients); 833 } 834 835 void ConstraintInfo::transferToOtherSystem( 836 CmpInst::Predicate Pred, Value *A, Value *B, unsigned NumIn, 837 unsigned NumOut, SmallVectorImpl<StackEntry> &DFSInStack) { 838 auto IsKnownNonNegative = [this](Value *V) { 839 return doesHold(CmpInst::ICMP_SGE, V, ConstantInt::get(V->getType(), 0)) || 840 isKnownNonNegative(V, DL, /*Depth=*/MaxAnalysisRecursionDepth - 1); 841 }; 842 // Check if we can combine facts from the signed and unsigned systems to 843 // derive additional facts. 844 if (!A->getType()->isIntegerTy()) 845 return; 846 // FIXME: This currently depends on the order we add facts. Ideally we 847 // would first add all known facts and only then try to add additional 848 // facts. 849 switch (Pred) { 850 default: 851 break; 852 case CmpInst::ICMP_ULT: 853 case CmpInst::ICMP_ULE: 854 // If B is a signed positive constant, then A >=s 0 and A <s (or <=s) B. 855 if (IsKnownNonNegative(B)) { 856 addFact(CmpInst::ICMP_SGE, A, ConstantInt::get(B->getType(), 0), NumIn, 857 NumOut, DFSInStack); 858 addFact(CmpInst::getSignedPredicate(Pred), A, B, NumIn, NumOut, 859 DFSInStack); 860 } 861 break; 862 case CmpInst::ICMP_UGE: 863 case CmpInst::ICMP_UGT: 864 // If A is a signed positive constant, then B >=s 0 and A >s (or >=s) B. 
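    // For example (sketch): from %a uge %b together with %a sge 0 it also
    // follows that %b sge 0 and %a sge %b, since both values then lie in the
    // range where unsigned and signed comparisons agree.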
865 if (IsKnownNonNegative(A)) { 866 addFact(CmpInst::ICMP_SGE, B, ConstantInt::get(B->getType(), 0), NumIn, 867 NumOut, DFSInStack); 868 addFact(CmpInst::getSignedPredicate(Pred), A, B, NumIn, NumOut, 869 DFSInStack); 870 } 871 break; 872 case CmpInst::ICMP_SLT: 873 if (IsKnownNonNegative(A)) 874 addFact(CmpInst::ICMP_ULT, A, B, NumIn, NumOut, DFSInStack); 875 break; 876 case CmpInst::ICMP_SGT: { 877 if (doesHold(CmpInst::ICMP_SGE, B, ConstantInt::get(B->getType(), -1))) 878 addFact(CmpInst::ICMP_UGE, A, ConstantInt::get(B->getType(), 0), NumIn, 879 NumOut, DFSInStack); 880 if (IsKnownNonNegative(B)) 881 addFact(CmpInst::ICMP_UGT, A, B, NumIn, NumOut, DFSInStack); 882 883 break; 884 } 885 case CmpInst::ICMP_SGE: 886 if (IsKnownNonNegative(B)) 887 addFact(CmpInst::ICMP_UGE, A, B, NumIn, NumOut, DFSInStack); 888 break; 889 } 890 } 891 892 #ifndef NDEBUG 893 894 static void dumpConstraint(ArrayRef<int64_t> C, 895 const DenseMap<Value *, unsigned> &Value2Index) { 896 ConstraintSystem CS(Value2Index); 897 CS.addVariableRowFill(C); 898 CS.dump(); 899 } 900 #endif 901 902 void State::addInfoForInductions(BasicBlock &BB) { 903 auto *L = LI.getLoopFor(&BB); 904 if (!L || L->getHeader() != &BB) 905 return; 906 907 Value *A; 908 Value *B; 909 CmpInst::Predicate Pred; 910 911 if (!match(BB.getTerminator(), 912 m_Br(m_ICmp(Pred, m_Value(A), m_Value(B)), m_Value(), m_Value()))) 913 return; 914 PHINode *PN = dyn_cast<PHINode>(A); 915 if (!PN) { 916 Pred = CmpInst::getSwappedPredicate(Pred); 917 std::swap(A, B); 918 PN = dyn_cast<PHINode>(A); 919 } 920 921 if (!PN || PN->getParent() != &BB || PN->getNumIncomingValues() != 2 || 922 !SE.isSCEVable(PN->getType())) 923 return; 924 925 BasicBlock *InLoopSucc = nullptr; 926 if (Pred == CmpInst::ICMP_NE) 927 InLoopSucc = cast<BranchInst>(BB.getTerminator())->getSuccessor(0); 928 else if (Pred == CmpInst::ICMP_EQ) 929 InLoopSucc = cast<BranchInst>(BB.getTerminator())->getSuccessor(1); 930 else 931 return; 932 933 if (!L->contains(InLoopSucc) || !L->isLoopExiting(&BB) || InLoopSucc == &BB) 934 return; 935 936 auto *AR = dyn_cast_or_null<SCEVAddRecExpr>(SE.getSCEV(PN)); 937 BasicBlock *LoopPred = L->getLoopPredecessor(); 938 if (!AR || AR->getLoop() != L || !LoopPred) 939 return; 940 941 const SCEV *StartSCEV = AR->getStart(); 942 Value *StartValue = nullptr; 943 if (auto *C = dyn_cast<SCEVConstant>(StartSCEV)) { 944 StartValue = C->getValue(); 945 } else { 946 StartValue = PN->getIncomingValueForBlock(LoopPred); 947 assert(SE.getSCEV(StartValue) == StartSCEV && "inconsistent start value"); 948 } 949 950 DomTreeNode *DTN = DT.getNode(InLoopSucc); 951 auto IncUnsigned = SE.getMonotonicPredicateType(AR, CmpInst::ICMP_UGT); 952 auto IncSigned = SE.getMonotonicPredicateType(AR, CmpInst::ICMP_SGT); 953 bool MonotonicallyIncreasingUnsigned = 954 IncUnsigned && *IncUnsigned == ScalarEvolution::MonotonicallyIncreasing; 955 bool MonotonicallyIncreasingSigned = 956 IncSigned && *IncSigned == ScalarEvolution::MonotonicallyIncreasing; 957 // If SCEV guarantees that AR does not wrap, PN >= StartValue can be added 958 // unconditionally. 
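  // E.g. an induction {%start,+,1}<nuw> never drops below %start in the
  // unsigned sense, so PN uge StartValue can be added without a precondition
  // (illustrative).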
  if (MonotonicallyIncreasingUnsigned)
    WorkList.push_back(
        FactOrCheck::getConditionFact(DTN, CmpInst::ICMP_UGE, PN, StartValue));
  if (MonotonicallyIncreasingSigned)
    WorkList.push_back(
        FactOrCheck::getConditionFact(DTN, CmpInst::ICMP_SGE, PN, StartValue));

  APInt StepOffset;
  if (auto *C = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
    StepOffset = C->getAPInt();
  else
    return;

  // Make sure the bound B is loop-invariant.
  if (!L->isLoopInvariant(B))
    return;

  // Handle negative steps.
  if (StepOffset.isNegative()) {
    // TODO: Extend to allow steps other than -1.
    if (!(-StepOffset).isOne())
      return;

    // AR may wrap.
    // Add StartValue >= PN conditional on B <= StartValue which guarantees that
    // the loop exits before wrapping with a step of -1.
    WorkList.push_back(FactOrCheck::getConditionFact(
        DTN, CmpInst::ICMP_UGE, StartValue, PN,
        ConditionTy(CmpInst::ICMP_ULE, B, StartValue)));
    WorkList.push_back(FactOrCheck::getConditionFact(
        DTN, CmpInst::ICMP_SGE, StartValue, PN,
        ConditionTy(CmpInst::ICMP_SLE, B, StartValue)));
    // Add PN > B conditional on B <= StartValue which guarantees that the loop
    // exits when reaching B with a step of -1.
    WorkList.push_back(FactOrCheck::getConditionFact(
        DTN, CmpInst::ICMP_UGT, PN, B,
        ConditionTy(CmpInst::ICMP_ULE, B, StartValue)));
    WorkList.push_back(FactOrCheck::getConditionFact(
        DTN, CmpInst::ICMP_SGT, PN, B,
        ConditionTy(CmpInst::ICMP_SLE, B, StartValue)));
    return;
  }

  // Make sure AR either steps by 1 or that the value we compare against is a
  // GEP based on the same start value and all offsets are a multiple of the
  // step size, to guarantee that the induction will reach the value.
  if (StepOffset.isZero() || StepOffset.isNegative())
    return;

  if (!StepOffset.isOne()) {
    // Check whether B-Start is known to be a multiple of StepOffset.
    const SCEV *BMinusStart = SE.getMinusSCEV(SE.getSCEV(B), StartSCEV);
    if (isa<SCEVCouldNotCompute>(BMinusStart) ||
        !SE.getConstantMultiple(BMinusStart).urem(StepOffset).isZero())
      return;
  }

  // AR may wrap. Add PN >= StartValue conditional on StartValue <= B which
  // guarantees that the loop exits before wrapping in combination with the
  // restrictions on B and the step above.
  if (!MonotonicallyIncreasingUnsigned)
    WorkList.push_back(FactOrCheck::getConditionFact(
        DTN, CmpInst::ICMP_UGE, PN, StartValue,
        ConditionTy(CmpInst::ICMP_ULE, StartValue, B)));
  if (!MonotonicallyIncreasingSigned)
    WorkList.push_back(FactOrCheck::getConditionFact(
        DTN, CmpInst::ICMP_SGE, PN, StartValue,
        ConditionTy(CmpInst::ICMP_SLE, StartValue, B)));

  WorkList.push_back(FactOrCheck::getConditionFact(
      DTN, CmpInst::ICMP_ULT, PN, B,
      ConditionTy(CmpInst::ICMP_ULE, StartValue, B)));
  WorkList.push_back(FactOrCheck::getConditionFact(
      DTN, CmpInst::ICMP_SLT, PN, B,
      ConditionTy(CmpInst::ICMP_SLE, StartValue, B)));
}

void State::addInfoFor(BasicBlock &BB) {
  addInfoForInductions(BB);

  // True as long as the current instruction is guaranteed to execute.
  bool GuaranteedToExecute = true;
  // Queue conditions and assumes.
1042 for (Instruction &I : BB) { 1043 if (auto Cmp = dyn_cast<ICmpInst>(&I)) { 1044 for (Use &U : Cmp->uses()) { 1045 auto *UserI = getContextInstForUse(U); 1046 auto *DTN = DT.getNode(UserI->getParent()); 1047 if (!DTN) 1048 continue; 1049 WorkList.push_back(FactOrCheck::getCheck(DTN, &U)); 1050 } 1051 continue; 1052 } 1053 1054 auto *II = dyn_cast<IntrinsicInst>(&I); 1055 Intrinsic::ID ID = II ? II->getIntrinsicID() : Intrinsic::not_intrinsic; 1056 switch (ID) { 1057 case Intrinsic::assume: { 1058 Value *A, *B; 1059 CmpInst::Predicate Pred; 1060 if (!match(I.getOperand(0), m_ICmp(Pred, m_Value(A), m_Value(B)))) 1061 break; 1062 if (GuaranteedToExecute) { 1063 // The assume is guaranteed to execute when BB is entered, hence Cond 1064 // holds on entry to BB. 1065 WorkList.emplace_back(FactOrCheck::getConditionFact( 1066 DT.getNode(I.getParent()), Pred, A, B)); 1067 } else { 1068 WorkList.emplace_back( 1069 FactOrCheck::getInstFact(DT.getNode(I.getParent()), &I)); 1070 } 1071 break; 1072 } 1073 // Enqueue ssub_with_overflow for simplification. 1074 case Intrinsic::ssub_with_overflow: 1075 WorkList.push_back( 1076 FactOrCheck::getCheck(DT.getNode(&BB), cast<CallInst>(&I))); 1077 break; 1078 // Enqueue the intrinsics to add extra info. 1079 case Intrinsic::umin: 1080 case Intrinsic::umax: 1081 case Intrinsic::smin: 1082 case Intrinsic::smax: 1083 // TODO: handle llvm.abs as well 1084 WorkList.push_back( 1085 FactOrCheck::getCheck(DT.getNode(&BB), cast<CallInst>(&I))); 1086 // TODO: Check if it is possible to instead only added the min/max facts 1087 // when simplifying uses of the min/max intrinsics. 1088 if (!isGuaranteedNotToBePoison(&I)) 1089 break; 1090 [[fallthrough]]; 1091 case Intrinsic::abs: 1092 WorkList.push_back(FactOrCheck::getInstFact(DT.getNode(&BB), &I)); 1093 break; 1094 } 1095 1096 GuaranteedToExecute &= isGuaranteedToTransferExecutionToSuccessor(&I); 1097 } 1098 1099 if (auto *Switch = dyn_cast<SwitchInst>(BB.getTerminator())) { 1100 for (auto &Case : Switch->cases()) { 1101 BasicBlock *Succ = Case.getCaseSuccessor(); 1102 Value *V = Case.getCaseValue(); 1103 if (!canAddSuccessor(BB, Succ)) 1104 continue; 1105 WorkList.emplace_back(FactOrCheck::getConditionFact( 1106 DT.getNode(Succ), CmpInst::ICMP_EQ, Switch->getCondition(), V)); 1107 } 1108 return; 1109 } 1110 1111 auto *Br = dyn_cast<BranchInst>(BB.getTerminator()); 1112 if (!Br || !Br->isConditional()) 1113 return; 1114 1115 Value *Cond = Br->getCondition(); 1116 1117 // If the condition is a chain of ORs/AND and the successor only has the 1118 // current block as predecessor, queue conditions for the successor. 1119 Value *Op0, *Op1; 1120 if (match(Cond, m_LogicalOr(m_Value(Op0), m_Value(Op1))) || 1121 match(Cond, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) { 1122 bool IsOr = match(Cond, m_LogicalOr()); 1123 bool IsAnd = match(Cond, m_LogicalAnd()); 1124 // If there's a select that matches both AND and OR, we need to commit to 1125 // one of the options. Arbitrarily pick OR. 1126 if (IsOr && IsAnd) 1127 IsAnd = false; 1128 1129 BasicBlock *Successor = Br->getSuccessor(IsOr ? 
1 : 0); 1130 if (canAddSuccessor(BB, Successor)) { 1131 SmallVector<Value *> CondWorkList; 1132 SmallPtrSet<Value *, 8> SeenCond; 1133 auto QueueValue = [&CondWorkList, &SeenCond](Value *V) { 1134 if (SeenCond.insert(V).second) 1135 CondWorkList.push_back(V); 1136 }; 1137 QueueValue(Op1); 1138 QueueValue(Op0); 1139 while (!CondWorkList.empty()) { 1140 Value *Cur = CondWorkList.pop_back_val(); 1141 if (auto *Cmp = dyn_cast<ICmpInst>(Cur)) { 1142 WorkList.emplace_back(FactOrCheck::getConditionFact( 1143 DT.getNode(Successor), 1144 IsOr ? CmpInst::getInversePredicate(Cmp->getPredicate()) 1145 : Cmp->getPredicate(), 1146 Cmp->getOperand(0), Cmp->getOperand(1))); 1147 continue; 1148 } 1149 if (IsOr && match(Cur, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) { 1150 QueueValue(Op1); 1151 QueueValue(Op0); 1152 continue; 1153 } 1154 if (IsAnd && match(Cur, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) { 1155 QueueValue(Op1); 1156 QueueValue(Op0); 1157 continue; 1158 } 1159 } 1160 } 1161 return; 1162 } 1163 1164 auto *CmpI = dyn_cast<ICmpInst>(Br->getCondition()); 1165 if (!CmpI) 1166 return; 1167 if (canAddSuccessor(BB, Br->getSuccessor(0))) 1168 WorkList.emplace_back(FactOrCheck::getConditionFact( 1169 DT.getNode(Br->getSuccessor(0)), CmpI->getPredicate(), 1170 CmpI->getOperand(0), CmpI->getOperand(1))); 1171 if (canAddSuccessor(BB, Br->getSuccessor(1))) 1172 WorkList.emplace_back(FactOrCheck::getConditionFact( 1173 DT.getNode(Br->getSuccessor(1)), 1174 CmpInst::getInversePredicate(CmpI->getPredicate()), CmpI->getOperand(0), 1175 CmpI->getOperand(1))); 1176 } 1177 1178 #ifndef NDEBUG 1179 static void dumpUnpackedICmp(raw_ostream &OS, ICmpInst::Predicate Pred, 1180 Value *LHS, Value *RHS) { 1181 OS << "icmp " << Pred << ' '; 1182 LHS->printAsOperand(OS, /*PrintType=*/true); 1183 OS << ", "; 1184 RHS->printAsOperand(OS, /*PrintType=*/false); 1185 } 1186 #endif 1187 1188 namespace { 1189 /// Helper to keep track of a condition and if it should be treated as negated 1190 /// for reproducer construction. 1191 /// Pred == Predicate::BAD_ICMP_PREDICATE indicates that this entry is a 1192 /// placeholder to keep the ReproducerCondStack in sync with DFSInStack. 1193 struct ReproducerEntry { 1194 ICmpInst::Predicate Pred; 1195 Value *LHS; 1196 Value *RHS; 1197 1198 ReproducerEntry(ICmpInst::Predicate Pred, Value *LHS, Value *RHS) 1199 : Pred(Pred), LHS(LHS), RHS(RHS) {} 1200 }; 1201 } // namespace 1202 1203 /// Helper function to generate a reproducer function for simplifying \p Cond. 1204 /// The reproducer function contains a series of @llvm.assume calls, one for 1205 /// each condition in \p Stack. For each condition, the operand instruction are 1206 /// cloned until we reach operands that have an entry in \p Value2Index. Those 1207 /// will then be added as function arguments. \p DT is used to order cloned 1208 /// instructions. The reproducer function will get added to \p M, if it is 1209 /// non-null. Otherwise no reproducer function is generated. 
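/// For example (sketch, with invented names), simplifying %c = icmp ult i8 %a, 32
/// under the dominating condition %a u< 16 would produce a reproducer roughly of
/// the form:
///   define i1 @"<module><function>repro"(i8 %a) {
///   entry:
///     %t = icmp ult i8 %a, 16
///     call void @llvm.assume(i1 %t)
///     %c = icmp ult i8 %a, 32
///     ret i1 %c
///   }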
1210 static void generateReproducer(CmpInst *Cond, Module *M, 1211 ArrayRef<ReproducerEntry> Stack, 1212 ConstraintInfo &Info, DominatorTree &DT) { 1213 if (!M) 1214 return; 1215 1216 LLVMContext &Ctx = Cond->getContext(); 1217 1218 LLVM_DEBUG(dbgs() << "Creating reproducer for " << *Cond << "\n"); 1219 1220 ValueToValueMapTy Old2New; 1221 SmallVector<Value *> Args; 1222 SmallPtrSet<Value *, 8> Seen; 1223 // Traverse Cond and its operands recursively until we reach a value that's in 1224 // Value2Index or not an instruction, or not a operation that 1225 // ConstraintElimination can decompose. Such values will be considered as 1226 // external inputs to the reproducer, they are collected and added as function 1227 // arguments later. 1228 auto CollectArguments = [&](ArrayRef<Value *> Ops, bool IsSigned) { 1229 auto &Value2Index = Info.getValue2Index(IsSigned); 1230 SmallVector<Value *, 4> WorkList(Ops); 1231 while (!WorkList.empty()) { 1232 Value *V = WorkList.pop_back_val(); 1233 if (!Seen.insert(V).second) 1234 continue; 1235 if (Old2New.find(V) != Old2New.end()) 1236 continue; 1237 if (isa<Constant>(V)) 1238 continue; 1239 1240 auto *I = dyn_cast<Instruction>(V); 1241 if (Value2Index.contains(V) || !I || 1242 !isa<CmpInst, BinaryOperator, GEPOperator, CastInst>(V)) { 1243 Old2New[V] = V; 1244 Args.push_back(V); 1245 LLVM_DEBUG(dbgs() << " found external input " << *V << "\n"); 1246 } else { 1247 append_range(WorkList, I->operands()); 1248 } 1249 } 1250 }; 1251 1252 for (auto &Entry : Stack) 1253 if (Entry.Pred != ICmpInst::BAD_ICMP_PREDICATE) 1254 CollectArguments({Entry.LHS, Entry.RHS}, ICmpInst::isSigned(Entry.Pred)); 1255 CollectArguments(Cond, ICmpInst::isSigned(Cond->getPredicate())); 1256 1257 SmallVector<Type *> ParamTys; 1258 for (auto *P : Args) 1259 ParamTys.push_back(P->getType()); 1260 1261 FunctionType *FTy = FunctionType::get(Cond->getType(), ParamTys, 1262 /*isVarArg=*/false); 1263 Function *F = Function::Create(FTy, Function::ExternalLinkage, 1264 Cond->getModule()->getName() + 1265 Cond->getFunction()->getName() + "repro", 1266 M); 1267 // Add arguments to the reproducer function for each external value collected. 1268 for (unsigned I = 0; I < Args.size(); ++I) { 1269 F->getArg(I)->setName(Args[I]->getName()); 1270 Old2New[Args[I]] = F->getArg(I); 1271 } 1272 1273 BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", F); 1274 IRBuilder<> Builder(Entry); 1275 Builder.CreateRet(Builder.getTrue()); 1276 Builder.SetInsertPoint(Entry->getTerminator()); 1277 1278 // Clone instructions in \p Ops and their operands recursively until reaching 1279 // an value in Value2Index (external input to the reproducer). Update Old2New 1280 // mapping for the original and cloned instructions. Sort instructions to 1281 // clone by dominance, then insert the cloned instructions in the function. 
1282 auto CloneInstructions = [&](ArrayRef<Value *> Ops, bool IsSigned) { 1283 SmallVector<Value *, 4> WorkList(Ops); 1284 SmallVector<Instruction *> ToClone; 1285 auto &Value2Index = Info.getValue2Index(IsSigned); 1286 while (!WorkList.empty()) { 1287 Value *V = WorkList.pop_back_val(); 1288 if (Old2New.find(V) != Old2New.end()) 1289 continue; 1290 1291 auto *I = dyn_cast<Instruction>(V); 1292 if (!Value2Index.contains(V) && I) { 1293 Old2New[V] = nullptr; 1294 ToClone.push_back(I); 1295 append_range(WorkList, I->operands()); 1296 } 1297 } 1298 1299 sort(ToClone, 1300 [&DT](Instruction *A, Instruction *B) { return DT.dominates(A, B); }); 1301 for (Instruction *I : ToClone) { 1302 Instruction *Cloned = I->clone(); 1303 Old2New[I] = Cloned; 1304 Old2New[I]->setName(I->getName()); 1305 Cloned->insertBefore(&*Builder.GetInsertPoint()); 1306 Cloned->dropUnknownNonDebugMetadata(); 1307 Cloned->setDebugLoc({}); 1308 } 1309 }; 1310 1311 // Materialize the assumptions for the reproducer using the entries in Stack. 1312 // That is, first clone the operands of the condition recursively until we 1313 // reach an external input to the reproducer and add them to the reproducer 1314 // function. Then add an ICmp for the condition (with the inverse predicate if 1315 // the entry is negated) and an assert using the ICmp. 1316 for (auto &Entry : Stack) { 1317 if (Entry.Pred == ICmpInst::BAD_ICMP_PREDICATE) 1318 continue; 1319 1320 LLVM_DEBUG(dbgs() << " Materializing assumption "; 1321 dumpUnpackedICmp(dbgs(), Entry.Pred, Entry.LHS, Entry.RHS); 1322 dbgs() << "\n"); 1323 CloneInstructions({Entry.LHS, Entry.RHS}, CmpInst::isSigned(Entry.Pred)); 1324 1325 auto *Cmp = Builder.CreateICmp(Entry.Pred, Entry.LHS, Entry.RHS); 1326 Builder.CreateAssumption(Cmp); 1327 } 1328 1329 // Finally, clone the condition to reproduce and remap instruction operands in 1330 // the reproducer using Old2New. 1331 CloneInstructions(Cond, CmpInst::isSigned(Cond->getPredicate())); 1332 Entry->getTerminator()->setOperand(0, Cond); 1333 remapInstructionsInBlocks({Entry}, Old2New); 1334 1335 assert(!verifyFunction(*F, &dbgs())); 1336 } 1337 1338 static std::optional<bool> checkCondition(CmpInst::Predicate Pred, Value *A, 1339 Value *B, Instruction *CheckInst, 1340 ConstraintInfo &Info) { 1341 LLVM_DEBUG(dbgs() << "Checking " << *CheckInst << "\n"); 1342 1343 auto R = Info.getConstraintForSolving(Pred, A, B); 1344 if (R.empty() || !R.isValid(Info)){ 1345 LLVM_DEBUG(dbgs() << " failed to decompose condition\n"); 1346 return std::nullopt; 1347 } 1348 1349 auto &CSToUse = Info.getCS(R.IsSigned); 1350 1351 // If there was extra information collected during decomposition, apply 1352 // it now and remove it immediately once we are done with reasoning 1353 // about the constraint. 1354 for (auto &Row : R.ExtraInfo) 1355 CSToUse.addVariableRow(Row); 1356 auto InfoRestorer = make_scope_exit([&]() { 1357 for (unsigned I = 0; I < R.ExtraInfo.size(); ++I) 1358 CSToUse.popLastConstraint(); 1359 }); 1360 1361 if (auto ImpliedCondition = R.isImpliedBy(CSToUse)) { 1362 if (!DebugCounter::shouldExecute(EliminatedCounter)) 1363 return std::nullopt; 1364 1365 LLVM_DEBUG({ 1366 dbgs() << "Condition "; 1367 dumpUnpackedICmp( 1368 dbgs(), *ImpliedCondition ? 
Pred : CmpInst::getInversePredicate(Pred), 1369 A, B); 1370 dbgs() << " implied by dominating constraints\n"; 1371 CSToUse.dump(); 1372 }); 1373 return ImpliedCondition; 1374 } 1375 1376 return std::nullopt; 1377 } 1378 1379 static bool checkAndReplaceCondition( 1380 CmpInst *Cmp, ConstraintInfo &Info, unsigned NumIn, unsigned NumOut, 1381 Instruction *ContextInst, Module *ReproducerModule, 1382 ArrayRef<ReproducerEntry> ReproducerCondStack, DominatorTree &DT, 1383 SmallVectorImpl<Instruction *> &ToRemove) { 1384 auto ReplaceCmpWithConstant = [&](CmpInst *Cmp, bool IsTrue) { 1385 generateReproducer(Cmp, ReproducerModule, ReproducerCondStack, Info, DT); 1386 Constant *ConstantC = ConstantInt::getBool( 1387 CmpInst::makeCmpResultType(Cmp->getType()), IsTrue); 1388 Cmp->replaceUsesWithIf(ConstantC, [&DT, NumIn, NumOut, 1389 ContextInst](Use &U) { 1390 auto *UserI = getContextInstForUse(U); 1391 auto *DTN = DT.getNode(UserI->getParent()); 1392 if (!DTN || DTN->getDFSNumIn() < NumIn || DTN->getDFSNumOut() > NumOut) 1393 return false; 1394 if (UserI->getParent() == ContextInst->getParent() && 1395 UserI->comesBefore(ContextInst)) 1396 return false; 1397 1398 // Conditions in an assume trivially simplify to true. Skip uses 1399 // in assume calls to not destroy the available information. 1400 auto *II = dyn_cast<IntrinsicInst>(U.getUser()); 1401 return !II || II->getIntrinsicID() != Intrinsic::assume; 1402 }); 1403 NumCondsRemoved++; 1404 if (Cmp->use_empty()) 1405 ToRemove.push_back(Cmp); 1406 return true; 1407 }; 1408 1409 if (auto ImpliedCondition = 1410 checkCondition(Cmp->getPredicate(), Cmp->getOperand(0), 1411 Cmp->getOperand(1), Cmp, Info)) 1412 return ReplaceCmpWithConstant(Cmp, *ImpliedCondition); 1413 return false; 1414 } 1415 1416 static bool checkAndReplaceMinMax(MinMaxIntrinsic *MinMax, ConstraintInfo &Info, 1417 SmallVectorImpl<Instruction *> &ToRemove) { 1418 auto ReplaceMinMaxWithOperand = [&](MinMaxIntrinsic *MinMax, bool UseLHS) { 1419 // TODO: generate reproducer for min/max. 1420 MinMax->replaceAllUsesWith(MinMax->getOperand(UseLHS ? 0 : 1)); 1421 ToRemove.push_back(MinMax); 1422 return true; 1423 }; 1424 1425 ICmpInst::Predicate Pred = 1426 ICmpInst::getNonStrictPredicate(MinMax->getPredicate()); 1427 if (auto ImpliedCondition = checkCondition( 1428 Pred, MinMax->getOperand(0), MinMax->getOperand(1), MinMax, Info)) 1429 return ReplaceMinMaxWithOperand(MinMax, *ImpliedCondition); 1430 if (auto ImpliedCondition = checkCondition( 1431 Pred, MinMax->getOperand(1), MinMax->getOperand(0), MinMax, Info)) 1432 return ReplaceMinMaxWithOperand(MinMax, !*ImpliedCondition); 1433 return false; 1434 } 1435 1436 static void 1437 removeEntryFromStack(const StackEntry &E, ConstraintInfo &Info, 1438 Module *ReproducerModule, 1439 SmallVectorImpl<ReproducerEntry> &ReproducerCondStack, 1440 SmallVectorImpl<StackEntry> &DFSInStack) { 1441 Info.popLastConstraint(E.IsSigned); 1442 // Remove variables in the system that went out of scope. 1443 auto &Mapping = Info.getValue2Index(E.IsSigned); 1444 for (Value *V : E.ValuesToRelease) 1445 Mapping.erase(V); 1446 Info.popLastNVariables(E.IsSigned, E.ValuesToRelease.size()); 1447 DFSInStack.pop_back(); 1448 if (ReproducerModule) 1449 ReproducerCondStack.pop_back(); 1450 } 1451 1452 /// Check if either the first condition of an AND or OR is implied by the 1453 /// (negated in case of OR) second condition or vice versa. 
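/// For example (illustrative):
///   %c1 = icmp ult i8 %x, 10
///   %c2 = icmp ult i8 %x, 5
///   %or = or i1 %c1, %c2
/// When %c1 is false we know %x uge 10, which implies that %c2 is false as
/// well, so the use of %c2 in %or can be replaced by 'false'.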
1454 static bool checkOrAndOpImpliedByOther( 1455 FactOrCheck &CB, ConstraintInfo &Info, Module *ReproducerModule, 1456 SmallVectorImpl<ReproducerEntry> &ReproducerCondStack, 1457 SmallVectorImpl<StackEntry> &DFSInStack) { 1458 1459 CmpInst::Predicate Pred; 1460 Value *A, *B; 1461 Instruction *JoinOp = CB.getContextInst(); 1462 CmpInst *CmpToCheck = cast<CmpInst>(CB.getInstructionToSimplify()); 1463 unsigned OtherOpIdx = JoinOp->getOperand(0) == CmpToCheck ? 1 : 0; 1464 1465 // Don't try to simplify the first condition of a select by the second, as 1466 // this may make the select more poisonous than the original one. 1467 // TODO: check if the first operand may be poison. 1468 if (OtherOpIdx != 0 && isa<SelectInst>(JoinOp)) 1469 return false; 1470 1471 if (!match(JoinOp->getOperand(OtherOpIdx), 1472 m_ICmp(Pred, m_Value(A), m_Value(B)))) 1473 return false; 1474 1475 // For OR, check if the negated condition implies CmpToCheck. 1476 bool IsOr = match(JoinOp, m_LogicalOr()); 1477 if (IsOr) 1478 Pred = CmpInst::getInversePredicate(Pred); 1479 1480 // Optimistically add fact from first condition. 1481 unsigned OldSize = DFSInStack.size(); 1482 Info.addFact(Pred, A, B, CB.NumIn, CB.NumOut, DFSInStack); 1483 if (OldSize == DFSInStack.size()) 1484 return false; 1485 1486 bool Changed = false; 1487 // Check if the second condition can be simplified now. 1488 if (auto ImpliedCondition = 1489 checkCondition(CmpToCheck->getPredicate(), CmpToCheck->getOperand(0), 1490 CmpToCheck->getOperand(1), CmpToCheck, Info)) { 1491 if (IsOr && isa<SelectInst>(JoinOp)) { 1492 JoinOp->setOperand( 1493 OtherOpIdx == 0 ? 2 : 0, 1494 ConstantInt::getBool(JoinOp->getType(), *ImpliedCondition)); 1495 } else 1496 JoinOp->setOperand( 1497 1 - OtherOpIdx, 1498 ConstantInt::getBool(JoinOp->getType(), *ImpliedCondition)); 1499 1500 Changed = true; 1501 } 1502 1503 // Remove entries again. 1504 while (OldSize < DFSInStack.size()) { 1505 StackEntry E = DFSInStack.back(); 1506 removeEntryFromStack(E, Info, ReproducerModule, ReproducerCondStack, 1507 DFSInStack); 1508 } 1509 return Changed; 1510 } 1511 1512 void ConstraintInfo::addFact(CmpInst::Predicate Pred, Value *A, Value *B, 1513 unsigned NumIn, unsigned NumOut, 1514 SmallVectorImpl<StackEntry> &DFSInStack) { 1515 // If the constraint has a pre-condition, skip the constraint if it does not 1516 // hold. 1517 SmallVector<Value *> NewVariables; 1518 auto R = getConstraint(Pred, A, B, NewVariables); 1519 1520 // TODO: Support non-equality for facts as well. 1521 if (!R.isValid(*this) || R.isNe()) 1522 return; 1523 1524 LLVM_DEBUG(dbgs() << "Adding '"; dumpUnpackedICmp(dbgs(), Pred, A, B); 1525 dbgs() << "'\n"); 1526 bool Added = false; 1527 auto &CSToUse = getCS(R.IsSigned); 1528 if (R.Coefficients.empty()) 1529 return; 1530 1531 Added |= CSToUse.addVariableRowFill(R.Coefficients); 1532 1533 // If R has been added to the system, add the new variables and queue it for 1534 // removal once it goes out-of-scope. 
1535 if (Added) { 1536 SmallVector<Value *, 2> ValuesToRelease; 1537 auto &Value2Index = getValue2Index(R.IsSigned); 1538 for (Value *V : NewVariables) { 1539 Value2Index.insert({V, Value2Index.size() + 1}); 1540 ValuesToRelease.push_back(V); 1541 } 1542 1543 LLVM_DEBUG({ 1544 dbgs() << " constraint: "; 1545 dumpConstraint(R.Coefficients, getValue2Index(R.IsSigned)); 1546 dbgs() << "\n"; 1547 }); 1548 1549 DFSInStack.emplace_back(NumIn, NumOut, R.IsSigned, 1550 std::move(ValuesToRelease)); 1551 1552 if (!R.IsSigned) { 1553 for (Value *V : NewVariables) { 1554 ConstraintTy VarPos(SmallVector<int64_t, 8>(Value2Index.size() + 1, 0), 1555 false, false, false); 1556 VarPos.Coefficients[Value2Index[V]] = -1; 1557 CSToUse.addVariableRow(VarPos.Coefficients); 1558 DFSInStack.emplace_back(NumIn, NumOut, R.IsSigned, 1559 SmallVector<Value *, 2>()); 1560 } 1561 } 1562 1563 if (R.isEq()) { 1564 // Also add the inverted constraint for equality constraints. 1565 for (auto &Coeff : R.Coefficients) 1566 Coeff *= -1; 1567 CSToUse.addVariableRowFill(R.Coefficients); 1568 1569 DFSInStack.emplace_back(NumIn, NumOut, R.IsSigned, 1570 SmallVector<Value *, 2>()); 1571 } 1572 } 1573 } 1574 1575 static bool replaceSubOverflowUses(IntrinsicInst *II, Value *A, Value *B, 1576 SmallVectorImpl<Instruction *> &ToRemove) { 1577 bool Changed = false; 1578 IRBuilder<> Builder(II->getParent(), II->getIterator()); 1579 Value *Sub = nullptr; 1580 for (User *U : make_early_inc_range(II->users())) { 1581 if (match(U, m_ExtractValue<0>(m_Value()))) { 1582 if (!Sub) 1583 Sub = Builder.CreateSub(A, B); 1584 U->replaceAllUsesWith(Sub); 1585 Changed = true; 1586 } else if (match(U, m_ExtractValue<1>(m_Value()))) { 1587 U->replaceAllUsesWith(Builder.getFalse()); 1588 Changed = true; 1589 } else 1590 continue; 1591 1592 if (U->use_empty()) { 1593 auto *I = cast<Instruction>(U); 1594 ToRemove.push_back(I); 1595 I->setOperand(0, PoisonValue::get(II->getType())); 1596 Changed = true; 1597 } 1598 } 1599 1600 if (II->use_empty()) { 1601 II->eraseFromParent(); 1602 Changed = true; 1603 } 1604 return Changed; 1605 } 1606 1607 static bool 1608 tryToSimplifyOverflowMath(IntrinsicInst *II, ConstraintInfo &Info, 1609 SmallVectorImpl<Instruction *> &ToRemove) { 1610 auto DoesConditionHold = [](CmpInst::Predicate Pred, Value *A, Value *B, 1611 ConstraintInfo &Info) { 1612 auto R = Info.getConstraintForSolving(Pred, A, B); 1613 if (R.size() < 2 || !R.isValid(Info)) 1614 return false; 1615 1616 auto &CSToUse = Info.getCS(R.IsSigned); 1617 return CSToUse.isConditionImplied(R.Coefficients); 1618 }; 1619 1620 bool Changed = false; 1621 if (II->getIntrinsicID() == Intrinsic::ssub_with_overflow) { 1622 // If A s>= B && B s>= 0, ssub.with.overflow(a, b) should not overflow and 1623 // can be simplified to a regular sub. 
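    // E.g. (sketch) for %r = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 %a, i8 %b)
    // with %a sge %b and %b sge 0 known, the value extract can be replaced by
    // 'sub i8 %a, %b' and the overflow bit by 'false'.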
    Value *A = II->getArgOperand(0);
    Value *B = II->getArgOperand(1);
    if (!DoesConditionHold(CmpInst::ICMP_SGE, A, B, Info) ||
        !DoesConditionHold(CmpInst::ICMP_SGE, B,
                           ConstantInt::get(A->getType(), 0), Info))
      return false;
    Changed = replaceSubOverflowUses(II, A, B, ToRemove);
  }
  return Changed;
}

static bool eliminateConstraints(Function &F, DominatorTree &DT, LoopInfo &LI,
                                 ScalarEvolution &SE,
                                 OptimizationRemarkEmitter &ORE) {
  bool Changed = false;
  DT.updateDFSNumbers();
  SmallVector<Value *> FunctionArgs;
  for (Value &Arg : F.args())
    FunctionArgs.push_back(&Arg);
  ConstraintInfo Info(F.getDataLayout(), FunctionArgs);
  State S(DT, LI, SE);
  std::unique_ptr<Module> ReproducerModule(
      DumpReproducers ? new Module(F.getName(), F.getContext()) : nullptr);

  // First, collect conditions implied by branches and blocks with their
  // Dominator DFS in and out numbers.
  for (BasicBlock &BB : F) {
    if (!DT.getNode(&BB))
      continue;
    S.addInfoFor(BB);
  }

  // Next, sort worklist by dominance, so that dominating conditions to check
  // and facts come before conditions and facts dominated by them. If a
  // condition to check and a fact have the same numbers, conditional facts
  // come first. Assume facts and checks are ordered according to their
  // relative order in the containing basic block. Also make sure conditions
  // with constant operands come before conditions without constant operands.
  // This increases the effectiveness of the current signed <-> unsigned fact
  // transfer logic.
  stable_sort(S.WorkList, [](const FactOrCheck &A, const FactOrCheck &B) {
    auto HasNoConstOp = [](const FactOrCheck &B) {
      Value *V0 = B.isConditionFact() ? B.Cond.Op0 : B.Inst->getOperand(0);
      Value *V1 = B.isConditionFact() ? B.Cond.Op1 : B.Inst->getOperand(1);
      return !isa<ConstantInt>(V0) && !isa<ConstantInt>(V1);
    };
    // If both entries have the same In numbers, conditional facts come first.
    // Otherwise use the relative order in the basic block.
    if (A.NumIn == B.NumIn) {
      if (A.isConditionFact() && B.isConditionFact()) {
        bool NoConstOpA = HasNoConstOp(A);
        bool NoConstOpB = HasNoConstOp(B);
        return NoConstOpA < NoConstOpB;
      }
      if (A.isConditionFact())
        return true;
      if (B.isConditionFact())
        return false;
      auto *InstA = A.getContextInst();
      auto *InstB = B.getContextInst();
      return InstA->comesBefore(InstB);
    }
    return A.NumIn < B.NumIn;
  });

  SmallVector<Instruction *> ToRemove;

  // Finally, process ordered worklist and eliminate implied conditions.
  SmallVector<StackEntry, 16> DFSInStack;
  SmallVector<ReproducerEntry> ReproducerCondStack;
  for (FactOrCheck &CB : S.WorkList) {
    // First, pop entries from the stack that are out-of-scope for CB. Remove
    // the corresponding entry from the constraint system.
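    // Sketch of the scoping rule, using made-up DFS numbers purely as an
    // example: an entry with (NumIn, NumOut) = (4, 9) still dominates a check
    // with (5, 6) because 4 <= 5 and 6 <= 9, so it stays on the stack; an
    // entry with (4, 5) does not dominate a check with (6, 7) and is popped
    // below before the check is processed.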
    while (!DFSInStack.empty()) {
      auto &E = DFSInStack.back();
      LLVM_DEBUG(dbgs() << "Top of stack : " << E.NumIn << " " << E.NumOut
                        << "\n");
      LLVM_DEBUG(dbgs() << "CB: " << CB.NumIn << " " << CB.NumOut << "\n");
      assert(E.NumIn <= CB.NumIn);
      if (CB.NumOut <= E.NumOut)
        break;
      LLVM_DEBUG({
        dbgs() << "Removing ";
        dumpConstraint(Info.getCS(E.IsSigned).getLastConstraint(),
                       Info.getValue2Index(E.IsSigned));
        dbgs() << "\n";
      });
      removeEntryFromStack(E, Info, ReproducerModule.get(), ReproducerCondStack,
                           DFSInStack);
    }

    // For a block, check if any CmpInsts become known based on the current set
    // of constraints.
    if (CB.isCheck()) {
      Instruction *Inst = CB.getInstructionToSimplify();
      if (!Inst)
        continue;
      LLVM_DEBUG(dbgs() << "Processing condition to simplify: " << *Inst
                        << "\n");
      if (auto *II = dyn_cast<WithOverflowInst>(Inst)) {
        Changed |= tryToSimplifyOverflowMath(II, Info, ToRemove);
      } else if (auto *Cmp = dyn_cast<ICmpInst>(Inst)) {
        bool Simplified = checkAndReplaceCondition(
            Cmp, Info, CB.NumIn, CB.NumOut, CB.getContextInst(),
            ReproducerModule.get(), ReproducerCondStack, S.DT, ToRemove);
        if (!Simplified &&
            match(CB.getContextInst(), m_LogicalOp(m_Value(), m_Value()))) {
          Simplified =
              checkOrAndOpImpliedByOther(CB, Info, ReproducerModule.get(),
                                         ReproducerCondStack, DFSInStack);
        }
        Changed |= Simplified;
      } else if (auto *MinMax = dyn_cast<MinMaxIntrinsic>(Inst)) {
        Changed |= checkAndReplaceMinMax(MinMax, Info, ToRemove);
      }
      continue;
    }

    auto AddFact = [&](CmpInst::Predicate Pred, Value *A, Value *B) {
      LLVM_DEBUG(dbgs() << "Processing fact to add to the system: ";
                 dumpUnpackedICmp(dbgs(), Pred, A, B); dbgs() << "\n");
      if (Info.getCS(CmpInst::isSigned(Pred)).size() > MaxRows) {
        LLVM_DEBUG(
            dbgs()
            << "Skip adding constraint because system has too many rows.\n");
        return;
      }

      Info.addFact(Pred, A, B, CB.NumIn, CB.NumOut, DFSInStack);
      if (ReproducerModule && DFSInStack.size() > ReproducerCondStack.size())
        ReproducerCondStack.emplace_back(Pred, A, B);

      Info.transferToOtherSystem(Pred, A, B, CB.NumIn, CB.NumOut, DFSInStack);
      if (ReproducerModule && DFSInStack.size() > ReproducerCondStack.size()) {
        // Add dummy entries to ReproducerCondStack to keep it in sync with
        // DFSInStack.
        for (unsigned I = 0,
                      E = (DFSInStack.size() - ReproducerCondStack.size());
             I < E; ++I) {
          ReproducerCondStack.emplace_back(ICmpInst::BAD_ICMP_PREDICATE,
                                           nullptr, nullptr);
        }
      }
    };

    ICmpInst::Predicate Pred;
    if (!CB.isConditionFact()) {
      Value *X;
      if (match(CB.Inst, m_Intrinsic<Intrinsic::abs>(m_Value(X)))) {
        // If is_int_min_poison is true then we may assume llvm.abs >= 0.
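        // Illustrative example (with a placeholder value %x):
        //   %a = call i8 @llvm.abs.i8(i8 %x, i1 true)
        // lets the pass record both %a s>= 0 (INT_MIN inputs are poison) and
        // %a s>= %x below; with 'i1 false' only the latter fact is added.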
        if (cast<ConstantInt>(CB.Inst->getOperand(1))->isOne())
          AddFact(CmpInst::ICMP_SGE, CB.Inst,
                  ConstantInt::get(CB.Inst->getType(), 0));
        AddFact(CmpInst::ICMP_SGE, CB.Inst, X);
        continue;
      }

      if (auto *MinMax = dyn_cast<MinMaxIntrinsic>(CB.Inst)) {
        Pred = ICmpInst::getNonStrictPredicate(MinMax->getPredicate());
        AddFact(Pred, MinMax, MinMax->getLHS());
        AddFact(Pred, MinMax, MinMax->getRHS());
        continue;
      }
    }

    Value *A = nullptr, *B = nullptr;
    if (CB.isConditionFact()) {
      Pred = CB.Cond.Pred;
      A = CB.Cond.Op0;
      B = CB.Cond.Op1;
      if (CB.DoesHold.Pred != CmpInst::BAD_ICMP_PREDICATE &&
          !Info.doesHold(CB.DoesHold.Pred, CB.DoesHold.Op0, CB.DoesHold.Op1)) {
        LLVM_DEBUG({
          dbgs() << "Not adding fact ";
          dumpUnpackedICmp(dbgs(), Pred, A, B);
          dbgs() << " because precondition ";
          dumpUnpackedICmp(dbgs(), CB.DoesHold.Pred, CB.DoesHold.Op0,
                           CB.DoesHold.Op1);
          dbgs() << " does not hold.\n";
        });
        continue;
      }
    } else {
      bool Matched = match(CB.Inst, m_Intrinsic<Intrinsic::assume>(
                                        m_ICmp(Pred, m_Value(A), m_Value(B))));
      (void)Matched;
      assert(Matched && "Must have an assume intrinsic with an icmp operand");
    }
    AddFact(Pred, A, B);
  }

  if (ReproducerModule && !ReproducerModule->functions().empty()) {
    std::string S;
    raw_string_ostream StringS(S);
    ReproducerModule->print(StringS, nullptr);
    StringS.flush();
    OptimizationRemark Rem(DEBUG_TYPE, "Reproducer", &F);
    Rem << ore::NV("module") << S;
    ORE.emit(Rem);
  }

#ifndef NDEBUG
  unsigned SignedEntries =
      count_if(DFSInStack, [](const StackEntry &E) { return E.IsSigned; });
  assert(Info.getCS(false).size() - FunctionArgs.size() ==
             DFSInStack.size() - SignedEntries &&
         "updates to CS and DFSInStack are out of sync");
  assert(Info.getCS(true).size() == SignedEntries &&
         "updates to CS and DFSInStack are out of sync");
#endif

  for (Instruction *I : ToRemove)
    I->eraseFromParent();
  return Changed;
}

PreservedAnalyses ConstraintEliminationPass::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  if (!eliminateConstraints(F, DT, LI, SE, ORE))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  PA.preserve<ScalarEvolutionAnalysis>();
  PA.preserveSet<CFGAnalyses>();
  return PA;
}
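
// Usage note: the transform can be exercised in isolation with the new pass
// manager, e.g.
//   opt -passes=constraint-elimination -S input.ll
// assuming "constraint-elimination" is the name the pass is registered under
// in PassRegistry.def.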