Lines matching refs:Ops (references to `Ops` in LLVM's ScalarEvolution.cpp; the leading number on each line is its line number in the source file)

800 static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
802 if (Ops.size() < 2) return; // Noop
813 if (Ops.size() == 2) {
816 const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
823 llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
831 for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
832 const SCEV *S = Ops[i];
837 for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
838 if (Ops[j] == S) { // Found a duplicate.
840 std::swap(Ops[i+1], Ops[j]);
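
The GroupByComplexity helper above (lines 800-840) sorts the operand list by SCEV complexity and then swaps pointer-identical operands next to each other, so later folds only need to compare neighbors. A minimal standalone sketch of the same technique, using a hypothetical Expr struct in place of const SCEV * (not LLVM code):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Expr { int Complexity; int Id; }; // toy stand-in for const SCEV *

    // Sort by complexity, then pull duplicates (same Id) next to each
    // other, mirroring the stable_sort + swap loop in GroupByComplexity.
    static void groupByComplexity(std::vector<Expr> &Ops) {
      if (Ops.size() < 2) return; // Noop
      std::stable_sort(Ops.begin(), Ops.end(),
                       [](const Expr &L, const Expr &R) {
                         return L.Complexity < R.Complexity;
                       });
      for (size_t i = 0, e = Ops.size(); i + 1 < e; ++i) {
        size_t Next = i + 1;
        // Scan only within the run of equal complexity, as the original does.
        for (size_t j = i + 1; j != e && Ops[j].Complexity == Ops[i].Complexity; ++j)
          if (Ops[j].Id == Ops[i].Id)
            std::swap(Ops[Next++], Ops[j]); // duplicate lands right after i
      }
    }

    int main() {
      std::vector<Expr> Ops = {{2, 7}, {1, 5}, {2, 9}, {2, 7}};
      groupByComplexity(Ops);
      for (const Expr &E : Ops) std::printf("(%d,%d) ", E.Complexity, E.Id);
      // (1,5) (2,7) (2,7) (2,9) -- the two Id-7 operands are now adjacent
    }
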
848 /// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
850 static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
851 return any_of(Ops, [](const SCEV *S) {
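
hasHugeExpression (lines 850-851) is a size guard: recursive simplification bails out early when any operand's expression tree is already huge, rather than making it bigger. A toy analogue; the LLVM version compares a cached per-node size (S->getExpressionSize()) against a command-line threshold instead of re-walking the tree as this sketch does:

    #include <algorithm>
    #include <vector>

    struct Node { std::vector<const Node *> Operands; }; // hypothetical expr node

    static size_t subtreeSize(const Node *N) {
      size_t S = 1;
      for (const Node *Op : N->Operands) S += subtreeSize(Op);
      return S;
    }

    // True if any operand's subtree exceeds the cap.
    static bool hasHugeExpr(const std::vector<const Node *> &Ops, size_t Cap) {
      return std::any_of(Ops.begin(), Ops.end(),
                         [Cap](const Node *N) { return subtreeSize(N) > Cap; });
    }

    int main() {
      Node Leaf, Parent{{&Leaf, &Leaf}};
      std::vector<const Node *> Ops = {&Parent};
      return hasHugeExpr(Ops, 2) ? 0 : 1; // subtree size 3 > 2
    }
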
1788 SmallVector<const SCEV *, 4> Ops;
1790 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1791 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
1821 SmallVector<const SCEV *, 4> Ops;
1823 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1824 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
1960 SmallVector<const SCEV *, 4> Ops;
1962 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
1963 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
2199 SmallVector<const SCEV *, 4> Ops;
2201 Ops.push_back(getAnyExtendExpr(Op, Ty));
2202 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
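
The extension call sites above (lines 1788-2202) rebuild a zext/sext through an add or mul one operand at a time; that rewrite is only sound when the narrow operation cannot wrap (NUW for zext, NSW for sext), which is why the rebuilt expressions carry FlagNUW/FlagNSW. A quick arithmetic demonstration of why the no-wrap condition matters:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // zext(a + b) == zext(a) + zext(b) holds exactly when a + b does
      // not wrap in the narrow type (the NUW case the SCEV code requires).
      uint8_t A = 200, B = 100;                      // 200 + 100 wraps in 8 bits
      unsigned Wrapped = (uint32_t)(uint8_t)(A + B); // zext after narrow add: 44
      unsigned Wide = (uint32_t)A + (uint32_t)B;     // add after zext: 300
      std::printf("%u vs %u\n", Wrapped, Wide);      // differ: distribution invalid

      uint8_t C = 20, D = 30;                        // no wrap
      std::printf("%u vs %u\n", (unsigned)(uint8_t)(C + D),
                  (uint32_t)C + (uint32_t)D);        // equal: distribution valid
    }
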
2213 /// Process the given Ops list, which is a list of operands to be added under
2240 ArrayRef<const SCEV *> Ops, const APInt &Scale,
2246 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2256 for (; i != Ops.size(); ++i) {
2257 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
2285 M.insert({Ops[i], Scale});
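
CollectAddOperandsWithScales (lines 2240-2285) folds leading constants into an accumulator and records every other operand in a map keyed by operand with an accumulated scale, so the same operand appearing twice becomes one entry. A toy analogue over symbolic terms (the names here are illustrative, not LLVM's):

    #include <cstdio>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
      // Terms of "4 + 2*x + 3*y + 5*x" as (scale, symbol) pairs; an empty
      // symbol plays the role of the leading SCEVConstant.
      std::vector<std::pair<long, std::string>> Terms = {
          {4, ""}, {2, "x"}, {3, "y"}, {5, "x"}};
      long AccumulatedConstant = 0;
      std::map<std::string, long> M; // operand -> accumulated scale
      for (auto &[Scale, Sym] : Terms) {
        if (Sym.empty()) { AccumulatedConstant += Scale; continue; }
        M[Sym] += Scale; // merge duplicate operands by adding their scales
      }
      std::printf("const=%ld\n", AccumulatedConstant); // const=4
      for (auto &[Sym, Scale] : M)
        std::printf("%ld*%s\n", Scale, Sym.c_str());   // 7*x, 3*y
    }
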
2421 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and
2426 const ArrayRef<const SCEV *> Ops,
2446 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
2453 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
2454 isa<SCEVConstant>(Ops[0])) {
2467 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
2473 if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
2481 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
2489 !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && Ops.size() == 2 &&
2490 Ops[0]->isZero() && IsKnownNonNegative(Ops[1]))
2495 Ops.size() == 2) {
2496 if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0]))
2497 if (UDiv->getOperand(1) == Ops[1])
2499 if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1]))
2500 if (UDiv->getOperand(1) == Ops[0])
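
StrengthenNoWrapFlags (lines 2421-2500) tries to upgrade FlagAnyWrap to NUW/NSW; for `C + x` with constant C it checks whether x's known range sits inside the region where the add provably cannot wrap (the NSWRegion/NUWRegion containment tests visible above). The underlying range test, sketched numerically for the signed case:

    #include <cstdint>
    #include <cstdio>
    #include <limits>

    // For a signed add x + C, overflow is impossible iff x's whole known
    // range [Lo, Hi] still fits in int32_t after adding C; this is the
    // idea behind the NSWRegion.contains(getSignedRange(Ops[1])) check.
    static bool addIsNSW(int32_t C, int32_t Lo, int32_t Hi) {
      int64_t Min = (int64_t)std::numeric_limits<int32_t>::min();
      int64_t Max = (int64_t)std::numeric_limits<int32_t>::max();
      return (int64_t)Lo + C >= Min && (int64_t)Hi + C <= Max;
    }

    int main() {
      std::printf("%d\n", addIsNSW(10, 0, 1000));      // 1: [0,1000]+10 fits
      std::printf("%d\n", addIsNSW(10, 0, INT32_MAX)); // 0: INT32_MAX+10 overflows
    }
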
2512 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2517 assert(!Ops.empty() && "Cannot get empty add!");
2518 if (Ops.size() == 1) return Ops[0];
2520 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2521 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2522 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2525 Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); });
2530 GroupByComplexity(Ops, &LI, DT);
2534 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2536 assert(Idx < Ops.size());
2537 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2539 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2540 if (Ops.size() == 2) return Ops[0];
2541 Ops.erase(Ops.begin()+1); // Erase the folded element
2542 LHSC = cast<SCEVConstant>(Ops[0]);
2547 Ops.erase(Ops.begin());
2551 if (Ops.size() == 1) return Ops[0];
2555 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
2556 return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
2560 if (Depth > MaxArithDepth || hasHugeExpression(Ops))
2561 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2563 if (SCEV *S = findExistingSCEVInCache(scAddExpr, Ops)) {
2567 Add->setNoWrapFlags(ComputeFlags(Ops));
2574 Type *Ty = Ops[0]->getType();
2576 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2577 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
2580 while (i+Count != e && Ops[i+Count] == Ops[i])
2584 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
2585 if (Ops.size() == Count)
2587 Ops[i] = Mul;
2588 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2593 return getAddExpr(Ops, OrigFlags, Depth + 1);
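
Inside getAddExpr, the scan at lines 2576-2588 relies on GroupByComplexity having made duplicates adjacent: a run of Count equal operands is replaced by Count*Op and simplification restarts. A sketch of the run-collapsing step on plain integers standing in for SCEV pointers:

    #include <cstdio>
    #include <vector>

    struct Term { int Sym; int Scale; };

    // Collapse adjacent runs of equal symbols: x + y + y + y -> x + 3*y,
    // the same rewrite getAddExpr performs before recursing.
    static std::vector<Term> collapseRuns(const std::vector<int> &Ops) {
      std::vector<Term> Out;
      for (size_t i = 0; i < Ops.size();) {
        size_t Count = 1;
        while (i + Count < Ops.size() && Ops[i + Count] == Ops[i]) ++Count;
        Out.push_back({Ops[i], (int)Count});
        i += Count;
      }
      return Out;
    }

    int main() {
      for (Term T : collapseRuns({1, 2, 2, 2}))
        std::printf("%d*sym%d ", T.Scale, T.Sym); // 1*sym1 3*sym2
    }
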
2604 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
2606 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2618 for (const SCEV *Op : Ops) {
2660 if (Ops.size() == 2) {
2664 const SCEV *A = Ops[0];
2665 const SCEV *B = Ops[1];
2700 if (Ops.size() == 2) {
2701 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[0]);
2706 if (matchURem(Mul->getOperand(1), X, Y) && X == Ops[1]) {
2713 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2717 if (Idx < Ops.size()) {
2723 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2724 if (Ops.size() > AddOpsInlineThreshold ||
2729 Ops.erase(Ops.begin()+Idx);
2730 append_range(Ops, Add->operands());
2739 return getAddExpr(Ops, CommonFlags, Depth + 1);
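
Lines 2723-2739 splice the operands of nested SCEVAddExprs directly into Ops (bounded by AddOpsInlineThreshold) so a single flat list gets simplified. A standalone flattening sketch with a hypothetical Sum node; the threshold handling is simplified relative to the original:

    #include <cstdio>
    #include <vector>

    struct Sum {
      int Leaf = 0;                // value if this node is a leaf
      std::vector<Sum *> Operands; // non-empty => this node is an add
    };

    // Splice nested adds into a flat operand list, up to a size cap,
    // mirroring the Ops.erase + append_range(Ops, Add->operands()) loop.
    static void flatten(std::vector<Sum *> &Ops, size_t Threshold) {
      for (size_t i = 0; i < Ops.size();) {
        if (Ops[i]->Operands.empty() || Ops.size() > Threshold) { ++i; continue; }
        Sum *Add = Ops[i];
        Ops.erase(Ops.begin() + i);
        Ops.insert(Ops.end(), Add->Operands.begin(), Add->Operands.end());
      }
    }

    int main() {
      Sum A{1}, B{2}, C{3};
      Sum Inner{0, {&A, &B}};
      std::vector<Sum *> Ops = {&Inner, &C};
      flatten(Ops, /*Threshold=*/8);
      std::printf("%zu operands\n", Ops.size()); // 3: Inner spliced into A, B
    }
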
2743 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2748 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2754 Ops, APInt(BitWidth, 1), *this)) {
2768 Ops.clear();
2770 Ops.push_back(getConstant(AccumulatedConstant));
2773 Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1));
2775 Ops.push_back(getMulExpr(
2781 if (Ops.empty())
2783 if (Ops.size() == 1)
2784 return Ops[0];
2785 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2792 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2793 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2798 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2799 if (MulOpSCEV == Ops[AddOp]) {
2814 if (Ops.size() == 2) return OuterMul;
2816 Ops.erase(Ops.begin()+AddOp);
2817 Ops.erase(Ops.begin()+Idx-1);
2819 Ops.erase(Ops.begin()+Idx);
2820 Ops.erase(Ops.begin()+AddOp-1);
2822 Ops.push_back(OuterMul);
2823 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2828 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2830 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2856 if (Ops.size() == 2) return OuterMul;
2857 Ops.erase(Ops.begin()+Idx);
2858 Ops.erase(Ops.begin()+OtherMulIdx-1);
2859 Ops.push_back(OuterMul);
2860 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
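
The pairwise scan at lines 2792-2860 reassociates multiplies that share a factor, rewriting X*C1 + X*C2 into X*(C1+C2) via the OuterMul it pushes back onto Ops. The rewrite is plain distributivity; a brute-force check:

    #include <cassert>

    int main() {
      // The fold getAddExpr applies: X*C1 + X*C2 == X*(C1+C2).
      for (long X = -3; X <= 3; ++X)
        for (long C1 = -2; C1 <= 2; ++C1)
          for (long C2 = -2; C2 <= 2; ++C2)
            assert(X * C1 + X * C2 == X * (C1 + C2));
    }
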
2869 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2873 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2877 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2879 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2880 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
2881 LIOps.push_back(Ops[i]);
2882 Ops.erase(Ops.begin()+i);
2926 if (Ops.size() == 1) return NewRec;
2930 if (Ops[i] == AddRec) {
2931 Ops[i] = NewRec;
2934 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2941 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2946 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2949 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2952 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2954 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2966 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2970 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
2971 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
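
When two add recurrences run over the same loop (lines 2941-2971), their operands add element-wise: {A1,+,B1}<L> + {A2,+,B2}<L> becomes {A1+A2,+,B1+B2}<L>, which is the AddRecOps list handed to getAddRecExpr above. Verifying the identity by direct simulation of the affine closed form A + B*i:

    #include <cassert>

    int main() {
      long A1 = 5, B1 = 3, A2 = -2, B2 = 7;
      for (long i = 0; i < 100; ++i) {
        long Lhs = (A1 + B1 * i) + (A2 + B2 * i); // sum of the two addrecs
        long Rhs = (A1 + A2) + (B1 + B2) * i;     // the folded addrec
        assert(Lhs == Rhs);
      }
    }
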
2981 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2985 ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
2989 for (const SCEV *Op : Ops)
2995 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2996 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2998 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
3000 registerUser(S, Ops);
3007 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
3011 for (const SCEV *Op : Ops)
3018 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3019 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3021 SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
3024 registerUser(S, Ops);
3031 ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
3035 for (const SCEV *Op : Ops)
3041 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3042 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3044 O, Ops.size());
3046 registerUser(S, Ops);
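
getOrCreateAddExpr, getOrCreateAddRecExpr, and getOrCreateMulExpr (lines 2985-3046) all follow the same hash-consing pattern: profile (kind, operands) into a FoldingSetNodeID, return the existing node on a hit, otherwise copy the operand array out of SCEVAllocator storage and intern the new node, so structurally equal SCEVs are pointer-equal. A toy intern table with the same effect (std::map in place of FoldingSet, integer ids in place of nodes):

    #include <cstdio>
    #include <map>
    #include <utility>
    #include <vector>

    using Key = std::pair<int, std::vector<int>>; // (kind, operand ids)

    // Return the unique id for this structure, creating one on first use.
    static int internExpr(int Kind, const std::vector<int> &Ops,
                          std::map<Key, int> &Table) {
      return Table.try_emplace({Kind, Ops}, (int)Table.size()).first->second;
    }

    int main() {
      std::map<Key, int> Table;
      int A = internExpr(/*scAddExpr=*/1, {10, 20}, Table);
      int B = internExpr(1, {10, 20}, Table);
      std::printf("%d %d\n", A, B); // identical ids: structural uniquing
    }
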
3107 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
3112 assert(!Ops.empty() && "Cannot get empty mul!");
3113 if (Ops.size() == 1) return Ops[0];
3115 Type *ETy = Ops[0]->getType();
3117 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
3118 assert(Ops[i]->getType() == ETy &&
3123 GroupByComplexity(Ops, &LI, DT);
3127 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3129 assert(Idx < Ops.size());
3130 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3132 Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
3133 if (Ops.size() == 2) return Ops[0];
3134 Ops.erase(Ops.begin()+1); // Erase the folded element
3135 LHSC = cast<SCEVConstant>(Ops[0]);
3144 Ops.erase(Ops.begin());
3148 if (Ops.size() == 1)
3149 return Ops[0];
3153 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
3154 return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
3158 if (Depth > MaxArithDepth || hasHugeExpression(Ops))
3159 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3161 if (SCEV *S = findExistingSCEVInCache(scMulExpr, Ops)) {
3165 Mul->setNoWrapFlags(ComputeFlags(Ops));
3169 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3170 if (Ops.size() == 2) {
3172 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
3187 if (Ops[0]->isAllOnesValue()) {
3190 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
3194 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
3201 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
3205 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
3227 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
3231 if (Idx < Ops.size()) {
3233 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
3234 if (Ops.size() > MulOpsInlineThreshold)
3238 Ops.erase(Ops.begin()+Idx);
3239 append_range(Ops, Mul->operands());
3247 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3253 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
3257 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
3261 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
3262 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3263 if (isAvailableAtLoopEntry(Ops[i], AddRec->getLoop())) {
3264 LIOps.push_back(Ops[i]);
3265 Ops.erase(Ops.begin()+i);
3299 if (Ops.size() == 1) return NewRec;
3303 if (Ops[i] == AddRec) {
3304 Ops[i] = NewRec;
3307 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3326 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3329 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3371 if (Ops.size() == 2) return NewAddRec;
3372 Ops[Idx] = NewAddRec;
3373 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
3381 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
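
In getMulExpr, a loop-invariant factor is pushed into an addrec's operands (lines 3257-3307): L * {A,+,B} becomes {L*A,+,L*B}. (Multiplying two addrecs, lines 3326-3381, needs the more involved product rule instead.) The invariant-factor identity, checked by simulation:

    #include <cassert>

    int main() {
      // A loop-invariant multiplier scales both the start and the step:
      // L * (A + B*i) == (L*A) + (L*B)*i for every iteration i.
      long L = 3, A = 1, B = 2;
      for (long i = 0; i < 100; ++i)
        assert(L * (A + B * i) == (L * A) + (L * B) * i);
    }
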
3389 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3821 ArrayRef<const SCEV *> Ops) {
3824 for (const SCEV *Op : Ops)
3836 SmallVectorImpl<const SCEV *> &Ops) {
3838 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
3839 if (Ops.size() == 1) return Ops[0];
3841 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
3842 for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
3843 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
3845 assert(Ops[0]->getType()->isPointerTy() ==
3846 Ops[i]->getType()->isPointerTy() &&
3855 GroupByComplexity(Ops, &LI, DT);
3858 if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) {
3864 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3866 assert(Idx < Ops.size());
3882 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3886 Ops[0] = getConstant(Fold);
3887 Ops.erase(Ops.begin()+1); // Erase the folded element
3888 if (Ops.size() == 1) return Ops[0];
3889 LHSC = cast<SCEVConstant>(Ops[0]);
3897 Ops.erase(Ops.begin());
3905 if (Ops.size() == 1) return Ops[0];
3909 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind)
3914 if (Idx < Ops.size()) {
3916 while (Ops[Idx]->getSCEVType() == Kind) {
3917 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]);
3918 Ops.erase(Ops.begin()+Idx);
3919 append_range(Ops, SMME->operands());
3924 return getMinMaxExpr(Kind, Ops);
3936 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
3937 if (Ops[i] == Ops[i + 1] ||
3938 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
3941 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
3944 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
3945 Ops[i + 1])) {
3947 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
3953 if (Ops.size() == 1) return Ops[0];
3955 assert(!Ops.empty() && "Reduced smax down to nothing!");
3961 for (const SCEV *Op : Ops)
3967 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3968 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3970 SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
3973 registerUser(S, Ops);
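
getMinMaxExpr (lines 3836-3973) folds leading constants, splices same-kind children into the operand list (smax(smax(x,y),z) -> smax(x,y,z)), and erases operands that are duplicates of, or provably ordered against, a neighbor. The duplicate-erasure idea on plain integers; the SCEV code compares neighbors after GroupByComplexity rather than fully sorting as this sketch does:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
      // Equal neighbors are redundant inside a max, so they can be erased
      // without changing the result -- the same Ops.erase fold as above.
      std::vector<int> Ops = {7, 3, 7, 9, 3};
      std::sort(Ops.begin(), Ops.end());
      Ops.erase(std::unique(Ops.begin(), Ops.end()), Ops.end());
      int Max = *std::max_element(Ops.begin(), Ops.end());
      std::printf("operands=%zu max=%d\n", Ops.size(), Max); // operands=3 max=9
    }
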
4036 SmallVector<const SCEV *> Ops;
4037 Ops.reserve(OrigOps.size());
4044 Ops.emplace_back(*NewOp);
4048 NewOps = std::move(Ops);
4256 SmallVectorImpl<const SCEV *> &Ops) {
4259 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
4260 if (Ops.size() == 1)
4261 return Ops[0];
4263 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
4264 for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
4265 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
4267 assert(Ops[0]->getType()->isPointerTy() ==
4268 Ops[i]->getType()->isPointerTy() &&
4277 if (const SCEV *S = findExistingSCEVInCache(Kind, Ops))
4285 bool Changed = Deduplicator.visit(Kind, Ops, Ops);
4287 return getSequentialMinMaxExpr(Kind, Ops);
4295 while (Idx < Ops.size()) {
4296 if (Ops[Idx]->getSCEVType() != Kind) {
4300 const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[Idx]);
4301 Ops.erase(Ops.begin() + Idx);
4302 Ops.insert(Ops.begin() + Idx, SMME->operands().begin(),
4308 return getSequentialMinMaxExpr(Kind, Ops);
4315 SaturationPoint = getZero(Ops[0]->getType());
4322 for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
4326 if (::impliesPoison(Ops[i], Ops[i - 1]) ||
4327 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, Ops[i - 1],
4329 SmallVector<const SCEV *> SeqOps = {Ops[i - 1], Ops[i]};
4330 Ops[i - 1] = getMinMaxExpr(
4333 Ops.erase(Ops.begin() + i);
4334 return getSequentialMinMaxExpr(Kind, Ops);
4338 if (isKnownViaNonRecursiveReasoning(Pred, Ops[i - 1], Ops[i])) {
4339 Ops.erase(Ops.begin() + i);
4340 return getSequentialMinMaxExpr(Kind, Ops);
4348 for (const SCEV *Op : Ops)
4355 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
4356 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
4358 SCEVSequentialMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
4361 registerUser(S, Ops);
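
getSequentialMinMaxExpr (lines 4256-4361) must preserve left-to-right evaluation: umin_seq(a, b) behaves like `a == 0 ? 0 : umin(a, b)`, so b's poison cannot leak once a has hit the saturation point (the getZero SaturationPoint above). That is why an element can only be folded into a plain umin when Ops[i] implying poison of Ops[i-1], or a proven inequality, makes the short-circuit irrelevant. Modeling the semantic difference with std::optional standing in for poison:

    #include <algorithm>
    #include <cstdio>
    #include <optional>

    using Val = std::optional<unsigned>; // nullopt models a poison value

    // Plain umin propagates poison from either side.
    static Val umin(Val A, Val B) {
      if (!A || !B) return std::nullopt;
      return std::min(*A, *B);
    }

    // Sequential umin stops at the saturation point 0 and never looks at B.
    static Val uminSeq(Val A, Val B) {
      if (!A) return std::nullopt;
      if (*A == 0) return 0u; // short-circuit: B is not evaluated
      return umin(A, B);
    }

    int main() {
      std::printf("%d\n", umin(Val{0u}, std::nullopt).has_value());    // 0: poison
      std::printf("%d\n", uminSeq(Val{0u}, std::nullopt).has_value()); // 1: folds to 0
    }
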
4366 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
4367 return getSMaxExpr(Ops);
4370 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
4371 return getMinMaxExpr(scSMaxExpr, Ops);
4375 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
4376 return getUMaxExpr(Ops);
4379 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
4380 return getMinMaxExpr(scUMaxExpr, Ops);
4385 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
4386 return getSMinExpr(Ops);
4389 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
4390 return getMinMaxExpr(scSMinExpr, Ops);
4395 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
4396 return getUMinExpr(Ops, Sequential);
4399 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops,
4401 return Sequential ? getSequentialMinMaxExpr(scSequentialUMinExpr, Ops)
4402 : getMinMaxExpr(scUMinExpr, Ops);
4652 SmallVector<const SCEV *> Ops{AddRec->operands()};
4653 Ops[0] = removePointerBase(Ops[0]);
4656 return getAddRecExpr(Ops, AddRec->getLoop(), SCEV::FlagAnyWrap);
4660 SmallVector<const SCEV *> Ops{Add->operands()};
4662 for (const SCEV *&AddOp : Ops) {
4671 return getAddExpr(Ops);
4815 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
4816 return getUMinFromMismatchedTypes(Ops, Sequential);
4820 ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
4822 assert(!Ops.empty() && "At least one operand must be!");
4824 if (Ops.size() == 1)
4825 return Ops[0];
4829 for (const auto *S : Ops)
4838 for (const auto *S : Ops)
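
getUMinFromMismatchedTypes (lines 4820-4838) makes two passes over Ops, which is what the two "for (const auto *S : Ops)" loops above are: one pass to find the widest effective type, one to extend each operand to it before taking the umin. The two-pass shape, with (bit-width, value) pairs as a stand-in for typed SCEVs:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    int main() {
      std::vector<std::pair<unsigned, uint64_t>> Ops = {{8, 200}, {32, 70000}};
      // Pass 1: widest type wins.
      unsigned MaxBits = 0;
      for (const auto &P : Ops) MaxBits = std::max(MaxBits, P.first);
      // Pass 2: zero-extension preserves the value, so the umin of the
      // widened operands is just the min of the values.
      uint64_t Min = UINT64_MAX;
      for (const auto &P : Ops) Min = std::min(Min, P.second);
      std::printf("width=%u umin=%llu\n", MaxBits, (unsigned long long)Min);
    }
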
5533 SmallVector<const SCEV *, 8> Ops;
5536 Ops.push_back(Add->getOperand(i));
5537 const SCEV *Accum = getAddExpr(Ops);
5863 SmallVector<const SCEV *, 8> Ops;
5866 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
5868 const SCEV *Accum = getAddExpr(Ops);
7264 ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
7281 for (const auto *S : Ops)
7299 ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops) {
7301 return getDefiningScopeBound(Ops, Discard);
7450 SmallVector<Value *> Ops;
7459 CreatedSCEV = getOperandsToCreate(CurV, Ops);
7468 for (Value *Op : Ops)
7477 ScalarEvolution::getOperandsToCreate(Value *V, SmallVectorImpl<Value *> &Ops) {
7508 Ops.push_back(BO->Op);
7512 Ops.push_back(BO->RHS);
7521 Ops.push_back(BO->LHS);
7529 Ops.push_back(BO->LHS);
7559 Ops.push_back(BO->LHS);
7560 Ops.push_back(BO->RHS);
7569 Ops.push_back(U->getOperand(0));
7574 Ops.push_back(U->getOperand(0));
7581 Ops.push_back(U->getOperand(0));
7582 Ops.push_back(U->getOperand(1));
7589 Ops.push_back(Index);
7623 Ops.push_back(Inc);
7630 Ops.push_back(RV);
7637 Ops.push_back(II->getArgOperand(0));
7645 Ops.push_back(II->getArgOperand(0));
7646 Ops.push_back(II->getArgOperand(1));
7651 Ops.push_back(II->getArgOperand(0));
8590 SmallVector<const SCEV *, 2> Ops;
8598 Ops.push_back(BECount);
8611 return SE->getUMinFromMismatchedTypes(Ops, /* Sequential */ true);
9964 ArrayRef<const SCEV *> Ops = V->operands();
9967 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
9968 const SCEV *OpAtScope = getSCEVAtScope(Ops[i], L);
9969 if (OpAtScope != Ops[i]) {
9973 NewOps.reserve(Ops.size());
9974 append_range(NewOps, Ops.take_front(i));
9978 OpAtScope = getSCEVAtScope(Ops[i], L);
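
The getSCEVAtScope operand loop (lines 9964-9978) is copy-on-write: it only materializes NewOps when the first operand actually changes, copying the unchanged prefix with Ops.take_front(i) and rewriting from there. The same pattern over plain ints, with a toy rewrite function in place of getSCEVAtScope:

    #include <cstdio>
    #include <vector>

    static int rewrite(int X) { return X == 3 ? 30 : X; } // toy "at scope" map

    int main() {
      std::vector<int> Ops = {1, 2, 3, 4};
      std::vector<int> NewOps; // only populated once something changes
      bool Changed = false;
      for (size_t i = 0; i < Ops.size(); ++i) {
        int At = rewrite(Ops[i]);
        if (!Changed && At != Ops[i]) {
          // First change: copy the untouched prefix, as the SCEV code does
          // with NewOps.reserve + append_range(NewOps, Ops.take_front(i)).
          NewOps.assign(Ops.begin(), Ops.begin() + i);
          Changed = true;
        }
        if (Changed) NewOps.push_back(At);
      }
      std::printf("%zu\n", Changed ? NewOps.size() : Ops.size()); // 4
    }
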
13435 SmallVector<const SCEV *, 3> Ops;
13439 Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
13446 Ops.push_back(Last);
13447 return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
14878 ArrayRef<const SCEV *> Ops) {
14879 for (const auto *Op : Ops)
15227 SmallVector<const SCEV *> Ops = {
15229 return SE.getMinMaxExpr(SCTy, Ops);