1 //===- llvm/Analysis/IVDescriptors.cpp - IndVar Descriptors -----*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file "describes" induction and recurrence variables.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/IVDescriptors.h"
14 #include "llvm/Analysis/DemandedBits.h"
15 #include "llvm/Analysis/LoopInfo.h"
16 #include "llvm/Analysis/ScalarEvolution.h"
17 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
18 #include "llvm/Analysis/ValueTracking.h"
19 #include "llvm/IR/Dominators.h"
20 #include "llvm/IR/Instructions.h"
21 #include "llvm/IR/Module.h"
22 #include "llvm/IR/PatternMatch.h"
23 #include "llvm/IR/ValueHandle.h"
24 #include "llvm/Support/Debug.h"
25 #include "llvm/Support/KnownBits.h"
26 
27 #include <set>
28 
29 using namespace llvm;
30 using namespace llvm::PatternMatch;
31 
32 #define DEBUG_TYPE "iv-descriptors"
33 
34 bool RecurrenceDescriptor::areAllUsesIn(Instruction *I,
35                                         SmallPtrSetImpl<Instruction *> &Set) {
36   for (const Use &Use : I->operands())
37     if (!Set.count(dyn_cast<Instruction>(Use)))
38       return false;
39   return true;
40 }
41 
42 bool RecurrenceDescriptor::isIntegerRecurrenceKind(RecurKind Kind) {
43   switch (Kind) {
44   default:
45     break;
46   case RecurKind::Add:
47   case RecurKind::Mul:
48   case RecurKind::Or:
49   case RecurKind::And:
50   case RecurKind::Xor:
51   case RecurKind::SMax:
52   case RecurKind::SMin:
53   case RecurKind::UMax:
54   case RecurKind::UMin:
55   case RecurKind::SelectICmp:
56   case RecurKind::SelectFCmp:
57     return true;
58   }
59   return false;
60 }
61 
62 bool RecurrenceDescriptor::isFloatingPointRecurrenceKind(RecurKind Kind) {
63   return (Kind != RecurKind::None) && !isIntegerRecurrenceKind(Kind);
64 }
65 
66 bool RecurrenceDescriptor::isArithmeticRecurrenceKind(RecurKind Kind) {
67   switch (Kind) {
68   default:
69     break;
70   case RecurKind::Add:
71   case RecurKind::Mul:
72   case RecurKind::FAdd:
73   case RecurKind::FMul:
74   case RecurKind::FMulAdd:
75     return true;
76   }
77   return false;
78 }
79 
80 /// Determines if Phi may have been type-promoted. If Phi has a single user
81 /// that ANDs the Phi with a type mask, return the user. RT is updated to
82 /// account for the narrower bit width represented by the mask, and the AND
83 /// instruction is added to CI.
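/// For illustration only (hand-written IR, not taken from any particular
/// test), a reduction that InstCombine promoted from i8 to i32 might look
/// like:
///   %sum = phi i32 [ 0, %ph ], [ %add, %loop ]
///   %and = and i32 %sum, 255        ; single user of the phi, mask = 2^8-1
///   %add = add i32 %and, %val
/// in which case RT is updated to i8 and the 'and' is returned as the new
/// starting instruction.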
84 static Instruction *lookThroughAnd(PHINode *Phi, Type *&RT,
85                                    SmallPtrSetImpl<Instruction *> &Visited,
86                                    SmallPtrSetImpl<Instruction *> &CI) {
87   if (!Phi->hasOneUse())
88     return Phi;
89 
90   const APInt *M = nullptr;
91   Instruction *I, *J = cast<Instruction>(Phi->use_begin()->getUser());
92 
93   // Matches either I & 2^x-1 or 2^x-1 & I. If we find a match, we update RT
94   // with a new integer type of the corresponding bit width.
95   if (match(J, m_c_And(m_Instruction(I), m_APInt(M)))) {
96     int32_t Bits = (*M + 1).exactLogBase2();
97     if (Bits > 0) {
98       RT = IntegerType::get(Phi->getContext(), Bits);
99       Visited.insert(Phi);
100       CI.insert(J);
101       return J;
102     }
103   }
104   return Phi;
105 }
106 
107 /// Compute the minimal bit width needed to represent a reduction whose exit
108 /// instruction is given by Exit.
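/// As a hedged example (assuming DemandedBits reports that only the low 8
/// bits of Exit are live), this would return {i8, false}; if instead only
/// sign-bit analysis narrows the value, IsSigned is set and one extra sign
/// bit is kept so the result can later be sign-extended.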
109 static std::pair<Type *, bool> computeRecurrenceType(Instruction *Exit,
110                                                      DemandedBits *DB,
111                                                      AssumptionCache *AC,
112                                                      DominatorTree *DT) {
113   bool IsSigned = false;
114   const DataLayout &DL = Exit->getModule()->getDataLayout();
115   uint64_t MaxBitWidth = DL.getTypeSizeInBits(Exit->getType());
116 
117   if (DB) {
118     // Use the demanded bits analysis to determine the bits that are live out
119     // of the exit instruction, rounding up to the nearest power of two. If the
120     // use of demanded bits results in a smaller bit width, we know the value
121     // must be positive (i.e., IsSigned = false), because if this were not the
122     // case, the sign bit would have been demanded.
123     auto Mask = DB->getDemandedBits(Exit);
124     MaxBitWidth = Mask.getBitWidth() - Mask.countLeadingZeros();
125   }
126 
127   if (MaxBitWidth == DL.getTypeSizeInBits(Exit->getType()) && AC && DT) {
128     // If demanded bits wasn't able to limit the bit width, we can try to use
129     // value tracking instead. This can be the case, for example, if the value
130     // may be negative.
131     auto NumSignBits = ComputeNumSignBits(Exit, DL, 0, AC, nullptr, DT);
132     auto NumTypeBits = DL.getTypeSizeInBits(Exit->getType());
133     MaxBitWidth = NumTypeBits - NumSignBits;
134     KnownBits Bits = computeKnownBits(Exit, DL);
135     if (!Bits.isNonNegative()) {
136       // If the value is not known to be non-negative, we set IsSigned to true,
137       // meaning that we will use sext instructions instead of zext
138       // instructions to restore the original type.
139       IsSigned = true;
140       // Make sure at least one sign bit is included in the result, so it
141       // will get properly sign-extended.
142       ++MaxBitWidth;
143     }
144   }
145   if (!isPowerOf2_64(MaxBitWidth))
146     MaxBitWidth = NextPowerOf2(MaxBitWidth);
147 
148   return std::make_pair(Type::getIntNTy(Exit->getContext(), MaxBitWidth),
149                         IsSigned);
150 }
151 
152 /// Collect cast instructions that can be ignored in the vectorizer's cost
153 /// model, given a reduction exit value and the minimal type in which the
154 /// reduction can be represented. Also search casts to the recurrence type
155 /// to find the minimum width used by the recurrence.
156 static void collectCastInstrs(Loop *TheLoop, Instruction *Exit,
157                               Type *RecurrenceType,
158                               SmallPtrSetImpl<Instruction *> &Casts,
159                               unsigned &MinWidthCastToRecurTy) {
160 
161   SmallVector<Instruction *, 8> Worklist;
162   SmallPtrSet<Instruction *, 8> Visited;
163   Worklist.push_back(Exit);
164   MinWidthCastToRecurTy = -1U;
165 
166   while (!Worklist.empty()) {
167     Instruction *Val = Worklist.pop_back_val();
168     Visited.insert(Val);
169     if (auto *Cast = dyn_cast<CastInst>(Val)) {
170       if (Cast->getSrcTy() == RecurrenceType) {
171         // If the source type of a cast instruction is equal to the recurrence
172         // type, it will be eliminated, and should be ignored in the vectorizer
173         // cost model.
174         Casts.insert(Cast);
175         continue;
176       }
177       if (Cast->getDestTy() == RecurrenceType) {
178         // The minimum width used by the recurrence is found by checking for
179         // casts on its operands. The minimum width is used by the vectorizer
180         // when finding the widest type for in-loop reductions without any
181         // loads/stores.
182         MinWidthCastToRecurTy = std::min<unsigned>(
183             MinWidthCastToRecurTy, Cast->getSrcTy()->getScalarSizeInBits());
184         continue;
185       }
186     }
187     // Add all operands to the work list if they are loop-varying values that
188     // we haven't yet visited.
189     for (Value *O : cast<User>(Val)->operands())
190       if (auto *I = dyn_cast<Instruction>(O))
191         if (TheLoop->contains(I) && !Visited.count(I))
192           Worklist.push_back(I);
193   }
194 }
195 
196 // Check if a given Phi node can be recognized as an ordered reduction for
197 // vectorizing floating point operations without unsafe math.
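// For example (illustrative IR, not from a specific test), a strict FAdd
// reduction that must be kept in order because it lacks reassociation flags:
//   %sum = phi float [ 0.0, %entry ], [ %sum.next, %loop ]
//   %sum.next = fadd float %sum, %val   ; no 'reassoc', so ExactFPMathInst
// Here Exit is %sum.next, it has the phi as an operand, and it has fewer
// than three uses, so the reduction can be vectorized as an ordered
// (in-order) reduction.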
198 static bool checkOrderedReduction(RecurKind Kind, Instruction *ExactFPMathInst,
199                                   Instruction *Exit, PHINode *Phi) {
200   // Currently only FAdd and FMulAdd are supported.
201   if (Kind != RecurKind::FAdd && Kind != RecurKind::FMulAdd)
202     return false;
203 
204   if (Kind == RecurKind::FAdd && Exit->getOpcode() != Instruction::FAdd)
205     return false;
206 
207   if (Kind == RecurKind::FMulAdd &&
208       !RecurrenceDescriptor::isFMulAddIntrinsic(Exit))
209     return false;
210 
211   // Ensure the exit instruction has only one user other than the reduction PHI
212   if (Exit != ExactFPMathInst || Exit->hasNUsesOrMore(3))
213     return false;
214 
215   // The only pattern accepted is the one in which the reduction PHI
216   // is used as one of the operands of the exit instruction
217   auto *Op0 = Exit->getOperand(0);
218   auto *Op1 = Exit->getOperand(1);
219   if (Kind == RecurKind::FAdd && Op0 != Phi && Op1 != Phi)
220     return false;
221   if (Kind == RecurKind::FMulAdd && Exit->getOperand(2) != Phi)
222     return false;
223 
224   LLVM_DEBUG(dbgs() << "LV: Found an ordered reduction: Phi: " << *Phi
225                     << ", ExitInst: " << *Exit << "\n");
226 
227   return true;
228 }
229 
230 bool RecurrenceDescriptor::AddReductionVar(
231     PHINode *Phi, RecurKind Kind, Loop *TheLoop, FastMathFlags FuncFMF,
232     RecurrenceDescriptor &RedDes, DemandedBits *DB, AssumptionCache *AC,
233     DominatorTree *DT, ScalarEvolution *SE) {
234   if (Phi->getNumIncomingValues() != 2)
235     return false;
236 
237   // Reduction variables are only found in the loop header block.
238   if (Phi->getParent() != TheLoop->getHeader())
239     return false;
240 
241   // Obtain the reduction start value from the value that comes from the loop
242   // preheader.
243   Value *RdxStart = Phi->getIncomingValueForBlock(TheLoop->getLoopPreheader());
244 
245   // ExitInstruction is the single value which is used outside the loop.
246   // We only allow for a single reduction value to be used outside the loop.
247   // This includes users of the reduction variables (which form a cycle that
248   // ends in the phi node).
249   Instruction *ExitInstruction = nullptr;
250 
251   // Variable to keep the last visited store instruction. By the end of the
252   // algorithm this variable will either be null or hold the intermediate
253   // reduction value stored to a loop-invariant address.
254   StoreInst *IntermediateStore = nullptr;
255 
256   // Indicates that we found a reduction operation in our scan.
257   bool FoundReduxOp = false;
258 
259   // We start with the PHI node and scan for all of the users of this
260   // instruction. All users must be instructions that can be used as reduction
261   // variables (such as ADD). We must have a single out-of-block user. The cycle
262   // must include the original PHI.
263   bool FoundStartPHI = false;
264 
265   // To recognize min/max patterns formed by an icmp/select sequence, we store
266   // the number of instructions we saw from the recognized min/max pattern,
267   // to make sure we only see exactly the two instructions.
268   unsigned NumCmpSelectPatternInst = 0;
269   InstDesc ReduxDesc(false, nullptr);
270 
271   // Data used for determining if the recurrence has been type-promoted.
272   Type *RecurrenceType = Phi->getType();
273   SmallPtrSet<Instruction *, 4> CastInsts;
274   unsigned MinWidthCastToRecurrenceType;
275   Instruction *Start = Phi;
276   bool IsSigned = false;
277 
278   SmallPtrSet<Instruction *, 8> VisitedInsts;
279   SmallVector<Instruction *, 8> Worklist;
280 
281   // Return early if the recurrence kind does not match the type of Phi. If the
282   // recurrence kind is arithmetic, we attempt to look through AND operations
283   // resulting from the type promotion performed by InstCombine.  Vector
284   // operations are not limited to the legal integer widths, so we may be able
285   // to evaluate the reduction in the narrower width.
286   if (RecurrenceType->isFloatingPointTy()) {
287     if (!isFloatingPointRecurrenceKind(Kind))
288       return false;
289   } else if (RecurrenceType->isIntegerTy()) {
290     if (!isIntegerRecurrenceKind(Kind))
291       return false;
292     if (!isMinMaxRecurrenceKind(Kind))
293       Start = lookThroughAnd(Phi, RecurrenceType, VisitedInsts, CastInsts);
294   } else {
295     // Pointer min/max may exist, but it is not supported as a reduction op.
296     return false;
297   }
298 
299   Worklist.push_back(Start);
300   VisitedInsts.insert(Start);
301 
302   // Start with all flags set because we will intersect this with the reduction
303   // flags from all the reduction operations.
304   FastMathFlags FMF = FastMathFlags::getFast();
305 
306   // The first instruction in the use-def chain of the Phi node that requires
307   // exact floating point operations.
308   Instruction *ExactFPMathInst = nullptr;
309 
310   // A value in the reduction can be used:
311   //  - By the reduction:
312   //      - Reduction operation:
313   //        - One use of the reduction value (safe).
314   //        - Multiple uses of the reduction value (not safe).
315   //      - PHI:
316   //        - All uses of the PHI must be the reduction (safe).
317   //        - Otherwise, not safe.
318   //  - By instructions outside of the loop (safe).
319   //      * One value may have several outside users, but all outside
320   //        uses must be of the same value.
321   //  - By store instructions with a loop invariant address (safe with
322   //    the following restrictions):
323   //      * If there are several stores, all must have the same address.
324   //      * Final value should be stored in that loop invariant address.
325   //  - By an instruction that is not part of the reduction (not safe).
326   //    This is either:
327   //      * An instruction type other than PHI or the reduction operation.
328   //      * A PHI in the header other than the initial PHI.
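  // As an illustrative sketch (hand-written IR, not from a specific test), a
  // reduction that also stores its intermediate value to a loop-invariant
  // address is still accepted:
  //   %sum = phi i32 [ 0, %entry ], [ %sum.next, %loop ]
  //   %sum.next = add i32 %sum, %val
  //   store i32 %sum.next, i32* %gsum     ; %gsum is loop invariant
  // provided all such stores target the same address and the stored value is
  // the final reduction value.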
329   while (!Worklist.empty()) {
330     Instruction *Cur = Worklist.pop_back_val();
331 
332     // Store instructions are allowed iff they store the reduction value to
333     // the same loop-invariant memory location.
334     if (auto *SI = dyn_cast<StoreInst>(Cur)) {
335       if (!SE) {
336         LLVM_DEBUG(dbgs() << "Store instructions are not processed without "
337                           << "Scalar Evolution Analysis\n");
338         return false;
339       }
340 
341       const SCEV *PtrScev = SE->getSCEV(SI->getPointerOperand());
342       // Check it is the same address as previous stores
343       if (IntermediateStore) {
344         const SCEV *OtherScev =
345             SE->getSCEV(IntermediateStore->getPointerOperand());
346 
347         if (OtherScev != PtrScev) {
348           LLVM_DEBUG(dbgs() << "Storing reduction value to different addresses "
349                             << "inside the loop: " << *SI->getPointerOperand()
350                             << " and "
351                             << *IntermediateStore->getPointerOperand() << '\n');
352           return false;
353         }
354       }
355 
356       // Check the pointer is loop invariant
357       if (!SE->isLoopInvariant(PtrScev, TheLoop)) {
358         LLVM_DEBUG(dbgs() << "Storing reduction value to non-uniform address "
359                           << "inside the loop: " << *SI->getPointerOperand()
360                           << '\n');
361         return false;
362       }
363 
364       // IntermediateStore is always the last store in the loop.
365       IntermediateStore = SI;
366       continue;
367     }
368 
369     // No Users.
370     // If the instruction has no users then this is a broken chain and can't be
371     // a reduction variable.
372     if (Cur->use_empty())
373       return false;
374 
375     bool IsAPhi = isa<PHINode>(Cur);
376 
377     // A header PHI use other than the original PHI.
378     if (Cur != Phi && IsAPhi && Cur->getParent() == Phi->getParent())
379       return false;
380 
381     // Reductions of instructions such as Div and Sub are only possible if the
382     // LHS is the reduction variable.
383     if (!Cur->isCommutative() && !IsAPhi && !isa<SelectInst>(Cur) &&
384         !isa<ICmpInst>(Cur) && !isa<FCmpInst>(Cur) &&
385         !VisitedInsts.count(dyn_cast<Instruction>(Cur->getOperand(0))))
386       return false;
387 
388     // Any reduction instruction must be of one of the allowed kinds. We ignore
389     // the starting value (the Phi or an AND instruction if the Phi has been
390     // type-promoted).
391     if (Cur != Start) {
392       ReduxDesc =
393           isRecurrenceInstr(TheLoop, Phi, Cur, Kind, ReduxDesc, FuncFMF);
394       ExactFPMathInst = ExactFPMathInst == nullptr
395                             ? ReduxDesc.getExactFPMathInst()
396                             : ExactFPMathInst;
397       if (!ReduxDesc.isRecurrence())
398         return false;
399       // FIXME: FMF is allowed on phi, but propagation is not handled correctly.
400       if (isa<FPMathOperator>(ReduxDesc.getPatternInst()) && !IsAPhi) {
401         FastMathFlags CurFMF = ReduxDesc.getPatternInst()->getFastMathFlags();
402         if (auto *Sel = dyn_cast<SelectInst>(ReduxDesc.getPatternInst())) {
403           // Accept FMF on either fcmp or select of a min/max idiom.
404           // TODO: This is a hack to work-around the fact that FMF may not be
405           //       assigned/propagated correctly. If that problem is fixed or we
406           //       standardize on fmin/fmax via intrinsics, this can be removed.
407           if (auto *FCmp = dyn_cast<FCmpInst>(Sel->getCondition()))
408             CurFMF |= FCmp->getFastMathFlags();
409         }
410         FMF &= CurFMF;
411       }
412       // Update this reduction kind if we matched a new instruction.
413       // TODO: Can we eliminate the need for a 2nd InstDesc by keeping 'Kind'
414       //       state accurate while processing the worklist?
415       if (ReduxDesc.getRecKind() != RecurKind::None)
416         Kind = ReduxDesc.getRecKind();
417     }
418 
419     bool IsASelect = isa<SelectInst>(Cur);
420 
421     // A conditional reduction operation must only have 2 or fewer uses in
422     // VisitedInsts.
423     if (IsASelect && (Kind == RecurKind::FAdd || Kind == RecurKind::FMul) &&
424         hasMultipleUsesOf(Cur, VisitedInsts, 2))
425       return false;
426 
427     // A reduction operation must only have one use of the reduction value.
428     if (!IsAPhi && !IsASelect && !isMinMaxRecurrenceKind(Kind) &&
429         !isSelectCmpRecurrenceKind(Kind) &&
430         hasMultipleUsesOf(Cur, VisitedInsts, 1))
431       return false;
432 
433     // All inputs to a PHI node must be a reduction value.
434     if (IsAPhi && Cur != Phi && !areAllUsesIn(Cur, VisitedInsts))
435       return false;
436 
437     if ((isIntMinMaxRecurrenceKind(Kind) || Kind == RecurKind::SelectICmp) &&
438         (isa<ICmpInst>(Cur) || isa<SelectInst>(Cur)))
439       ++NumCmpSelectPatternInst;
440     if ((isFPMinMaxRecurrenceKind(Kind) || Kind == RecurKind::SelectFCmp) &&
441         (isa<FCmpInst>(Cur) || isa<SelectInst>(Cur)))
442       ++NumCmpSelectPatternInst;
443 
444     // Check whether we found a reduction operator.
445     FoundReduxOp |= !IsAPhi && Cur != Start;
446 
447     // Process users of current instruction. Push non-PHI nodes after PHI nodes
448     // onto the stack. This way we are going to have seen all inputs to PHI
449     // nodes once we get to them.
450     SmallVector<Instruction *, 8> NonPHIs;
451     SmallVector<Instruction *, 8> PHIs;
452     for (User *U : Cur->users()) {
453       Instruction *UI = cast<Instruction>(U);
454 
455       // If the user is a call to llvm.fmuladd then Cur may only be used as
456       // its final operand.
457       if (isFMulAddIntrinsic(UI))
458         if (Cur == UI->getOperand(0) || Cur == UI->getOperand(1))
459           return false;
460 
461       // Check if we found the exit user.
462       BasicBlock *Parent = UI->getParent();
463       if (!TheLoop->contains(Parent)) {
464         // If we already know this instruction is used externally, move on to
465         // the next user.
466         if (ExitInstruction == Cur)
467           continue;
468 
469         // Exit if you find multiple values used outside or if the header phi
470         // node is being used. In this case the user uses the value of the
471         // previous iteration, in which case we would lose "VF-1" iterations of
472         // the reduction operation if we vectorize.
473         if (ExitInstruction != nullptr || Cur == Phi)
474           return false;
475 
476         // The instruction used by an outside user must be the last instruction
477         // before we feed back to the reduction phi. Otherwise, we lose VF-1
478         // operations on the value.
479         if (!is_contained(Phi->operands(), Cur))
480           return false;
481 
482         ExitInstruction = Cur;
483         continue;
484       }
485 
486       // Process instructions only once (termination). Each reduction cycle
487       // value must only be used once, except by phi nodes and min/max
488       // reductions which are represented as a cmp followed by a select.
489       InstDesc IgnoredVal(false, nullptr);
490       if (VisitedInsts.insert(UI).second) {
491         if (isa<PHINode>(UI)) {
492           PHIs.push_back(UI);
493         } else {
494           StoreInst *SI = dyn_cast<StoreInst>(UI);
495           if (SI && SI->getPointerOperand() == Cur) {
496             // The reduction variable chain can only be stored somewhere; it
497             // can't be used as an address.
498             return false;
499           }
500           NonPHIs.push_back(UI);
501         }
502       } else if (!isa<PHINode>(UI) &&
503                  ((!isa<FCmpInst>(UI) && !isa<ICmpInst>(UI) &&
504                    !isa<SelectInst>(UI)) ||
505                   (!isConditionalRdxPattern(Kind, UI).isRecurrence() &&
506                    !isSelectCmpPattern(TheLoop, Phi, UI, IgnoredVal)
507                         .isRecurrence() &&
508                    !isMinMaxPattern(UI, Kind, IgnoredVal).isRecurrence())))
509         return false;
510 
511       // Remember that we completed the cycle.
512       if (UI == Phi)
513         FoundStartPHI = true;
514     }
515     Worklist.append(PHIs.begin(), PHIs.end());
516     Worklist.append(NonPHIs.begin(), NonPHIs.end());
517   }
518 
519   // This means we have seen one but not the other instruction of the
520   // pattern or more than just a select and cmp. Zero implies that we saw an
521   // llvm.min/max intrinsic, which is always OK.
522   if (isMinMaxRecurrenceKind(Kind) && NumCmpSelectPatternInst != 2 &&
523       NumCmpSelectPatternInst != 0)
524     return false;
525 
526   if (isSelectCmpRecurrenceKind(Kind) && NumCmpSelectPatternInst != 1)
527     return false;
528 
529   if (IntermediateStore) {
530     // Check that stored value goes to the phi node again. This way we make sure
531     // that the value stored in IntermediateStore is indeed the final reduction
532     // value.
533     if (!is_contained(Phi->operands(), IntermediateStore->getValueOperand())) {
534       LLVM_DEBUG(dbgs() << "Not a final reduction value stored: "
535                         << *IntermediateStore << '\n');
536       return false;
537     }
538 
539     // If there is an exit instruction, its value should be stored in
540     // IntermediateStore.
541     if (ExitInstruction &&
542         IntermediateStore->getValueOperand() != ExitInstruction) {
543       LLVM_DEBUG(dbgs() << "Last store Instruction of reduction value does not "
544                            "store last calculated value of the reduction: "
545                         << *IntermediateStore << '\n');
546       return false;
547     }
548 
549     // If all uses are inside the loop (intermediate stores), then the
550     // reduction value after the loop will be the one used in the last store.
551     if (!ExitInstruction)
552       ExitInstruction = cast<Instruction>(IntermediateStore->getValueOperand());
553   }
554 
555   if (!FoundStartPHI || !FoundReduxOp || !ExitInstruction)
556     return false;
557 
558   const bool IsOrdered =
559       checkOrderedReduction(Kind, ExactFPMathInst, ExitInstruction, Phi);
560 
561   if (Start != Phi) {
562     // If the starting value is not the same as the phi node, we speculatively
563     // looked through an 'and' instruction when evaluating a potential
564     // arithmetic reduction to determine if it may have been type-promoted.
565     //
566     // We now compute the minimal bit width that is required to represent the
567     // reduction. If this is the same width that was indicated by the 'and', we
568     // can represent the reduction in the smaller type. The 'and' instruction
569     // will be eliminated since it will essentially be a cast instruction that
570     // can be ignored in the cost model. If we compute a different type than we
571     // did when evaluating the 'and', the 'and' will not be eliminated, and we
572     // will end up with different kinds of operations in the recurrence
573     // expression (e.g., IntegerAND, IntegerADD). We give up if this is
574     // the case.
575     //
576     // The vectorizer relies on InstCombine to perform the actual
577     // type-shrinking. It does this by inserting instructions to truncate the
578     // exit value of the reduction to the width indicated by RecurrenceType and
579     // then extend this value back to the original width. If IsSigned is false,
580     // a 'zext' instruction will be generated; otherwise, a 'sext' will be
581     // used.
582     //
583     // TODO: We should not rely on InstCombine to rewrite the reduction in the
584     //       smaller type. We should just generate a correctly typed expression
585     //       to begin with.
586     Type *ComputedType;
587     std::tie(ComputedType, IsSigned) =
588         computeRecurrenceType(ExitInstruction, DB, AC, DT);
589     if (ComputedType != RecurrenceType)
590       return false;
591   }
592 
593   // Collect cast instructions and the minimum width used by the recurrence.
594   // If the starting value is not the same as the phi node and the computed
595   // recurrence type is equal to the recurrence type, the recurrence expression
596   // will be represented in a narrower or wider type. If there are any cast
597   // instructions that will be unnecessary, collect them in CastInsts.
598   // Note that the 'and' instruction was already included in this list.
599   //
600   // TODO: A better way to represent this may be to tag in some way all the
601   //       instructions that are a part of the reduction. The vectorizer cost
602   //       model could then apply the recurrence type to these instructions,
603   //       without needing a white list of instructions to ignore.
604   //       This may also be useful for the inloop reductions, if it can be
605   //       kept simple enough.
606   collectCastInstrs(TheLoop, ExitInstruction, RecurrenceType, CastInsts,
607                     MinWidthCastToRecurrenceType);
608 
609   // We found a reduction var if we have reached the original phi node and we
610   // only have a single instruction with out-of-loop users.
611 
612   // The ExitInstruction (the instruction which is allowed to have out-of-loop
613   // users) is saved as part of the RecurrenceDescriptor.
614 
615   // Save the description of this reduction variable.
616   RecurrenceDescriptor RD(RdxStart, ExitInstruction, IntermediateStore, Kind,
617                           FMF, ExactFPMathInst, RecurrenceType, IsSigned,
618                           IsOrdered, CastInsts, MinWidthCastToRecurrenceType);
619   RedDes = RD;
620 
621   return true;
622 }
623 
624 // We are looking for loops that do something like this:
625 //   int r = 0;
626 //   for (int i = 0; i < n; i++) {
627 //     if (src[i] > 3)
628 //       r = 3;
629 //   }
630 // where the reduction value (r) only has two states, in this example 0 or 3.
631 // The generated LLVM IR for this type of loop will be like this:
632 //   for.body:
633 //     %r = phi i32 [ %spec.select, %for.body ], [ 0, %entry ]
634 //     ...
635 //     %cmp = icmp sgt i32 %5, 3
636 //     %spec.select = select i1 %cmp, i32 3, i32 %r
637 //     ...
638 // In general we can support vectorization of loops where 'r' flips between
639 // any two non-constants, provided they are loop invariant. The only thing
640 // we actually care about at the end of the loop is whether or not any lane
641 // in the selected vector is different from the start value. The final
642 // across-vector reduction after the loop simply involves choosing the start
643 // value if nothing changed (0 in the example above) or the other selected
644 // value (3 in the example above).
645 RecurrenceDescriptor::InstDesc
646 RecurrenceDescriptor::isSelectCmpPattern(Loop *Loop, PHINode *OrigPhi,
647                                          Instruction *I, InstDesc &Prev) {
648   // We must handle the select(cmp(),x,y) as a single instruction. Advance to
649   // the select.
650   CmpInst::Predicate Pred;
651   if (match(I, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
652     if (auto *Select = dyn_cast<SelectInst>(*I->user_begin()))
653       return InstDesc(Select, Prev.getRecKind());
654   }
655 
656   // Only match select with single use cmp condition.
657   if (!match(I, m_Select(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), m_Value(),
658                          m_Value())))
659     return InstDesc(false, I);
660 
661   SelectInst *SI = cast<SelectInst>(I);
662   Value *NonPhi = nullptr;
663 
664   if (OrigPhi == dyn_cast<PHINode>(SI->getTrueValue()))
665     NonPhi = SI->getFalseValue();
666   else if (OrigPhi == dyn_cast<PHINode>(SI->getFalseValue()))
667     NonPhi = SI->getTrueValue();
668   else
669     return InstDesc(false, I);
670 
671   // We are looking for selects of the form:
672   //   select(cmp(), phi, loop_invariant) or
673   //   select(cmp(), loop_invariant, phi)
674   if (!Loop->isLoopInvariant(NonPhi))
675     return InstDesc(false, I);
676 
677   return InstDesc(I, isa<ICmpInst>(I->getOperand(0)) ? RecurKind::SelectICmp
678                                                      : RecurKind::SelectFCmp);
679 }
680 
681 RecurrenceDescriptor::InstDesc
682 RecurrenceDescriptor::isMinMaxPattern(Instruction *I, RecurKind Kind,
683                                       const InstDesc &Prev) {
684   assert((isa<CmpInst>(I) || isa<SelectInst>(I) || isa<CallInst>(I)) &&
685          "Expected a cmp or select or call instruction");
686   if (!isMinMaxRecurrenceKind(Kind))
687     return InstDesc(false, I);
688 
689   // We must handle the select(cmp()) as a single instruction. Advance to the
690   // select.
691   CmpInst::Predicate Pred;
692   if (match(I, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
693     if (auto *Select = dyn_cast<SelectInst>(*I->user_begin()))
694       return InstDesc(Select, Prev.getRecKind());
695   }
696 
697   // Only match select with single use cmp condition, or a min/max intrinsic.
698   if (!isa<IntrinsicInst>(I) &&
699       !match(I, m_Select(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), m_Value(),
700                          m_Value())))
701     return InstDesc(false, I);
702 
703   // Look for a min/max pattern.
704   if (match(I, m_UMin(m_Value(), m_Value())))
705     return InstDesc(Kind == RecurKind::UMin, I);
706   if (match(I, m_UMax(m_Value(), m_Value())))
707     return InstDesc(Kind == RecurKind::UMax, I);
708   if (match(I, m_SMax(m_Value(), m_Value())))
709     return InstDesc(Kind == RecurKind::SMax, I);
710   if (match(I, m_SMin(m_Value(), m_Value())))
711     return InstDesc(Kind == RecurKind::SMin, I);
712   if (match(I, m_OrdFMin(m_Value(), m_Value())))
713     return InstDesc(Kind == RecurKind::FMin, I);
714   if (match(I, m_OrdFMax(m_Value(), m_Value())))
715     return InstDesc(Kind == RecurKind::FMax, I);
716   if (match(I, m_UnordFMin(m_Value(), m_Value())))
717     return InstDesc(Kind == RecurKind::FMin, I);
718   if (match(I, m_UnordFMax(m_Value(), m_Value())))
719     return InstDesc(Kind == RecurKind::FMax, I);
720   if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
721     return InstDesc(Kind == RecurKind::FMin, I);
722   if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value())))
723     return InstDesc(Kind == RecurKind::FMax, I);
724 
725   return InstDesc(false, I);
726 }
727 
728 /// Returns true if the select instruction has users in the compare-and-add
729 /// reduction pattern below. The select instruction argument is the last one
730 /// in the sequence.
731 ///
732 /// %sum.1 = phi ...
733 /// ...
734 /// %cmp = fcmp pred %0, %CFP
735 /// %add = fadd %0, %sum.1
736 /// %sum.2 = select %cmp, %add, %sum.1
737 RecurrenceDescriptor::InstDesc
738 RecurrenceDescriptor::isConditionalRdxPattern(RecurKind Kind, Instruction *I) {
739   SelectInst *SI = dyn_cast<SelectInst>(I);
740   if (!SI)
741     return InstDesc(false, I);
742 
743   CmpInst *CI = dyn_cast<CmpInst>(SI->getCondition());
744   // Only handle single use cases for now.
745   if (!CI || !CI->hasOneUse())
746     return InstDesc(false, I);
747 
748   Value *TrueVal = SI->getTrueValue();
749   Value *FalseVal = SI->getFalseValue();
750   // For now, handle only the case where exactly one of the operands of the
751   // select instruction is a PHI node.
752   if ((isa<PHINode>(*TrueVal) && isa<PHINode>(*FalseVal)) ||
753       (!isa<PHINode>(*TrueVal) && !isa<PHINode>(*FalseVal)))
754     return InstDesc(false, I);
755 
756   Instruction *I1 =
757       isa<PHINode>(*TrueVal) ? dyn_cast<Instruction>(FalseVal)
758                              : dyn_cast<Instruction>(TrueVal);
759   if (!I1 || !I1->isBinaryOp())
760     return InstDesc(false, I);
761 
762   Value *Op1, *Op2;
763   if ((m_FAdd(m_Value(Op1), m_Value(Op2)).match(I1)  ||
764        m_FSub(m_Value(Op1), m_Value(Op2)).match(I1)) &&
765       I1->isFast())
766     return InstDesc(Kind == RecurKind::FAdd, SI);
767 
768   if (m_FMul(m_Value(Op1), m_Value(Op2)).match(I1) && (I1->isFast()))
769     return InstDesc(Kind == RecurKind::FMul, SI);
770 
771   return InstDesc(false, I);
772 }
773 
774 RecurrenceDescriptor::InstDesc
775 RecurrenceDescriptor::isRecurrenceInstr(Loop *L, PHINode *OrigPhi,
776                                         Instruction *I, RecurKind Kind,
777                                         InstDesc &Prev, FastMathFlags FuncFMF) {
778   assert(Prev.getRecKind() == RecurKind::None || Prev.getRecKind() == Kind);
779   switch (I->getOpcode()) {
780   default:
781     return InstDesc(false, I);
782   case Instruction::PHI:
783     return InstDesc(I, Prev.getRecKind(), Prev.getExactFPMathInst());
784   case Instruction::Sub:
785   case Instruction::Add:
786     return InstDesc(Kind == RecurKind::Add, I);
787   case Instruction::Mul:
788     return InstDesc(Kind == RecurKind::Mul, I);
789   case Instruction::And:
790     return InstDesc(Kind == RecurKind::And, I);
791   case Instruction::Or:
792     return InstDesc(Kind == RecurKind::Or, I);
793   case Instruction::Xor:
794     return InstDesc(Kind == RecurKind::Xor, I);
795   case Instruction::FDiv:
796   case Instruction::FMul:
797     return InstDesc(Kind == RecurKind::FMul, I,
798                     I->hasAllowReassoc() ? nullptr : I);
799   case Instruction::FSub:
800   case Instruction::FAdd:
801     return InstDesc(Kind == RecurKind::FAdd, I,
802                     I->hasAllowReassoc() ? nullptr : I);
803   case Instruction::Select:
804     if (Kind == RecurKind::FAdd || Kind == RecurKind::FMul)
805       return isConditionalRdxPattern(Kind, I);
806     LLVM_FALLTHROUGH;
807   case Instruction::FCmp:
808   case Instruction::ICmp:
809   case Instruction::Call:
810     if (isSelectCmpRecurrenceKind(Kind))
811       return isSelectCmpPattern(L, OrigPhi, I, Prev);
812     if (isIntMinMaxRecurrenceKind(Kind) ||
813         (((FuncFMF.noNaNs() && FuncFMF.noSignedZeros()) ||
814           (isa<FPMathOperator>(I) && I->hasNoNaNs() &&
815            I->hasNoSignedZeros())) &&
816          isFPMinMaxRecurrenceKind(Kind)))
817       return isMinMaxPattern(I, Kind, Prev);
818     else if (isFMulAddIntrinsic(I))
819       return InstDesc(Kind == RecurKind::FMulAdd, I,
820                       I->hasAllowReassoc() ? nullptr : I);
821     return InstDesc(false, I);
822   }
823 }
824 
825 bool RecurrenceDescriptor::hasMultipleUsesOf(
826     Instruction *I, SmallPtrSetImpl<Instruction *> &Insts,
827     unsigned MaxNumUses) {
828   unsigned NumUses = 0;
829   for (const Use &U : I->operands()) {
830     if (Insts.count(dyn_cast<Instruction>(U)))
831       ++NumUses;
832     if (NumUses > MaxNumUses)
833       return true;
834   }
835 
836   return false;
837 }
838 
839 bool RecurrenceDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
840                                           RecurrenceDescriptor &RedDes,
841                                           DemandedBits *DB, AssumptionCache *AC,
842                                           DominatorTree *DT,
843                                           ScalarEvolution *SE) {
844   BasicBlock *Header = TheLoop->getHeader();
845   Function &F = *Header->getParent();
846   FastMathFlags FMF;
847   FMF.setNoNaNs(
848       F.getFnAttribute("no-nans-fp-math").getValueAsBool());
849   FMF.setNoSignedZeros(
850       F.getFnAttribute("no-signed-zeros-fp-math").getValueAsBool());
851 
852   if (AddReductionVar(Phi, RecurKind::Add, TheLoop, FMF, RedDes, DB, AC, DT,
853                       SE)) {
854     LLVM_DEBUG(dbgs() << "Found an ADD reduction PHI." << *Phi << "\n");
855     return true;
856   }
857   if (AddReductionVar(Phi, RecurKind::Mul, TheLoop, FMF, RedDes, DB, AC, DT,
858                       SE)) {
859     LLVM_DEBUG(dbgs() << "Found a MUL reduction PHI." << *Phi << "\n");
860     return true;
861   }
862   if (AddReductionVar(Phi, RecurKind::Or, TheLoop, FMF, RedDes, DB, AC, DT,
863                       SE)) {
864     LLVM_DEBUG(dbgs() << "Found an OR reduction PHI." << *Phi << "\n");
865     return true;
866   }
867   if (AddReductionVar(Phi, RecurKind::And, TheLoop, FMF, RedDes, DB, AC, DT,
868                       SE)) {
869     LLVM_DEBUG(dbgs() << "Found an AND reduction PHI." << *Phi << "\n");
870     return true;
871   }
872   if (AddReductionVar(Phi, RecurKind::Xor, TheLoop, FMF, RedDes, DB, AC, DT,
873                       SE)) {
874     LLVM_DEBUG(dbgs() << "Found a XOR reduction PHI." << *Phi << "\n");
875     return true;
876   }
877   if (AddReductionVar(Phi, RecurKind::SMax, TheLoop, FMF, RedDes, DB, AC, DT,
878                       SE)) {
879     LLVM_DEBUG(dbgs() << "Found a SMAX reduction PHI." << *Phi << "\n");
880     return true;
881   }
882   if (AddReductionVar(Phi, RecurKind::SMin, TheLoop, FMF, RedDes, DB, AC, DT,
883                       SE)) {
884     LLVM_DEBUG(dbgs() << "Found a SMIN reduction PHI." << *Phi << "\n");
885     return true;
886   }
887   if (AddReductionVar(Phi, RecurKind::UMax, TheLoop, FMF, RedDes, DB, AC, DT,
888                       SE)) {
889     LLVM_DEBUG(dbgs() << "Found a UMAX reduction PHI." << *Phi << "\n");
890     return true;
891   }
892   if (AddReductionVar(Phi, RecurKind::UMin, TheLoop, FMF, RedDes, DB, AC, DT,
893                       SE)) {
894     LLVM_DEBUG(dbgs() << "Found a UMIN reduction PHI." << *Phi << "\n");
895     return true;
896   }
897   if (AddReductionVar(Phi, RecurKind::SelectICmp, TheLoop, FMF, RedDes, DB, AC,
898                       DT, SE)) {
899     LLVM_DEBUG(dbgs() << "Found an integer conditional select reduction PHI."
900                       << *Phi << "\n");
901     return true;
902   }
903   if (AddReductionVar(Phi, RecurKind::FMul, TheLoop, FMF, RedDes, DB, AC, DT,
904                       SE)) {
905     LLVM_DEBUG(dbgs() << "Found an FMult reduction PHI." << *Phi << "\n");
906     return true;
907   }
908   if (AddReductionVar(Phi, RecurKind::FAdd, TheLoop, FMF, RedDes, DB, AC, DT,
909                       SE)) {
910     LLVM_DEBUG(dbgs() << "Found an FAdd reduction PHI." << *Phi << "\n");
911     return true;
912   }
913   if (AddReductionVar(Phi, RecurKind::FMax, TheLoop, FMF, RedDes, DB, AC, DT,
914                       SE)) {
915     LLVM_DEBUG(dbgs() << "Found a float MAX reduction PHI." << *Phi << "\n");
916     return true;
917   }
918   if (AddReductionVar(Phi, RecurKind::FMin, TheLoop, FMF, RedDes, DB, AC, DT,
919                       SE)) {
920     LLVM_DEBUG(dbgs() << "Found a float MIN reduction PHI." << *Phi << "\n");
921     return true;
922   }
923   if (AddReductionVar(Phi, RecurKind::SelectFCmp, TheLoop, FMF, RedDes, DB, AC,
924                       DT, SE)) {
925     LLVM_DEBUG(dbgs() << "Found a float conditional select reduction PHI."
926                       << *Phi << "\n");
927     return true;
928   }
929   if (AddReductionVar(Phi, RecurKind::FMulAdd, TheLoop, FMF, RedDes, DB, AC, DT,
930                       SE)) {
931     LLVM_DEBUG(dbgs() << "Found an FMulAdd reduction PHI." << *Phi << "\n");
932     return true;
933   }
934   // Not a reduction of known type.
935   return false;
936 }
937 
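// A first-order recurrence is a phi whose value in the current iteration is
// the value of some instruction from the previous iteration. As a hedged
// illustration (hand-written IR, not from a specific test):
//   loop:
//     %prev = phi i32 [ %init, %entry ], [ %cur, %loop ]
//     %cur  = load i32, i32* %p
//     %use  = add i32 %prev, %cur      ; uses last iteration's value
// Here %prev is a first-order recurrence with Previous = %cur.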
938 bool RecurrenceDescriptor::isFirstOrderRecurrence(
939     PHINode *Phi, Loop *TheLoop,
940     MapVector<Instruction *, Instruction *> &SinkAfter, DominatorTree *DT) {
941 
942   // Ensure the phi node is in the loop header and has two incoming values.
943   if (Phi->getParent() != TheLoop->getHeader() ||
944       Phi->getNumIncomingValues() != 2)
945     return false;
946 
947   // Ensure the loop has a preheader and a single latch block. The loop
948   // vectorizer will need the latch to set up the next iteration of the loop.
949   auto *Preheader = TheLoop->getLoopPreheader();
950   auto *Latch = TheLoop->getLoopLatch();
951   if (!Preheader || !Latch)
952     return false;
953 
954   // Ensure the phi node's incoming blocks are the loop preheader and latch.
955   if (Phi->getBasicBlockIndex(Preheader) < 0 ||
956       Phi->getBasicBlockIndex(Latch) < 0)
957     return false;
958 
959   // Get the previous value. The previous value comes from the latch edge while
960   // the initial value comes from the preheader edge.
961   auto *Previous = dyn_cast<Instruction>(Phi->getIncomingValueForBlock(Latch));
962   if (!Previous || !TheLoop->contains(Previous) || isa<PHINode>(Previous) ||
963       SinkAfter.count(Previous)) // Cannot rely on dominance due to motion.
964     return false;
965 
966   // Ensure every user of the phi node (recursively) is dominated by the
967   // previous value. The dominance requirement ensures the loop vectorizer will
968   // not need to vectorize the initial value prior to the first iteration of the
969   // loop.
970   // TODO: Consider extending this sinking to handle memory instructions.
971 
972   // We optimistically assume we can sink all users after Previous. Keep a set
973   // of instructions to sink after Previous ordered by dominance in the common
974   // basic block. It will be applied to SinkAfter if all users can be sunk.
975   auto CompareByComesBefore = [](const Instruction *A, const Instruction *B) {
976     return A->comesBefore(B);
977   };
978   std::set<Instruction *, decltype(CompareByComesBefore)> InstrsToSink(
979       CompareByComesBefore);
980 
981   BasicBlock *PhiBB = Phi->getParent();
982   SmallVector<Instruction *, 8> WorkList;
983   auto TryToPushSinkCandidate = [&](Instruction *SinkCandidate) {
984     // Already sunk SinkCandidate.
985     if (SinkCandidate->getParent() == PhiBB &&
986         InstrsToSink.find(SinkCandidate) != InstrsToSink.end())
987       return true;
988 
989     // Cyclic dependence.
990     if (Previous == SinkCandidate)
991       return false;
992 
993     if (DT->dominates(Previous,
994                       SinkCandidate)) // We already are good w/o sinking.
995       return true;
996 
997     if (SinkCandidate->getParent() != PhiBB ||
998         SinkCandidate->mayHaveSideEffects() ||
999         SinkCandidate->mayReadFromMemory() || SinkCandidate->isTerminator())
1000       return false;
1001 
1002     // Avoid sinking an instruction multiple times (if multiple operands are
1003     // first order recurrences) by sinking once - after the latest 'previous'
1004     // instruction.
1005     auto It = SinkAfter.find(SinkCandidate);
1006     if (It != SinkAfter.end()) {
1007       auto *OtherPrev = It->second;
1008       // Find the earliest entry in the 'sink-after' chain. The last entry in
1009       // the chain is the original 'Previous' for a recurrence handled earlier.
1010       auto EarlierIt = SinkAfter.find(OtherPrev);
1011       while (EarlierIt != SinkAfter.end()) {
1012         Instruction *EarlierInst = EarlierIt->second;
1013         EarlierIt = SinkAfter.find(EarlierInst);
1014         // Bail out if order has not been preserved.
1015         if (EarlierIt != SinkAfter.end() &&
1016             !DT->dominates(EarlierInst, OtherPrev))
1017           return false;
1018         OtherPrev = EarlierInst;
1019       }
1020       // Bail out if order has not been preserved.
1021       if (OtherPrev != It->second && !DT->dominates(It->second, OtherPrev))
1022         return false;
1023 
1024       // SinkCandidate is already being sunk after an instruction after
1025       // Previous. Nothing left to do.
1026       if (DT->dominates(Previous, OtherPrev) || Previous == OtherPrev)
1027         return true;
1028       // Otherwise, Previous comes after OtherPrev and SinkCandidate needs to be
1029       // re-sunk to Previous, instead of sinking to OtherPrev. Remove
1030     // SinkCandidate from SinkAfter to ensure its insert position is updated.
1031       SinkAfter.erase(SinkCandidate);
1032     }
1033 
1034     // If we reach a PHI node that is not dominated by Previous, we reached a
1035     // header PHI. No need for sinking.
1036     if (isa<PHINode>(SinkCandidate))
1037       return true;
1038 
1039     // Sink SinkCandidate tentatively and check its users.
1040     InstrsToSink.insert(SinkCandidate);
1041     WorkList.push_back(SinkCandidate);
1042     return true;
1043   };
1044 
1045   WorkList.push_back(Phi);
1046   // Try to recursively sink instructions and their users after Previous.
1047   while (!WorkList.empty()) {
1048     Instruction *Current = WorkList.pop_back_val();
1049     for (User *User : Current->users()) {
1050       if (!TryToPushSinkCandidate(cast<Instruction>(User)))
1051         return false;
1052     }
1053   }
1054 
1055   // We can sink all users of Phi. Update the mapping.
1056   for (Instruction *I : InstrsToSink) {
1057     SinkAfter[I] = Previous;
1058     Previous = I;
1059   }
1060   return true;
1061 }
1062 
1063 /// This function returns the identity element (or neutral element) for
1064 /// the operation K.
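/// For example, for an i32 RecurKind::Add the identity is 0, for
/// RecurKind::And it is all-ones, and for RecurKind::FAdd it is -0.0 unless
/// the no-signed-zeros flag allows 0.0 (see the FIXME below).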
1065 Value *RecurrenceDescriptor::getRecurrenceIdentity(RecurKind K, Type *Tp,
1066                                                    FastMathFlags FMF) const {
1067   switch (K) {
1068   case RecurKind::Xor:
1069   case RecurKind::Add:
1070   case RecurKind::Or:
1071     // Adding, Xoring, Oring zero to a number does not change it.
1072     return ConstantInt::get(Tp, 0);
1073   case RecurKind::Mul:
1074     // Multiplying a number by 1 does not change it.
1075     return ConstantInt::get(Tp, 1);
1076   case RecurKind::And:
1077     // AND-ing a number with an all-1 value does not change it.
1078     return ConstantInt::get(Tp, -1, true);
1079   case RecurKind::FMul:
1080     // Multiplying a number by 1 does not change it.
1081     return ConstantFP::get(Tp, 1.0L);
1082   case RecurKind::FMulAdd:
1083   case RecurKind::FAdd:
1084     // Adding zero to a number does not change it.
1085     // FIXME: Ideally we should not need to check FMF for FAdd and should always
1086     // use -0.0. However, this will currently result in mixed vectors of 0.0/-0.0.
1087     // Instead, we should ensure that 1) the FMF from FAdd are propagated to the PHI
1088     // nodes where possible, and 2) PHIs with the nsz flag + -0.0 use 0.0. This would
1089     // mean we can then remove the check for noSignedZeros() below (see D98963).
1090     if (FMF.noSignedZeros())
1091       return ConstantFP::get(Tp, 0.0L);
1092     return ConstantFP::get(Tp, -0.0L);
1093   case RecurKind::UMin:
1094     return ConstantInt::get(Tp, -1);
1095   case RecurKind::UMax:
1096     return ConstantInt::get(Tp, 0);
1097   case RecurKind::SMin:
1098     return ConstantInt::get(Tp,
1099                             APInt::getSignedMaxValue(Tp->getIntegerBitWidth()));
1100   case RecurKind::SMax:
1101     return ConstantInt::get(Tp,
1102                             APInt::getSignedMinValue(Tp->getIntegerBitWidth()));
1103   case RecurKind::FMin:
1104     return ConstantFP::getInfinity(Tp, true);
1105   case RecurKind::FMax:
1106     return ConstantFP::getInfinity(Tp, false);
1107   case RecurKind::SelectICmp:
1108   case RecurKind::SelectFCmp:
1109     return getRecurrenceStartValue();
1110     break;
1111   default:
1112     llvm_unreachable("Unknown recurrence kind");
1113   }
1114 }
1115 
1116 unsigned RecurrenceDescriptor::getOpcode(RecurKind Kind) {
1117   switch (Kind) {
1118   case RecurKind::Add:
1119     return Instruction::Add;
1120   case RecurKind::Mul:
1121     return Instruction::Mul;
1122   case RecurKind::Or:
1123     return Instruction::Or;
1124   case RecurKind::And:
1125     return Instruction::And;
1126   case RecurKind::Xor:
1127     return Instruction::Xor;
1128   case RecurKind::FMul:
1129     return Instruction::FMul;
1130   case RecurKind::FMulAdd:
1131   case RecurKind::FAdd:
1132     return Instruction::FAdd;
1133   case RecurKind::SMax:
1134   case RecurKind::SMin:
1135   case RecurKind::UMax:
1136   case RecurKind::UMin:
1137   case RecurKind::SelectICmp:
1138     return Instruction::ICmp;
1139   case RecurKind::FMax:
1140   case RecurKind::FMin:
1141   case RecurKind::SelectFCmp:
1142     return Instruction::FCmp;
1143   default:
1144     llvm_unreachable("Unknown recurrence operation");
1145   }
1146 }
1147 
1148 SmallVector<Instruction *, 4>
1149 RecurrenceDescriptor::getReductionOpChain(PHINode *Phi, Loop *L) const {
1150   SmallVector<Instruction *, 4> ReductionOperations;
1151   unsigned RedOp = getOpcode(Kind);
1152 
1153   // Search down from the Phi to the LoopExitInstr, looking for instructions
1154   // with a single user of the correct type for the reduction.
1155 
1156   // Note that we check that the type of the operand is correct for each item in
1157   // the chain, including the last (the loop exit value). This can come up from
1158   // sub, which would otherwise be treated as an add reduction. MinMax also needs
1159   // to check for a pair of icmp/select, for which we use the getNextInstruction
1160   // and isCorrectOpcode functions to step the right number of instructions, and
1161   // check the icmp/select pair.
1162   // FIXME: We also do not attempt to look through Select's yet, which might
1163   // be part of the reduction chain, or attempt to look through And's to find a
1164   // smaller bitwidth. Subs are also currently not allowed (which are usually
1165   // treated as part of an add reduction) as they are expected to generally be
1166   // more expensive than out-of-loop reductions, and need to be costed more
1167   // carefully.
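  // As a sketch (illustrative IR, not from a specific test), for an integer
  // smax reduction the pattern being walked looks like:
  //   %rdx = phi i32 [ %init, %entry ], [ %select, %loop ]
  //   %cmp = icmp sgt i32 %rdx, %val
  //   %select = select i1 %cmp, i32 %rdx, i32 %val
  // The phi has two uses (the icmp and the select), getNextInstruction steps
  // from the phi directly to the select, and isCorrectOpcode uses
  // matchSelectPattern to confirm the icmp/select pair is a min/max.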
1168   unsigned ExpectedUses = 1;
1169   if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp)
1170     ExpectedUses = 2;
1171 
1172   auto getNextInstruction = [&](Instruction *Cur) -> Instruction * {
1173     for (auto User : Cur->users()) {
1174       Instruction *UI = cast<Instruction>(User);
1175       if (isa<PHINode>(UI))
1176         continue;
1177       if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp) {
1178         // We are expecting a icmp/select pair, which we go to the next select
1179         // We are expecting an icmp/select pair, so we go to the next select
1180         if (isa<SelectInst>(UI))
1181           return UI;
1182         continue;
1183       }
1184       return UI;
1185     }
1186     return nullptr;
1187   };
1188   auto isCorrectOpcode = [&](Instruction *Cur) {
1189     if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp) {
1190       Value *LHS, *RHS;
1191       return SelectPatternResult::isMinOrMax(
1192           matchSelectPattern(Cur, LHS, RHS).Flavor);
1193     }
1194     // Recognize a call to the llvm.fmuladd intrinsic.
1195     if (isFMulAddIntrinsic(Cur))
1196       return true;
1197 
1198     return Cur->getOpcode() == RedOp;
1199   };
1200 
1201   // Attempt to look through Phis which are part of the reduction chain
1202   unsigned ExtraPhiUses = 0;
1203   Instruction *RdxInstr = LoopExitInstr;
1204   if (auto ExitPhi = dyn_cast<PHINode>(LoopExitInstr)) {
1205     if (ExitPhi->getNumIncomingValues() != 2)
1206       return {};
1207 
1208     Instruction *Inc0 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(0));
1209     Instruction *Inc1 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(1));
1210 
1211     Instruction *Chain = nullptr;
1212     if (Inc0 == Phi)
1213       Chain = Inc1;
1214     else if (Inc1 == Phi)
1215       Chain = Inc0;
1216     else
1217       return {};
1218 
1219     RdxInstr = Chain;
1220     ExtraPhiUses = 1;
1221   }
1222 
1223   // The loop exit instruction is checked first (as a quick test) but added
1224   // last. We check that the opcode is correct (and don't allow it to be a Sub)
1225   // and that it has the expected number of uses. It will have one use from the
1226   // phi and one from an LCSSA value, no matter the type.
1227   if (!isCorrectOpcode(RdxInstr) || !LoopExitInstr->hasNUses(2))
1228     return {};
1229 
1230   // Check that the Phi has one (or two for min/max) uses, plus an extra use
1231   // for conditional reductions.
1232   if (!Phi->hasNUses(ExpectedUses + ExtraPhiUses))
1233     return {};
1234 
1235   Instruction *Cur = getNextInstruction(Phi);
1236 
1237   // Each other instruction in the chain should have the expected number of uses
1238   // and be the correct opcode.
1239   while (Cur != RdxInstr) {
1240     if (!Cur || !isCorrectOpcode(Cur) || !Cur->hasNUses(ExpectedUses))
1241       return {};
1242 
1243     ReductionOperations.push_back(Cur);
1244     Cur = getNextInstruction(Cur);
1245   }
1246 
1247   ReductionOperations.push_back(Cur);
1248   return ReductionOperations;
1249 }
1250 
1251 InductionDescriptor::InductionDescriptor(Value *Start, InductionKind K,
1252                                          const SCEV *Step, BinaryOperator *BOp,
1253                                          Type *ElementType,
1254                                          SmallVectorImpl<Instruction *> *Casts)
1255     : StartValue(Start), IK(K), Step(Step), InductionBinOp(BOp),
1256       ElementType(ElementType) {
1257   assert(IK != IK_NoInduction && "Not an induction");
1258 
1259   // Start value type should match the induction kind and the value
1260   // itself should not be null.
1261   assert(StartValue && "StartValue is null");
1262   assert((IK != IK_PtrInduction || StartValue->getType()->isPointerTy()) &&
1263          "StartValue is not a pointer for pointer induction");
1264   assert((IK != IK_IntInduction || StartValue->getType()->isIntegerTy()) &&
1265          "StartValue is not an integer for integer induction");
1266 
1267   // Check the Step Value. It should be a non-zero integer value.
1268   assert((!getConstIntStepValue() || !getConstIntStepValue()->isZero()) &&
1269          "Step value is zero");
1270 
1271   assert((IK != IK_PtrInduction || getConstIntStepValue()) &&
1272          "Step value should be constant for pointer induction");
1273   assert((IK == IK_FpInduction || Step->getType()->isIntegerTy()) &&
1274          "StepValue is not an integer");
1275 
1276   assert((IK != IK_FpInduction || Step->getType()->isFloatingPointTy()) &&
1277          "StepValue is not FP for FpInduction");
1278   assert((IK != IK_FpInduction ||
1279           (InductionBinOp &&
1280            (InductionBinOp->getOpcode() == Instruction::FAdd ||
1281             InductionBinOp->getOpcode() == Instruction::FSub))) &&
1282          "Binary opcode should be specified for FP induction");
1283 
1284   if (IK == IK_PtrInduction)
1285     assert(ElementType && "Pointer induction must have element type");
1286   else
1287     assert(!ElementType && "Non-pointer induction cannot have element type");
1288 
1289   if (Casts) {
1290     for (auto &Inst : *Casts) {
1291       RedundantCasts.push_back(Inst);
1292     }
1293   }
1294 }
1295 
1296 ConstantInt *InductionDescriptor::getConstIntStepValue() const {
1297   if (isa<SCEVConstant>(Step))
1298     return dyn_cast<ConstantInt>(cast<SCEVConstant>(Step)->getValue());
1299   return nullptr;
1300 }
1301 
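// Illustrative example of the FP inductions recognized below (a sketch, not
// taken from the original source): a header phi whose backedge value is an
// fadd/fsub with a loop-invariant addend, e.g.
//   %fp.iv   = phi float [ 1.000000e+00, %ph ], [ %fp.next, %loop ]
//   %fp.next = fadd float %fp.iv, %fp.step  ; %fp.step is defined outside the loop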
1302 bool InductionDescriptor::isFPInductionPHI(PHINode *Phi, const Loop *TheLoop,
1303                                            ScalarEvolution *SE,
1304                                            InductionDescriptor &D) {
1305 
1306   // Here we only handle FP induction variables.
1307   assert(Phi->getType()->isFloatingPointTy() && "Unexpected Phi type");
1308 
1309   if (TheLoop->getHeader() != Phi->getParent())
1310     return false;
1311 
1312   // The loop may have multiple entrances or multiple exits; we can analyze
1313   // this phi if it has a unique entry value and a unique backedge value.
1314   if (Phi->getNumIncomingValues() != 2)
1315     return false;
1316   Value *BEValue = nullptr, *StartValue = nullptr;
1317   if (TheLoop->contains(Phi->getIncomingBlock(0))) {
1318     BEValue = Phi->getIncomingValue(0);
1319     StartValue = Phi->getIncomingValue(1);
1320   } else {
1321     assert(TheLoop->contains(Phi->getIncomingBlock(1)) &&
1322            "Unexpected Phi node in the loop");
1323     BEValue = Phi->getIncomingValue(1);
1324     StartValue = Phi->getIncomingValue(0);
1325   }
1326 
1327   BinaryOperator *BOp = dyn_cast<BinaryOperator>(BEValue);
1328   if (!BOp)
1329     return false;
1330 
1331   Value *Addend = nullptr;
1332   if (BOp->getOpcode() == Instruction::FAdd) {
1333     if (BOp->getOperand(0) == Phi)
1334       Addend = BOp->getOperand(1);
1335     else if (BOp->getOperand(1) == Phi)
1336       Addend = BOp->getOperand(0);
1337   } else if (BOp->getOpcode() == Instruction::FSub)
1338     if (BOp->getOperand(0) == Phi)
1339       Addend = BOp->getOperand(1);
1340 
1341   if (!Addend)
1342     return false;
1343 
1344   // The addend should be loop invariant
1345   if (auto *I = dyn_cast<Instruction>(Addend))
1346     if (TheLoop->contains(I))
1347       return false;
1348 
1349   // FP Step has unknown SCEV
1350   const SCEV *Step = SE->getUnknown(Addend);
1351   D = InductionDescriptor(StartValue, IK_FpInduction, Step, BOp);
1352   return true;
1353 }
1354 
1355 /// This function is called when we suspect that the update-chain of a phi node
1356 /// (whose symbolic SCEV expression is in \p PhiScev) contains redundant casts,
1357 /// that can be ignored. (This can happen when the PSCEV rewriter adds a runtime
1358 /// predicate P under which the SCEV expression for the phi can be the
1359 /// AddRecurrence \p AR; See createAddRecFromPHIWithCast). We want to find the
1360 /// cast instructions that are involved in the update-chain of this induction.
1361 /// A caller that adds the required runtime predicate is then free to drop these
1362 /// cast instructions, and compute the phi using \p AR (instead of some scev
1363 /// expression with casts).
1364 ///
1365 /// For example, without a predicate the scev expression can take the following
1366 /// form:
1367 ///      (Ext ix (Trunc iy ( Start + i*Step ) to ix) to iy)
1368 ///
1369 /// It corresponds to the following IR sequence:
1370 /// %for.body:
1371 ///   %x = phi i64 [ 0, %ph ], [ %add, %for.body ]
1372 ///   %casted_phi = "ExtTrunc i64 %x"
1373 ///   %add = add i64 %casted_phi, %step
1374 ///
1375 /// where %x is given in \p PN,
1376 /// PSE.getSCEV(%x) is equal to PSE.getSCEV(%casted_phi) under a predicate,
1377 /// and the IR sequence that "ExtTrunc i64 %x" represents can take one of
1378 /// several forms, for example, such as:
1379 ///   ExtTrunc1:    %casted_phi = and  %x, 2^n-1
1380 /// or:
1381 ///   ExtTrunc2:    %t = shl %x, m
1382 ///                 %casted_phi = ashr %t, m
1383 ///
1384 /// If we are able to find such sequence, we return the instructions
1385 /// we found, namely %casted_phi and the instructions on its use-def chain up
1386 /// to the phi (not including the phi).
1387 static bool getCastsForInductionPHI(PredicatedScalarEvolution &PSE,
1388                                     const SCEVUnknown *PhiScev,
1389                                     const SCEVAddRecExpr *AR,
1390                                     SmallVectorImpl<Instruction *> &CastInsts) {
1391 
1392   assert(CastInsts.empty() && "CastInsts is expected to be empty.");
1393   auto *PN = cast<PHINode>(PhiScev->getValue());
1394   assert(PSE.getSCEV(PN) == AR && "Unexpected phi node SCEV expression");
1395   const Loop *L = AR->getLoop();
1396 
1397   // Find any cast instructions that participate in the def-use chain of
1398   // PhiScev in the loop.
1399   // FORNOW/TODO: We currently expect the def-use chain to include only
1400   // two-operand instructions, where one of the operands is an invariant.
1401   // createAddRecFromPHIWithCasts() currently does not support anything more
1402   // involved than that, so we keep the search simple. This can be
1403   // extended/generalized as needed.
1404 
1405   auto getDef = [&](const Value *Val) -> Value * {
1406     const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Val);
1407     if (!BinOp)
1408       return nullptr;
1409     Value *Op0 = BinOp->getOperand(0);
1410     Value *Op1 = BinOp->getOperand(1);
1411     Value *Def = nullptr;
1412     if (L->isLoopInvariant(Op0))
1413       Def = Op1;
1414     else if (L->isLoopInvariant(Op1))
1415       Def = Op0;
1416     return Def;
1417   };
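  // For the ExtTrunc2 form documented above, getDef walks (illustrative only):
  //   %casted_phi = ashr %t, m   ->  %t
  //   %t          = shl  %x, m   ->  %x
  // because the shift amount m is loop invariant in both cases.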
1418 
1419   // Look for the instruction that defines the induction via the
1420   // loop backedge.
1421   BasicBlock *Latch = L->getLoopLatch();
1422   if (!Latch)
1423     return false;
1424   Value *Val = PN->getIncomingValueForBlock(Latch);
1425   if (!Val)
1426     return false;
1427 
1428   // Follow the def-use chain until the induction phi is reached.
1429   // If on the way we encounter a Value that has the same SCEV Expr as the
1430   // phi node, we can consider the instructions we visit from that point
1431   // as part of the cast-sequence that can be ignored.
1432   bool InCastSequence = false;
1433   auto *Inst = dyn_cast<Instruction>(Val);
1434   while (Val != PN) {
1435     // If we encountered a phi node other than PN, or if we left the loop,
1436     // we bail out.
1437     if (!Inst || !L->contains(Inst)) {
1438       return false;
1439     }
1440     auto *AddRec = dyn_cast<SCEVAddRecExpr>(PSE.getSCEV(Val));
1441     if (AddRec && PSE.areAddRecsEqualWithPreds(AddRec, AR))
1442       InCastSequence = true;
1443     if (InCastSequence) {
1444       // Only the last instruction in the cast sequence is expected to have
1445       // uses outside the induction def-use chain.
1446       if (!CastInsts.empty())
1447         if (!Inst->hasOneUse())
1448           return false;
1449       CastInsts.push_back(Inst);
1450     }
1451     Val = getDef(Val);
1452     if (!Val)
1453       return false;
1454     Inst = dyn_cast<Instruction>(Val);
1455   }
1456 
1457   return InCastSequence;
1458 }
1459 
1460 bool InductionDescriptor::isInductionPHI(PHINode *Phi, const Loop *TheLoop,
1461                                          PredicatedScalarEvolution &PSE,
1462                                          InductionDescriptor &D, bool Assume) {
1463   Type *PhiTy = Phi->getType();
1464 
1465   // Handle integer and pointer induction variables.
1466   // FP inductions are also handled here, but without trying to build a
1467   // recurrent expression from the PHI node in place.
1468 
1469   if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy() && !PhiTy->isFloatTy() &&
1470       !PhiTy->isDoubleTy() && !PhiTy->isHalfTy())
1471     return false;
1472 
1473   if (PhiTy->isFloatingPointTy())
1474     return isFPInductionPHI(Phi, TheLoop, PSE.getSE(), D);
1475 
1476   const SCEV *PhiScev = PSE.getSCEV(Phi);
1477   const auto *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
1478 
1479   // We need this expression to be an AddRecExpr.
1480   if (Assume && !AR)
1481     AR = PSE.getAsAddRec(Phi);
1482 
1483   if (!AR) {
1484     LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
1485     return false;
1486   }
1487 
1488   // Record any Cast instructions that participate in the induction update
1489   const auto *SymbolicPhi = dyn_cast<SCEVUnknown>(PhiScev);
1490   // If we started from an UnknownSCEV, and managed to build an addRecurrence
1491   // only after enabling Assume with PSCEV, this means we may have encountered
1492   // cast instructions that required adding a runtime check in order to
1493   // guarantee the correctness of the AddRecurrence representation of the
1494   // induction.
1495   if (PhiScev != AR && SymbolicPhi) {
1496     SmallVector<Instruction *, 2> Casts;
1497     if (getCastsForInductionPHI(PSE, SymbolicPhi, AR, Casts))
1498       return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR, &Casts);
1499   }
1500 
1501   return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR);
1502 }
1503 
1504 bool InductionDescriptor::isInductionPHI(
1505     PHINode *Phi, const Loop *TheLoop, ScalarEvolution *SE,
1506     InductionDescriptor &D, const SCEV *Expr,
1507     SmallVectorImpl<Instruction *> *CastsToIgnore) {
1508   Type *PhiTy = Phi->getType();
1509   // We only handle integer and pointer induction variables.
1510   if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy())
1511     return false;
1512 
1513   // Check that the PHI is consecutive.
1514   const SCEV *PhiScev = Expr ? Expr : SE->getSCEV(Phi);
1515   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
1516 
1517   if (!AR) {
1518     LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
1519     return false;
1520   }
1521 
1522   if (AR->getLoop() != TheLoop) {
1523     // FIXME: We should treat this as a uniform. Unfortunately, we
1524     // don't currently know how to handle uniform PHIs.
1525     LLVM_DEBUG(
1526         dbgs() << "LV: PHI is a recurrence with respect to an outer loop.\n");
1527     return false;
1528   }
1529 
1530   Value *StartValue =
1531       Phi->getIncomingValueForBlock(AR->getLoop()->getLoopPreheader());
1532 
1533   BasicBlock *Latch = AR->getLoop()->getLoopLatch();
1534   if (!Latch)
1535     return false;
1536 
1537   const SCEV *Step = AR->getStepRecurrence(*SE);
1538   // Calculate the pointer stride and check if it is consecutive.
1539   // The stride may be a constant or a loop invariant integer value.
1540   const SCEVConstant *ConstStep = dyn_cast<SCEVConstant>(Step);
1541   if (!ConstStep && !SE->isLoopInvariant(Step, TheLoop))
1542     return false;
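  // Note that a loop-invariant but non-constant step (for example, a stride %n
  // defined outside the loop) is accepted here for integer inductions; the
  // pointer case below additionally requires a constant step.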
1543 
1544   if (PhiTy->isIntegerTy()) {
1545     BinaryOperator *BOp =
1546         dyn_cast<BinaryOperator>(Phi->getIncomingValueForBlock(Latch));
1547     D = InductionDescriptor(StartValue, IK_IntInduction, Step, BOp,
1548                             /* ElementType */ nullptr, CastsToIgnore);
1549     return true;
1550   }
1551 
1552   assert(PhiTy->isPointerTy() && "The PHI must be a pointer");
1553   // The step of a pointer induction should be a constant.
1554   if (!ConstStep)
1555     return false;
1556 
1557   // Always use i8 element type for opaque pointer inductions.
1558   PointerType *PtrTy = cast<PointerType>(PhiTy);
1559   Type *ElementType = PtrTy->isOpaque()
1560                           ? Type::getInt8Ty(PtrTy->getContext())
1561                           : PtrTy->getNonOpaquePointerElementType();
1562   if (!ElementType->isSized())
1563     return false;
1564 
1565   ConstantInt *CV = ConstStep->getValue();
1566   const DataLayout &DL = Phi->getModule()->getDataLayout();
1567   TypeSize TySize = DL.getTypeAllocSize(ElementType);
1568   // TODO: We could potentially support this for scalable vectors if we can
1569   // prove at compile time that the constant step is always a multiple of
1570   // the scalable type.
1571   if (TySize.isZero() || TySize.isScalable())
1572     return false;
1573 
1574   int64_t Size = static_cast<int64_t>(TySize.getFixedSize());
1575   int64_t CVSize = CV->getSExtValue();
1576   if (CVSize % Size)
1577     return false;
1578   auto *StepValue =
1579       SE->getConstant(CV->getType(), CVSize / Size, true /* signed */);
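  // For example (an illustrative sketch): with i32 elements (Size == 4), a
  // constant byte step of 8 yields an element stride of 2, whereas a byte step
  // of 6 is rejected by the modulo check above.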
1580   D = InductionDescriptor(StartValue, IK_PtrInduction, StepValue,
1581                           /* BinOp */ nullptr, ElementType);
1582   return true;
1583 }
1584