xref: /llvm-project/llvm/lib/Analysis/VectorUtils.cpp (revision d42b392696fbd9d612ac22ff82b4a1760fc26d89)
1 //===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines vectorizer utilities.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/VectorUtils.h"
14 #include "llvm/ADT/EquivalenceClasses.h"
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/Analysis/DemandedBits.h"
17 #include "llvm/Analysis/LoopInfo.h"
18 #include "llvm/Analysis/LoopIterator.h"
19 #include "llvm/Analysis/ScalarEvolution.h"
20 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
21 #include "llvm/Analysis/TargetTransformInfo.h"
22 #include "llvm/Analysis/ValueTracking.h"
23 #include "llvm/IR/Constants.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/IRBuilder.h"
26 #include "llvm/IR/MemoryModelRelaxationAnnotations.h"
27 #include "llvm/IR/PatternMatch.h"
28 #include "llvm/IR/Value.h"
29 #include "llvm/Support/CommandLine.h"
30 
31 #define DEBUG_TYPE "vectorutils"
32 
33 using namespace llvm;
34 using namespace llvm::PatternMatch;
35 
36 /// Maximum factor for an interleaved memory access.
37 static cl::opt<unsigned> MaxInterleaveGroupFactor(
38     "max-interleave-group-factor", cl::Hidden,
39     cl::desc("Maximum factor for an interleaved access group (default = 8)"),
40     cl::init(8));
41 
42 /// Return true if all of the intrinsic's arguments and return type are scalars
43 /// for the scalar form of the intrinsic, and vectors for the vector form of the
44 /// intrinsic (except operands that are marked as always being scalar by
45 /// isVectorIntrinsicWithScalarOpAtArg).
46 bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
47   switch (ID) {
48   case Intrinsic::abs:   // Begin integer bit-manipulation.
49   case Intrinsic::bswap:
50   case Intrinsic::bitreverse:
51   case Intrinsic::ctpop:
52   case Intrinsic::ctlz:
53   case Intrinsic::cttz:
54   case Intrinsic::fshl:
55   case Intrinsic::fshr:
56   case Intrinsic::smax:
57   case Intrinsic::smin:
58   case Intrinsic::umax:
59   case Intrinsic::umin:
60   case Intrinsic::sadd_sat:
61   case Intrinsic::ssub_sat:
62   case Intrinsic::uadd_sat:
63   case Intrinsic::usub_sat:
64   case Intrinsic::smul_fix:
65   case Intrinsic::smul_fix_sat:
66   case Intrinsic::umul_fix:
67   case Intrinsic::umul_fix_sat:
68   case Intrinsic::sqrt: // Begin floating-point.
69   case Intrinsic::sin:
70   case Intrinsic::cos:
71   case Intrinsic::tan:
72   case Intrinsic::exp:
73   case Intrinsic::exp2:
74   case Intrinsic::log:
75   case Intrinsic::log10:
76   case Intrinsic::log2:
77   case Intrinsic::fabs:
78   case Intrinsic::minnum:
79   case Intrinsic::maxnum:
80   case Intrinsic::minimum:
81   case Intrinsic::maximum:
82   case Intrinsic::copysign:
83   case Intrinsic::floor:
84   case Intrinsic::ceil:
85   case Intrinsic::trunc:
86   case Intrinsic::rint:
87   case Intrinsic::nearbyint:
88   case Intrinsic::round:
89   case Intrinsic::roundeven:
90   case Intrinsic::pow:
91   case Intrinsic::fma:
92   case Intrinsic::fmuladd:
93   case Intrinsic::is_fpclass:
94   case Intrinsic::powi:
95   case Intrinsic::canonicalize:
96   case Intrinsic::fptosi_sat:
97   case Intrinsic::fptoui_sat:
98   case Intrinsic::lrint:
99   case Intrinsic::llrint:
100     return true;
101   default:
102     return false;
103   }
104 }
105 
106 /// Identifies if the vector form of the intrinsic has a scalar operand at the given index.
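/// For example, the second operand of llvm.powi (the integer exponent) and the
/// second operand of llvm.abs (the i1 INT_MIN-is-poison flag) remain scalar in
/// the vector forms of those intrinsics.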
107 bool llvm::isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
108                                               unsigned ScalarOpdIdx) {
109   switch (ID) {
110   case Intrinsic::abs:
111   case Intrinsic::ctlz:
112   case Intrinsic::cttz:
113   case Intrinsic::is_fpclass:
114   case Intrinsic::powi:
115     return (ScalarOpdIdx == 1);
116   case Intrinsic::smul_fix:
117   case Intrinsic::smul_fix_sat:
118   case Intrinsic::umul_fix:
119   case Intrinsic::umul_fix_sat:
120     return (ScalarOpdIdx == 2);
121   default:
122     return false;
123   }
124 }
125 
126 bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
127                                                   int OpdIdx) {
128   assert(ID != Intrinsic::not_intrinsic && "Not an intrinsic!");
129 
130   switch (ID) {
131   case Intrinsic::fptosi_sat:
132   case Intrinsic::fptoui_sat:
133   case Intrinsic::lrint:
134   case Intrinsic::llrint:
135     return OpdIdx == -1 || OpdIdx == 0;
136   case Intrinsic::is_fpclass:
137     return OpdIdx == 0;
138   case Intrinsic::powi:
139     return OpdIdx == -1 || OpdIdx == 1;
140   default:
141     return OpdIdx == -1;
142   }
143 }
144 
145 /// Returns the intrinsic ID for the call.
146 /// For the given call instruction, it finds the corresponding intrinsic and
147 /// returns its ID; if no such intrinsic is found, it returns not_intrinsic.
148 Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
149                                                 const TargetLibraryInfo *TLI) {
150   Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
151   if (ID == Intrinsic::not_intrinsic)
152     return Intrinsic::not_intrinsic;
153 
154   if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
155       ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
156       ID == Intrinsic::experimental_noalias_scope_decl ||
157       ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
158     return ID;
159   return Intrinsic::not_intrinsic;
160 }
161 
162 /// Given a vector and an element number, see if the scalar value is
163 /// already around as a register, for example if it were inserted then extracted
164 /// from the vector.
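/// For example, given
///   %v = insertelement <4 x i32> %w, i32 %a, i32 1
/// findScalarElement(%v, 1) returns %a, while findScalarElement(%v, 0)
/// recurses into the vector operand %w.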
165 Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
166   assert(V->getType()->isVectorTy() && "Not looking at a vector?");
167   VectorType *VTy = cast<VectorType>(V->getType());
168   // For a fixed-length vector, return poison for an out-of-range access.
169   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
170     unsigned Width = FVTy->getNumElements();
171     if (EltNo >= Width)
172       return PoisonValue::get(FVTy->getElementType());
173   }
174 
175   if (Constant *C = dyn_cast<Constant>(V))
176     return C->getAggregateElement(EltNo);
177 
178   if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
179     // If this is an insert to a variable element, we don't know what it is.
180     if (!isa<ConstantInt>(III->getOperand(2)))
181       return nullptr;
182     unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();
183 
184     // If this is an insert to the element we are looking for, return the
185     // inserted value.
186     if (EltNo == IIElt)
187       return III->getOperand(1);
188 
189     // Guard against infinite loop on malformed, unreachable IR.
190     if (III == III->getOperand(0))
191       return nullptr;
192 
193     // Otherwise, the insertelement doesn't modify the value, recurse on its
194     // vector input.
195     return findScalarElement(III->getOperand(0), EltNo);
196   }
197 
198   ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
199   // Restrict the following transformation to fixed-length vectors.
200   if (SVI && isa<FixedVectorType>(SVI->getType())) {
201     unsigned LHSWidth =
202         cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
203     int InEl = SVI->getMaskValue(EltNo);
204     if (InEl < 0)
205       return PoisonValue::get(VTy->getElementType());
206     if (InEl < (int)LHSWidth)
207       return findScalarElement(SVI->getOperand(0), InEl);
208     return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
209   }
210 
211   // Extract a value from a vector add operation with a constant zero.
212   // TODO: Use getBinOpIdentity() to generalize this.
213   Value *Val; Constant *C;
214   if (match(V, m_Add(m_Value(Val), m_Constant(C))))
215     if (Constant *Elt = C->getAggregateElement(EltNo))
216       if (Elt->isNullValue())
217         return findScalarElement(Val, EltNo);
218 
219   // If the vector is a splat then we can trivially find the scalar element.
220   if (isa<ScalableVectorType>(VTy))
221     if (Value *Splat = getSplatValue(V))
222       if (EltNo < VTy->getElementCount().getKnownMinValue())
223         return Splat;
224 
225   // Otherwise, we don't know.
226   return nullptr;
227 }
228 
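// For example, the mask <3, -1, 3, 3> has splat index 3, whereas <0, 1, 0, 1>
// yields -1 because it selects more than one distinct source element.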
229 int llvm::getSplatIndex(ArrayRef<int> Mask) {
230   int SplatIndex = -1;
231   for (int M : Mask) {
232     // Ignore invalid (undefined) mask elements.
233     if (M < 0)
234       continue;
235 
236     // There can be only 1 non-negative mask element value if this is a splat.
237     if (SplatIndex != -1 && SplatIndex != M)
238       return -1;
239 
240     // Initialize the splat index to the 1st non-negative mask element.
241     SplatIndex = M;
242   }
243   assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
244   return SplatIndex;
245 }
246 
247 /// Get splat value if the input is a splat vector or return nullptr.
248 /// This function is not fully general. It checks only 2 cases:
249 /// the input value is (1) a splat constant vector or (2) a sequence
250 /// of instructions that broadcasts a scalar at element 0.
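/// For example, it returns the scalar %s for the canonical broadcast pattern
///   %ins   = insertelement <4 x i32> poison, i32 %s, i64 0
///   %splat = shufflevector <4 x i32> %ins, <4 x i32> poison,
///                          <4 x i32> zeroinitializer
/// as well as the element constant of a splat constant vector.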
251 Value *llvm::getSplatValue(const Value *V) {
252   if (isa<VectorType>(V->getType()))
253     if (auto *C = dyn_cast<Constant>(V))
254       return C->getSplatValue();
255 
256   // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
257   Value *Splat;
258   if (match(V,
259             m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
260                       m_Value(), m_ZeroMask())))
261     return Splat;
262 
263   return nullptr;
264 }
265 
266 bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
267   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
268 
269   if (isa<VectorType>(V->getType())) {
270     if (isa<UndefValue>(V))
271       return true;
272     // FIXME: We can allow undefs, but if Index was specified, we may want to
273     //        check that the constant is defined at that index.
274     if (auto *C = dyn_cast<Constant>(V))
275       return C->getSplatValue() != nullptr;
276   }
277 
278   if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
279     // FIXME: We can safely allow undefs here. If Index was specified, we will
280     //        check that the mask elt is defined at the required index.
281     if (!all_equal(Shuf->getShuffleMask()))
282       return false;
283 
284     // Match any index.
285     if (Index == -1)
286       return true;
287 
288     // Match a specific element. The mask should be defined at and match the
289     // specified index.
290     return Shuf->getMaskValue(Index) == Index;
291   }
292 
293   // The remaining tests are all recursive, so bail out if we hit the limit.
294   if (Depth++ == MaxAnalysisRecursionDepth)
295     return false;
296 
297   // If both operands of a binop are splats, the result is a splat.
298   Value *X, *Y, *Z;
299   if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
300     return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);
301 
302   // If all operands of a select are splats, the result is a splat.
303   if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
304     return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
305            isSplatValue(Z, Index, Depth);
306 
307   // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).
308 
309   return false;
310 }
311 
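// For example, with SrcWidth == 4, Mask == <0, 5, 2, 7> and all result
// elements demanded, DemandedLHS ends up as {0, 2} and DemandedRHS as {1, 3}
// (mask values 5 and 7 refer to elements 1 and 3 of the second source).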
312 bool llvm::getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
313                                   const APInt &DemandedElts, APInt &DemandedLHS,
314                                   APInt &DemandedRHS, bool AllowUndefElts) {
315   DemandedLHS = DemandedRHS = APInt::getZero(SrcWidth);
316 
317   // Early out if we don't demand any elements.
318   if (DemandedElts.isZero())
319     return true;
320 
321   // Simple case of a shuffle with zeroinitializer.
322   if (all_of(Mask, [](int Elt) { return Elt == 0; })) {
323     DemandedLHS.setBit(0);
324     return true;
325   }
326 
327   for (unsigned I = 0, E = Mask.size(); I != E; ++I) {
328     int M = Mask[I];
329     assert((-1 <= M) && (M < (SrcWidth * 2)) &&
330            "Invalid shuffle mask constant");
331 
332     if (!DemandedElts[I] || (AllowUndefElts && (M < 0)))
333       continue;
334 
335     // For undef elements, we don't know anything about the common state of
336     // the shuffle result.
337     if (M < 0)
338       return false;
339 
340     if (M < SrcWidth)
341       DemandedLHS.setBit(M);
342     else
343       DemandedRHS.setBit(M - SrcWidth);
344   }
345 
346   return true;
347 }
348 
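// For example, narrowing the mask <0, 2, -1> with Scale == 2 produces
// <0, 1, 4, 5, -1, -1>: each source element expands into Scale consecutive
// narrow elements, and negative (sentinel) values are simply replicated.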
349 void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
350                                  SmallVectorImpl<int> &ScaledMask) {
351   assert(Scale > 0 && "Unexpected scaling factor");
352 
353   // Fast-path: if no scaling, then it is just a copy.
354   if (Scale == 1) {
355     ScaledMask.assign(Mask.begin(), Mask.end());
356     return;
357   }
358 
359   ScaledMask.clear();
360   for (int MaskElt : Mask) {
361     if (MaskElt >= 0) {
362       assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
363              "Overflowed 32-bits");
364     }
365     for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
366       ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
367   }
368 }
369 
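// For example, widening <0, 1, 4, 5> with Scale == 2 succeeds and produces
// <0, 2>, while <0, 2, 4, 5> fails because the first slice <0, 2> is not a
// run of consecutive elements.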
370 bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
371                                 SmallVectorImpl<int> &ScaledMask) {
372   assert(Scale > 0 && "Unexpected scaling factor");
373 
374   // Fast-path: if no scaling, then it is just a copy.
375   if (Scale == 1) {
376     ScaledMask.assign(Mask.begin(), Mask.end());
377     return true;
378   }
379 
380   // We must map the original elements down evenly to a type with fewer elements.
381   int NumElts = Mask.size();
382   if (NumElts % Scale != 0)
383     return false;
384 
385   ScaledMask.clear();
386   ScaledMask.reserve(NumElts / Scale);
387 
388   // Step through the input mask by splitting into Scale-sized slices.
389   do {
390     ArrayRef<int> MaskSlice = Mask.take_front(Scale);
391     assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice.");
392 
393     // The first element of the slice determines how we evaluate this slice.
394     int SliceFront = MaskSlice.front();
395     if (SliceFront < 0) {
396       // Negative values (undef or other "sentinel" values) must be equal across
397       // the entire slice.
398       if (!all_equal(MaskSlice))
399         return false;
400       ScaledMask.push_back(SliceFront);
401     } else {
402       // A positive mask element must be cleanly divisible.
403       if (SliceFront % Scale != 0)
404         return false;
405       // Elements of the slice must be consecutive.
406       for (int i = 1; i < Scale; ++i)
407         if (MaskSlice[i] != SliceFront + i)
408           return false;
409       ScaledMask.push_back(SliceFront / Scale);
410     }
411     Mask = Mask.drop_front(Scale);
412   } while (!Mask.empty());
413 
414   assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask");
415 
416   // All elements of the original mask can be scaled down to map to the elements
417   // of a mask with wider elements.
418   return true;
419 }
420 
421 bool llvm::scaleShuffleMaskElts(unsigned NumDstElts, ArrayRef<int> Mask,
422                                 SmallVectorImpl<int> &ScaledMask) {
423   unsigned NumSrcElts = Mask.size();
424   assert(NumSrcElts > 0 && NumDstElts > 0 && "Unexpected scaling factor");
425 
426   // Fast-path: if no scaling, then it is just a copy.
427   if (NumSrcElts == NumDstElts) {
428     ScaledMask.assign(Mask.begin(), Mask.end());
429     return true;
430   }
431 
432   // Ensure we can find a whole scale factor.
433   assert(((NumSrcElts % NumDstElts) == 0 || (NumDstElts % NumSrcElts) == 0) &&
434          "Unexpected scaling factor");
435 
436   if (NumSrcElts > NumDstElts) {
437     int Scale = NumSrcElts / NumDstElts;
438     return widenShuffleMaskElts(Scale, Mask, ScaledMask);
439   }
440 
441   int Scale = NumDstElts / NumSrcElts;
442   narrowShuffleMaskElts(Scale, Mask, ScaledMask);
443   return true;
444 }
445 
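// For example, the mask <0, 1, 2, 3, 8, 9, 10, 11> is repeatedly widened by a
// factor of 2, first to <0, 1, 4, 5> and then to <0, 2>, at which point no
// further widening is possible.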
446 void llvm::getShuffleMaskWithWidestElts(ArrayRef<int> Mask,
447                                         SmallVectorImpl<int> &ScaledMask) {
448   std::array<SmallVector<int, 16>, 2> TmpMasks;
449   SmallVectorImpl<int> *Output = &TmpMasks[0], *Tmp = &TmpMasks[1];
450   ArrayRef<int> InputMask = Mask;
451   for (unsigned Scale = 2; Scale <= InputMask.size(); ++Scale) {
452     while (widenShuffleMaskElts(Scale, InputMask, *Output)) {
453       InputMask = *Output;
454       std::swap(Output, Tmp);
455     }
456   }
457   ScaledMask.assign(InputMask.begin(), InputMask.end());
458 }
459 
460 void llvm::processShuffleMasks(
461     ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
462     unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
463     function_ref<void(ArrayRef<int>, unsigned, unsigned)> SingleInputAction,
464     function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction) {
465   SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs);
466   // Try to perform better estimation of the permutation.
467   // 1. Split the source/destination vectors into real registers.
468   // 2. Do the mask analysis to identify which real registers are
469   // permuted.
470   int Sz = Mask.size();
471   unsigned SzDest = Sz / NumOfDestRegs;
472   unsigned SzSrc = Sz / NumOfSrcRegs;
473   for (unsigned I = 0; I < NumOfDestRegs; ++I) {
474     auto &RegMasks = Res[I];
475     RegMasks.assign(NumOfSrcRegs, {});
476     // For each element of this dest register, record the src register it
477     // comes from and its index within that register.
478     for (unsigned K = 0; K < SzDest; ++K) {
479       int Idx = I * SzDest + K;
480       if (Idx == Sz)
481         break;
482       if (Mask[Idx] >= Sz || Mask[Idx] == PoisonMaskElem)
483         continue;
484       int SrcRegIdx = Mask[Idx] / SzSrc;
485       // Add a cost of PermuteTwoSrc for each new source register permute,
486       // if we have more than one source register.
487       if (RegMasks[SrcRegIdx].empty())
488         RegMasks[SrcRegIdx].assign(SzDest, PoisonMaskElem);
489       RegMasks[SrcRegIdx][K] = Mask[Idx] % SzSrc;
490     }
491   }
492   // Process split mask.
493   for (unsigned I = 0; I < NumOfUsedRegs; ++I) {
494     auto &Dest = Res[I];
495     int NumSrcRegs =
496         count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
497     switch (NumSrcRegs) {
498     case 0:
499       // No input vectors were used!
500       NoInputAction();
501       break;
502     case 1: {
503       // Find the single (only) non-empty source mask.
504       auto *It =
505           find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
506       unsigned SrcReg = std::distance(Dest.begin(), It);
507       SingleInputAction(*It, SrcReg, I);
508       break;
509     }
510     default: {
511       // The first mask is a permutation of a single register. Since we have
512       // two or more input registers to shuffle, we merge the masks of the
513       // first two registers and generate a single two-register shuffle rather
514       // than reordering the first register and then shuffling it with the
515       // second one. Next, we repeat this with the resulting register and each
516       // of the remaining registers in the list.
517       auto &&CombineMasks = [](MutableArrayRef<int> FirstMask,
518                                ArrayRef<int> SecondMask) {
519         for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) {
520           if (SecondMask[Idx] != PoisonMaskElem) {
521             assert(FirstMask[Idx] == PoisonMaskElem &&
522                    "Expected undefined mask element.");
523             FirstMask[Idx] = SecondMask[Idx] + VF;
524           }
525         }
526       };
527       auto &&NormalizeMask = [](MutableArrayRef<int> Mask) {
528         for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
529           if (Mask[Idx] != PoisonMaskElem)
530             Mask[Idx] = Idx;
531         }
532       };
533       int SecondIdx;
534       do {
535         int FirstIdx = -1;
536         SecondIdx = -1;
537         MutableArrayRef<int> FirstMask, SecondMask;
538         for (unsigned I = 0; I < NumOfDestRegs; ++I) {
539           SmallVectorImpl<int> &RegMask = Dest[I];
540           if (RegMask.empty())
541             continue;
542 
543           if (FirstIdx == SecondIdx) {
544             FirstIdx = I;
545             FirstMask = RegMask;
546             continue;
547           }
548           SecondIdx = I;
549           SecondMask = RegMask;
550           CombineMasks(FirstMask, SecondMask);
551           ManyInputsAction(FirstMask, FirstIdx, SecondIdx);
552           NormalizeMask(FirstMask);
553           RegMask.clear();
554           SecondMask = FirstMask;
555           SecondIdx = FirstIdx;
556         }
557         if (FirstIdx != SecondIdx && SecondIdx >= 0) {
558           CombineMasks(SecondMask, FirstMask);
559           ManyInputsAction(SecondMask, SecondIdx, FirstIdx);
560           Dest[FirstIdx].clear();
561           NormalizeMask(SecondMask);
562         }
563       } while (SecondIdx >= 0);
564       break;
565     }
566     }
567   }
568 }
569 
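// For example, in a chain such as
//   %e = zext i8 %a to i32
//   %s = add i32 %e, 7
//   %t = trunc i32 %s to i8
// only the low 8 bits of the add are demanded, so (assuming the extension is
// from a type that is not legal for the target) the add would be mapped to a
// minimum bit width of 8, letting the vectorizer use i8 vector operations.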
570 MapVector<Instruction *, uint64_t>
571 llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
572                                const TargetTransformInfo *TTI) {
573 
574   // DemandedBits will give us every value's live-out bits. But we want
575   // to ensure no extra casts would need to be inserted, so every DAG
576   // of connected values must have the same minimum bitwidth.
577   EquivalenceClasses<Value *> ECs;
578   SmallVector<Value *, 16> Worklist;
579   SmallPtrSet<Value *, 4> Roots;
580   SmallPtrSet<Value *, 16> Visited;
581   DenseMap<Value *, uint64_t> DBits;
582   SmallPtrSet<Instruction *, 4> InstructionSet;
583   MapVector<Instruction *, uint64_t> MinBWs;
584 
585   // Determine the roots. We work bottom-up, from truncs or icmps.
586   bool SeenExtFromIllegalType = false;
587   for (auto *BB : Blocks)
588     for (auto &I : *BB) {
589       InstructionSet.insert(&I);
590 
591       if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
592           !TTI->isTypeLegal(I.getOperand(0)->getType()))
593         SeenExtFromIllegalType = true;
594 
595       // Only deal with non-vector integers up to 64-bits wide.
596       if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
597           !I.getType()->isVectorTy() &&
598           I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
599         // Don't make work for ourselves. If we know the loaded type is legal,
600         // don't add it to the worklist.
601         if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
602           continue;
603 
604         Worklist.push_back(&I);
605         Roots.insert(&I);
606       }
607     }
608   // Early exit.
609   if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
610     return MinBWs;
611 
612   // Now proceed breadth-first, unioning values together.
613   while (!Worklist.empty()) {
614     Value *Val = Worklist.pop_back_val();
615     Value *Leader = ECs.getOrInsertLeaderValue(Val);
616 
617     if (!Visited.insert(Val).second)
618       continue;
619 
620     // Non-instructions terminate a chain successfully.
621     if (!isa<Instruction>(Val))
622       continue;
623     Instruction *I = cast<Instruction>(Val);
624 
625     // If we encounter a type that is larger than 64 bits, we can't represent
626     // it so bail out.
627     if (DB.getDemandedBits(I).getBitWidth() > 64)
628       return MapVector<Instruction *, uint64_t>();
629 
630     uint64_t V = DB.getDemandedBits(I).getZExtValue();
631     DBits[Leader] |= V;
632     DBits[I] = V;
633 
634     // Casts, loads and instructions outside of our range terminate a chain
635     // successfully.
636     if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
637         !InstructionSet.count(I))
638       continue;
639 
640     // Unsafe casts terminate a chain unsuccessfully. We can't do anything
641     // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
642     // transform anything that relies on them.
643     if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
644         !I->getType()->isIntegerTy()) {
645       DBits[Leader] |= ~0ULL;
646       continue;
647     }
648 
649     // We don't modify the types of PHIs. Reductions will already have been
650     // truncated if possible, and inductions' sizes will have been chosen by
651     // indvars.
652     if (isa<PHINode>(I))
653       continue;
654 
655     if (DBits[Leader] == ~0ULL)
656       // All bits demanded, no point continuing.
657       continue;
658 
659     for (Value *O : cast<User>(I)->operands()) {
660       ECs.unionSets(Leader, O);
661       Worklist.push_back(O);
662     }
663   }
664 
665   // Now we've discovered all values, walk them to see if there are
666   // any users we didn't see. If there are, we can't optimize that
667   // chain.
668   for (auto &I : DBits)
669     for (auto *U : I.first->users())
670       if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
671         DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;
672 
673   for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
674     uint64_t LeaderDemandedBits = 0;
675     for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
676       LeaderDemandedBits |= DBits[M];
677 
678     uint64_t MinBW = llvm::bit_width(LeaderDemandedBits);
679     // Round up to a power of 2
680     MinBW = llvm::bit_ceil(MinBW);
681 
682     // We don't modify the types of PHIs. Reductions will already have been
683     // truncated if possible, and inductions' sizes will have been chosen by
684     // indvars.
685     // If we are required to shrink a PHI, abandon this entire equivalence class.
686     bool Abort = false;
687     for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
688       if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) {
689         Abort = true;
690         break;
691       }
692     if (Abort)
693       continue;
694 
695     for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) {
696       auto *MI = dyn_cast<Instruction>(M);
697       if (!MI)
698         continue;
699       Type *Ty = M->getType();
700       if (Roots.count(M))
701         Ty = MI->getOperand(0)->getType();
702 
703       if (MinBW >= Ty->getScalarSizeInBits())
704         continue;
705 
706       // If any of M's operands demand more bits than MinBW then M cannot be
707       // performed safely in MinBW.
708       if (any_of(MI->operands(), [&DB, MinBW](Use &U) {
709             auto *CI = dyn_cast<ConstantInt>(U);
710             // For constant shift amounts, check if the shift would result in
711             // poison.
712             if (CI &&
713                 isa<ShlOperator, LShrOperator, AShrOperator>(U.getUser()) &&
714                 U.getOperandNo() == 1)
715               return CI->uge(MinBW);
716             uint64_t BW = bit_width(DB.getDemandedBits(&U).getZExtValue());
717             return bit_ceil(BW) > MinBW;
718           }))
719         continue;
720 
721       MinBWs[MI] = MinBW;
722     }
723   }
724 
725   return MinBWs;
726 }
727 
728 /// Add all access groups in @p AccGroups to @p List.
729 template <typename ListT>
730 static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
731   // Interpret an access group as a list containing itself.
732   if (AccGroups->getNumOperands() == 0) {
733     assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
734     List.insert(AccGroups);
735     return;
736   }
737 
738   for (const auto &AccGroupListOp : AccGroups->operands()) {
739     auto *Item = cast<MDNode>(AccGroupListOp.get());
740     assert(isValidAsAccessGroup(Item) && "List item must be an access group");
741     List.insert(Item);
742   }
743 }
744 
745 MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
746   if (!AccGroups1)
747     return AccGroups2;
748   if (!AccGroups2)
749     return AccGroups1;
750   if (AccGroups1 == AccGroups2)
751     return AccGroups1;
752 
753   SmallSetVector<Metadata *, 4> Union;
754   addToAccessGroupList(Union, AccGroups1);
755   addToAccessGroupList(Union, AccGroups2);
756 
757   if (Union.size() == 0)
758     return nullptr;
759   if (Union.size() == 1)
760     return cast<MDNode>(Union.front());
761 
762   LLVMContext &Ctx = AccGroups1->getContext();
763   return MDNode::get(Ctx, Union.getArrayRef());
764 }
765 
766 MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
767                                     const Instruction *Inst2) {
768   bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
769   bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();
770 
771   if (!MayAccessMem1 && !MayAccessMem2)
772     return nullptr;
773   if (!MayAccessMem1)
774     return Inst2->getMetadata(LLVMContext::MD_access_group);
775   if (!MayAccessMem2)
776     return Inst1->getMetadata(LLVMContext::MD_access_group);
777 
778   MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
779   MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
780   if (!MD1 || !MD2)
781     return nullptr;
782   if (MD1 == MD2)
783     return MD1;
784 
785   // Use set for scalable 'contains' check.
786   SmallPtrSet<Metadata *, 4> AccGroupSet2;
787   addToAccessGroupList(AccGroupSet2, MD2);
788 
789   SmallVector<Metadata *, 4> Intersection;
790   if (MD1->getNumOperands() == 0) {
791     assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
792     if (AccGroupSet2.count(MD1))
793       Intersection.push_back(MD1);
794   } else {
795     for (const MDOperand &Node : MD1->operands()) {
796       auto *Item = cast<MDNode>(Node.get());
797       assert(isValidAsAccessGroup(Item) && "List item must be an access group");
798       if (AccGroupSet2.count(Item))
799         Intersection.push_back(Item);
800     }
801   }
802 
803   if (Intersection.size() == 0)
804     return nullptr;
805   if (Intersection.size() == 1)
806     return cast<MDNode>(Intersection.front());
807 
808   LLVMContext &Ctx = Inst1->getContext();
809   return MDNode::get(Ctx, Intersection);
810 }
811 
812 /// \returns \p I after propagating metadata from \p VL.
813 Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
814   if (VL.empty())
815     return Inst;
816   Instruction *I0 = cast<Instruction>(VL[0]);
817   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
818   I0->getAllMetadataOtherThanDebugLoc(Metadata);
819 
820   for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
821                     LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
822                     LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
823                     LLVMContext::MD_access_group, LLVMContext::MD_mmra}) {
824     MDNode *MD = I0->getMetadata(Kind);
825     for (int J = 1, E = VL.size(); MD && J != E; ++J) {
826       const Instruction *IJ = cast<Instruction>(VL[J]);
827       MDNode *IMD = IJ->getMetadata(Kind);
828 
829       switch (Kind) {
830       case LLVMContext::MD_mmra: {
831         MD = MMRAMetadata::combine(Inst->getContext(), MD, IMD);
832         break;
833       }
834       case LLVMContext::MD_tbaa:
835         MD = MDNode::getMostGenericTBAA(MD, IMD);
836         break;
837       case LLVMContext::MD_alias_scope:
838         MD = MDNode::getMostGenericAliasScope(MD, IMD);
839         break;
840       case LLVMContext::MD_fpmath:
841         MD = MDNode::getMostGenericFPMath(MD, IMD);
842         break;
843       case LLVMContext::MD_noalias:
844       case LLVMContext::MD_nontemporal:
845       case LLVMContext::MD_invariant_load:
846         MD = MDNode::intersect(MD, IMD);
847         break;
848       case LLVMContext::MD_access_group:
849         MD = intersectAccessGroups(Inst, IJ);
850         break;
851       default:
852         llvm_unreachable("unhandled metadata");
853       }
854     }
855 
856     Inst->setMetadata(Kind, MD);
857   }
858 
859   return Inst;
860 }
861 
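// For example, for a group with factor 3 whose members occupy indices 0 and 2,
// a mask built with VF == 2 is the i1 vector <1, 0, 1, 1, 0, 1>.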
862 Constant *
863 llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
864                            const InterleaveGroup<Instruction> &Group) {
865   // All 1's means mask is not needed.
866   if (Group.getNumMembers() == Group.getFactor())
867     return nullptr;
868 
869   // TODO: support reversed access.
870   assert(!Group.isReverse() && "Reversed group not supported.");
871 
872   SmallVector<Constant *, 16> Mask;
873   for (unsigned i = 0; i < VF; i++)
874     for (unsigned j = 0; j < Group.getFactor(); ++j) {
875       unsigned HasMember = Group.getMember(j) ? 1 : 0;
876       Mask.push_back(Builder.getInt1(HasMember));
877     }
878 
879   return ConstantVector::get(Mask);
880 }
881 
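// For example, createReplicatedMask(3, 2) returns <0, 0, 0, 1, 1, 1>.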
882 llvm::SmallVector<int, 16>
883 llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) {
884   SmallVector<int, 16> MaskVec;
885   for (unsigned i = 0; i < VF; i++)
886     for (unsigned j = 0; j < ReplicationFactor; j++)
887       MaskVec.push_back(i);
888 
889   return MaskVec;
890 }
891 
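// For example, createInterleaveMask(4, 2) returns <0, 4, 1, 5, 2, 6, 3, 7>,
// interleaving the lanes of two 4-element vectors.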
892 llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF,
893                                                       unsigned NumVecs) {
894   SmallVector<int, 16> Mask;
895   for (unsigned i = 0; i < VF; i++)
896     for (unsigned j = 0; j < NumVecs; j++)
897       Mask.push_back(j * VF + i);
898 
899   return Mask;
900 }
901 
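// For example, createStrideMask(0, 2, 4) returns <0, 2, 4, 6>, selecting every
// second element starting at element 0.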
902 llvm::SmallVector<int, 16>
903 llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) {
904   SmallVector<int, 16> Mask;
905   for (unsigned i = 0; i < VF; i++)
906     Mask.push_back(Start + i * Stride);
907 
908   return Mask;
909 }
910 
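// For example, createSequentialMask(2, 3, 2) returns <2, 3, 4, -1, -1>.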
911 llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start,
912                                                       unsigned NumInts,
913                                                       unsigned NumUndefs) {
914   SmallVector<int, 16> Mask;
915   for (unsigned i = 0; i < NumInts; i++)
916     Mask.push_back(Start + i);
917 
918   for (unsigned i = 0; i < NumUndefs; i++)
919     Mask.push_back(-1);
920 
921   return Mask;
922 }
923 
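// For example, with NumElts == 4 the mask <0, 5, 2, 7> becomes <0, 1, 2, 3>:
// references to the second operand are remapped onto the first.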
924 llvm::SmallVector<int, 16> llvm::createUnaryMask(ArrayRef<int> Mask,
925                                                  unsigned NumElts) {
926   // Avoid casts in the loop and make sure we have a reasonable number.
927   int NumEltsSigned = NumElts;
928   assert(NumEltsSigned > 0 && "Expected smaller or non-zero element count");
929 
930   // If the mask chooses an element from operand 1, reduce it to choose from the
931   // corresponding element of operand 0. Undef mask elements are unchanged.
932   SmallVector<int, 16> UnaryMask;
933   for (int MaskElt : Mask) {
934     assert((MaskElt < NumEltsSigned * 2) && "Expected valid shuffle mask");
935     int UnaryElt = MaskElt >= NumEltsSigned ? MaskElt - NumEltsSigned : MaskElt;
936     UnaryMask.push_back(UnaryElt);
937   }
938   return UnaryMask;
939 }
940 
941 /// A helper function for concatenating vectors. This function concatenates two
942 /// vectors having the same element type. If the second vector has fewer
943 /// elements than the first, it is padded with undefs.
944 static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
945                                     Value *V2) {
946   VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
947   VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
948   assert(VecTy1 && VecTy2 &&
949          VecTy1->getScalarType() == VecTy2->getScalarType() &&
950          "Expect two vectors with the same element type");
951 
952   unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements();
953   unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements();
954   assert(NumElts1 >= NumElts2 && "Expected the first vector to have no fewer elements");
955 
956   if (NumElts1 > NumElts2) {
957     // Extend with UNDEFs.
958     V2 = Builder.CreateShuffleVector(
959         V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
960   }
961 
962   return Builder.CreateShuffleVector(
963       V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0));
964 }
965 
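// For example, concatenating two <2 x i32> values yields a single <4 x i32>
// shuffle; with an odd number of inputs the trailing vector is carried into
// the next round and, being shorter, padded with undefs before concatenation.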
966 Value *llvm::concatenateVectors(IRBuilderBase &Builder,
967                                 ArrayRef<Value *> Vecs) {
968   unsigned NumVecs = Vecs.size();
969   assert(NumVecs > 1 && "Should be at least two vectors");
970 
971   SmallVector<Value *, 8> ResList;
972   ResList.append(Vecs.begin(), Vecs.end());
973   do {
974     SmallVector<Value *, 8> TmpList;
975     for (unsigned i = 0; i < NumVecs - 1; i += 2) {
976       Value *V0 = ResList[i], *V1 = ResList[i + 1];
977       assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
978              "Only the last vector may have a different type");
979 
980       TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
981     }
982 
983     // Push the last vector if the total number of vectors is odd.
984     if (NumVecs % 2 != 0)
985       TmpList.push_back(ResList[NumVecs - 1]);
986 
987     ResList = TmpList;
988     NumVecs = ResList.size();
989   } while (NumVecs > 1);
990 
991   return ResList[0];
992 }
993 
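// For example, the constant masks <i1 0, i1 0> and <i1 0, i1 undef> are
// all-zero-or-undef, while <i1 0, i1 1> and any non-constant mask are not.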
994 bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
995   assert(isa<VectorType>(Mask->getType()) &&
996          isa<IntegerType>(Mask->getType()->getScalarType()) &&
997          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
998              1 &&
999          "Mask must be a vector of i1");
1000 
1001   auto *ConstMask = dyn_cast<Constant>(Mask);
1002   if (!ConstMask)
1003     return false;
1004   if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
1005     return true;
1006   if (isa<ScalableVectorType>(ConstMask->getType()))
1007     return false;
1008   for (unsigned
1009            I = 0,
1010            E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
1011        I != E; ++I) {
1012     if (auto *MaskElt = ConstMask->getAggregateElement(I))
1013       if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
1014         continue;
1015     return false;
1016   }
1017   return true;
1018 }
1019 
1020 bool llvm::maskIsAllOneOrUndef(Value *Mask) {
1021   assert(isa<VectorType>(Mask->getType()) &&
1022          isa<IntegerType>(Mask->getType()->getScalarType()) &&
1023          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
1024              1 &&
1025          "Mask must be a vector of i1");
1026 
1027   auto *ConstMask = dyn_cast<Constant>(Mask);
1028   if (!ConstMask)
1029     return false;
1030   if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
1031     return true;
1032   if (isa<ScalableVectorType>(ConstMask->getType()))
1033     return false;
1034   for (unsigned
1035            I = 0,
1036            E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
1037        I != E; ++I) {
1038     if (auto *MaskElt = ConstMask->getAggregateElement(I))
1039       if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
1040         continue;
1041     return false;
1042   }
1043   return true;
1044 }
1045 
1046 bool llvm::maskContainsAllOneOrUndef(Value *Mask) {
1047   assert(isa<VectorType>(Mask->getType()) &&
1048          isa<IntegerType>(Mask->getType()->getScalarType()) &&
1049          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
1050              1 &&
1051          "Mask must be a vector of i1");
1052 
1053   auto *ConstMask = dyn_cast<Constant>(Mask);
1054   if (!ConstMask)
1055     return false;
1056   if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
1057     return true;
1058   if (isa<ScalableVectorType>(ConstMask->getType()))
1059     return false;
1060   for (unsigned
1061            I = 0,
1062            E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
1063        I != E; ++I) {
1064     if (auto *MaskElt = ConstMask->getAggregateElement(I))
1065       if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
1066         return true;
1067   }
1068   return false;
1069 }
1070 
1071 /// TODO: This is a lot like known bits, but for
1072 /// vectors.  Is there something we can common this with?
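/// For example, for the constant mask <i1 1, i1 0, i1 1, i1 1> this returns an
/// APInt with bits 0, 2 and 3 set (element 1 is known not to be demanded); for
/// a non-constant mask it conservatively returns all ones.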
1073 APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
1074   assert(isa<FixedVectorType>(Mask->getType()) &&
1075          isa<IntegerType>(Mask->getType()->getScalarType()) &&
1076          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
1077              1 &&
1078          "Mask must be a fixed width vector of i1");
1079 
1080   const unsigned VWidth =
1081       cast<FixedVectorType>(Mask->getType())->getNumElements();
1082   APInt DemandedElts = APInt::getAllOnes(VWidth);
1083   if (auto *CV = dyn_cast<ConstantVector>(Mask))
1084     for (unsigned i = 0; i < VWidth; i++)
1085       if (CV->getAggregateElement(i)->isNullValue())
1086         DemandedElts.clearBit(i);
1087   return DemandedElts;
1088 }
1089 
1090 bool InterleavedAccessInfo::isStrided(int Stride) {
1091   unsigned Factor = std::abs(Stride);
1092   return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
1093 }
1094 
1095 void InterleavedAccessInfo::collectConstStrideAccesses(
1096     MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
1097     const DenseMap<Value*, const SCEV*> &Strides) {
1098   auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();
1099 
1100   // Since it's desired that the load/store instructions be maintained in
1101   // "program order" for the interleaved access analysis, we have to visit the
1102   // blocks in the loop in reverse postorder (i.e., in a topological order).
1103   // Such an ordering will ensure that any load/store that may be executed
1104   // before a second load/store will precede the second load/store in
1105   // AccessStrideInfo.
1106   LoopBlocksDFS DFS(TheLoop);
1107   DFS.perform(LI);
1108   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
1109     for (auto &I : *BB) {
1110       Value *Ptr = getLoadStorePointerOperand(&I);
1111       if (!Ptr)
1112         continue;
1113       Type *ElementTy = getLoadStoreType(&I);
1114 
1115       // Currently, codegen doesn't support cases where the type size doesn't
1116       // match the alloc size. Skip them for now.
1117       uint64_t Size = DL.getTypeAllocSize(ElementTy);
1118       if (Size * 8 != DL.getTypeSizeInBits(ElementTy))
1119         continue;
1120 
1121       // We don't check wrapping here because we don't know yet if Ptr will be
1122       // part of a full group or a group with gaps. Checking wrapping for all
1123       // pointers (even those that end up in groups with no gaps) will be overly
1124       // conservative. For full groups, wrapping should be ok since if we would
1125       // wrap around the address space we would do a memory access at nullptr
1126       // even without the transformation. The wrapping checks are therefore
1127       // deferred until after we've formed the interleaved groups.
1128       int64_t Stride =
1129         getPtrStride(PSE, ElementTy, Ptr, TheLoop, Strides,
1130                      /*Assume=*/true, /*ShouldCheckWrap=*/false).value_or(0);
1131 
1132       const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
1133       AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
1134                                               getLoadStoreAlignment(&I));
1135     }
1136 }
1137 
1138 // Analyze interleaved accesses and collect them into interleaved load and
1139 // store groups.
1140 //
1141 // When generating code for an interleaved load group, we effectively hoist all
1142 // loads in the group to the location of the first load in program order. When
1143 // generating code for an interleaved store group, we sink all stores to the
1144 // location of the last store. This code motion can change the order of load
1145 // and store instructions and may break dependences.
1146 //
1147 // The code generation strategy mentioned above ensures that we won't violate
1148 // any write-after-read (WAR) dependences.
1149 //
1150 // E.g., for the WAR dependence:  a = A[i];      // (1)
1151 //                                A[i] = b;      // (2)
1152 //
1153 // The store group of (2) is always inserted at or below (2), and the load
1154 // group of (1) is always inserted at or above (1). Thus, the instructions will
1155 // never be reordered. All other dependences are checked to ensure the
1156 // correctness of the instruction reordering.
1157 //
1158 // The algorithm visits all memory accesses in the loop in bottom-up program
1159 // order. Program order is established by traversing the blocks in the loop in
1160 // reverse postorder when collecting the accesses.
1161 //
1162 // We visit the memory accesses in bottom-up order because it can simplify the
1163 // construction of store groups in the presence of write-after-write (WAW)
1164 // dependences.
1165 //
1166 // E.g., for the WAW dependence:  A[i] = a;      // (1)
1167 //                                A[i] = b;      // (2)
1168 //                                A[i + 1] = c;  // (3)
1169 //
1170 // We will first create a store group with (3) and (2). (1) can't be added to
1171 // this group because it and (2) are dependent. However, (1) can be grouped
1172 // with other accesses that may precede it in program order. Note that a
1173 // bottom-up order does not imply that WAW dependences should not be checked.
1174 void InterleavedAccessInfo::analyzeInterleaving(
1175                                  bool EnablePredicatedInterleavedMemAccesses) {
1176   LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
1177   const auto &Strides = LAI->getSymbolicStrides();
1178 
1179   // Holds all accesses with a constant stride.
1180   MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
1181   collectConstStrideAccesses(AccessStrideInfo, Strides);
1182 
1183   if (AccessStrideInfo.empty())
1184     return;
1185 
1186   // Collect the dependences in the loop.
1187   collectDependences();
1188 
1189   // Holds all interleaved store groups temporarily.
1190   SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
1191   // Holds all interleaved load groups temporarily.
1192   SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;
1193   // Groups added to this set cannot have new members added.
1194   SmallPtrSet<InterleaveGroup<Instruction> *, 4> CompletedLoadGroups;
1195 
1196   // Search in bottom-up program order for pairs of accesses (A and B) that can
1197   // form interleaved load or store groups. In the algorithm below, access A
1198   // precedes access B in program order. We initialize a group for B in the
1199   // outer loop of the algorithm, and then in the inner loop, we attempt to
1200   // insert each A into B's group if:
1201   //
1202   //  1. A and B have the same stride,
1203   //  2. A and B have the same memory object size, and
1204   //  3. A belongs in B's group according to its distance from B.
1205   //
1206   // Special care is taken to ensure group formation will not break any
1207   // dependences.
1208   for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
1209        BI != E; ++BI) {
1210     Instruction *B = BI->first;
1211     StrideDescriptor DesB = BI->second;
1212 
1213     // Initialize a group for B if it has an allowable stride. Even if we don't
1214     // create a group for B, we continue with the bottom-up algorithm to ensure
1215     // we don't break any of B's dependences.
1216     InterleaveGroup<Instruction> *GroupB = nullptr;
1217     if (isStrided(DesB.Stride) &&
1218         (!isPredicated(B->getParent()) || EnablePredicatedInterleavedMemAccesses)) {
1219       GroupB = getInterleaveGroup(B);
1220       if (!GroupB) {
1221         LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
1222                           << '\n');
1223         GroupB = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
1224         if (B->mayWriteToMemory())
1225           StoreGroups.insert(GroupB);
1226         else
1227           LoadGroups.insert(GroupB);
1228       }
1229     }
1230 
1231     for (auto AI = std::next(BI); AI != E; ++AI) {
1232       Instruction *A = AI->first;
1233       StrideDescriptor DesA = AI->second;
1234 
1235       // Our code motion strategy implies that we can't have dependences
1236       // between accesses in an interleaved group and other accesses located
1237       // between the first and last member of the group. Note that this also
1238       // means that a group can't have more than one member at a given offset.
1239       // The accesses in a group can have dependences with other accesses, but
1240       // we must ensure we don't extend the boundaries of the group such that
1241       // we encompass those dependent accesses.
1242       //
1243       // For example, assume we have the sequence of accesses shown below in a
1244       // stride-2 loop:
1245       //
1246       //  (1, 2) is a group | A[i]   = a;  // (1)
1247       //                    | A[i-1] = b;  // (2) |
1248       //                      A[i-3] = c;  // (3)
1249       //                      A[i]   = d;  // (4) | (2, 4) is not a group
1250       //
1251       // Because accesses (2) and (3) are dependent, we can group (2) with (1)
1252       // but not with (4). If we did, the dependent access (3) would be within
1253       // the boundaries of the (2, 4) group.
1254       auto DependentMember = [&](InterleaveGroup<Instruction> *Group,
1255                                  StrideEntry *A) -> Instruction * {
1256         for (uint32_t Index = 0; Index < Group->getFactor(); ++Index) {
1257           Instruction *MemberOfGroupB = Group->getMember(Index);
1258           if (MemberOfGroupB && !canReorderMemAccessesForInterleavedGroups(
1259                                     A, &*AccessStrideInfo.find(MemberOfGroupB)))
1260             return MemberOfGroupB;
1261         }
1262         return nullptr;
1263       };
1264 
1265       auto GroupA = getInterleaveGroup(A);
1266       // If A is a load, dependencies are tolerable, there's nothing to do here.
1267       // If both A and B belong to the same (store) group, they are independent,
1268       // even if dependencies have not been recorded.
1269       // If both GroupA and GroupB are null, there's nothing to do here.
1270       if (A->mayWriteToMemory() && GroupA != GroupB) {
1271         Instruction *DependentInst = nullptr;
1272         // If GroupB is a load group, we have to compare AI against all
1273         // members of GroupB because if any load within GroupB has a dependency
1274         // on AI, we need to mark GroupB as complete and also release the
1275         // store GroupA (if A belongs to one). The former prevents incorrect
1276         // hoisting of load B above store A while the latter prevents incorrect
1277         // sinking of store A below load B.
1278         if (GroupB && LoadGroups.contains(GroupB))
1279           DependentInst = DependentMember(GroupB, &*AI);
1280         else if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI))
1281           DependentInst = B;
1282 
1283         if (DependentInst) {
1284           // A has a store dependence on B (or on some load within GroupB) and
1285           // is part of a store group. Release A's group to prevent illegal
1286           // sinking of A below B. A will then be free to form another group
1287           // with instructions that precede it.
1288           if (GroupA && StoreGroups.contains(GroupA)) {
1289             LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
1290                                  "dependence between "
1291                               << *A << " and " << *DependentInst << '\n');
1292             StoreGroups.remove(GroupA);
1293             releaseGroup(GroupA);
1294           }
1295           // If B is a load and part of an interleave group, no earlier loads
1296           // can be added to B's interleave group, because this would mean the
1297           // DependentInst would move across store A. Mark the interleave group
1298           // as complete.
1299           if (GroupB && LoadGroups.contains(GroupB)) {
1300             LLVM_DEBUG(dbgs() << "LV: Marking interleave group for " << *B
1301                               << " as complete.\n");
1302             CompletedLoadGroups.insert(GroupB);
1303           }
1304         }
1305       }
1306       if (CompletedLoadGroups.contains(GroupB)) {
1307         // Skip trying to add A to B, continue to look for other conflicting A's
1308         // in groups to be released.
1309         continue;
1310       }
1311 
1312       // At this point, we've checked for illegal code motion. If either A or B
1313       // isn't strided, there's nothing left to do.
1314       if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
1315         continue;
1316 
1317       // Ignore A if it's already in a group or isn't the same kind of memory
1318       // operation as B.
1319       // Note that mayReadFromMemory() isn't mutually exclusive to
1320       // mayWriteToMemory in the case of atomic loads. We shouldn't see those
1321       // here, canVectorizeMemory() should have returned false - except for the
1322       // case we asked for optimization remarks.
1323       if (isInterleaved(A) ||
1324           (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
1325           (A->mayWriteToMemory() != B->mayWriteToMemory()))
1326         continue;
1327 
1328       // Check rules 1 and 2. Ignore A if its stride or size is different from
1329       // that of B.
1330       if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
1331         continue;
1332 
1333       // Ignore A if the memory object of A and B don't belong to the same
1334       // address space
1335       if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
1336         continue;
1337 
1338       // Calculate the distance from A to B.
1339       const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
1340           PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
1341       if (!DistToB)
1342         continue;
1343       int64_t DistanceToB = DistToB->getAPInt().getSExtValue();
1344 
1345       // Check rule 3. Ignore A if its distance to B is not a multiple of the
1346       // size.
1347       if (DistanceToB % static_cast<int64_t>(DesB.Size))
1348         continue;
1349 
1350       // All members of a predicated interleave-group must have the same predicate,
1351       // and currently must reside in the same BB.
1352       BasicBlock *BlockA = A->getParent();
1353       BasicBlock *BlockB = B->getParent();
1354       if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
1355           (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
1356         continue;
1357 
1358       // The index of A is the index of B plus A's distance to B in multiples
1359       // of the size.
1360       int IndexA =
1361           GroupB->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);
1362 
1363       // Try to insert A into B's group.
1364       if (GroupB->insertMember(A, IndexA, DesA.Alignment)) {
1365         LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
1366                           << "    into the interleave group with" << *B
1367                           << '\n');
1368         InterleaveGroupMap[A] = GroupB;
1369 
1370         // Set the first load in program order as the insert position.
1371         if (A->mayReadFromMemory())
1372           GroupB->setInsertPos(A);
1373       }
1374     } // Iteration over A accesses.
1375   }   // Iteration over B accesses.
1376 
1377   auto InvalidateGroupIfMemberMayWrap = [&](InterleaveGroup<Instruction> *Group,
1378                                             int Index,
1379                                             std::string FirstOrLast) -> bool {
1380     Instruction *Member = Group->getMember(Index);
1381     assert(Member && "Group member does not exist");
1382     Value *MemberPtr = getLoadStorePointerOperand(Member);
1383     Type *AccessTy = getLoadStoreType(Member);
1384     if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides,
1385                      /*Assume=*/false, /*ShouldCheckWrap=*/true).value_or(0))
1386       return false;
1387     LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
1388                       << FirstOrLast
1389                       << " group member potentially pointer-wrapping.\n");
1390     releaseGroup(Group);
1391     return true;
1392   };
1393 
1394   // Remove interleaved groups with gaps whose memory
1395   // accesses may wrap around. We have to revisit the getPtrStride analysis,
1396   // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
1397   // not check wrapping (see documentation there).
1398   // FORNOW we use Assume=false;
1399   // TODO: Change to Assume=true but making sure we don't exceed the threshold
1400   // of runtime SCEV assumptions checks (thereby potentially failing to
1401   // vectorize altogether).
1402   // Additional optional optimizations:
1403   // TODO: If we are peeling the loop and we know that the first pointer doesn't
1404   // wrap then we can deduce that all pointers in the group don't wrap.
1405   // This means that we can forcefully peel the loop in order to only have to
1406   // check the first pointer for no-wrap. When we'll change to use Assume=true
1407   // we'll only need at most one runtime check per interleaved group.
1408   for (auto *Group : LoadGroups) {
1409     // Case 1: A full group. We can skip the checks; for full groups, if the wide
1410     // load would wrap around the address space we would do a memory access at
1411     // nullptr even without the transformation.
1412     if (Group->getNumMembers() == Group->getFactor())
1413       continue;
1414 
1415     // Case 2: If the first and last members of the group don't wrap, this
1416     // implies that all the pointers in the group don't wrap.
1417     // So we check only group member 0 (which is always guaranteed to exist),
1418     // and group member Factor - 1; if the latter doesn't exist we rely on
1419     // peeling (if it is a non-reversed access -- see Case 3).
1420     if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
1421       continue;
1422     if (Group->getMember(Group->getFactor() - 1))
1423       InvalidateGroupIfMemberMayWrap(Group, Group->getFactor() - 1,
1424                                      std::string("last"));
1425     else {
1426       // Case 3: A non-reversed interleaved load group with gaps: We need
1427       // to execute at least one scalar epilogue iteration. This will ensure
1428       // we don't speculatively access memory out-of-bounds. We only need
1429       // to look for a member at index factor - 1, since every group must have
1430       // a member at index zero.
1431       if (Group->isReverse()) {
1432         LLVM_DEBUG(
1433             dbgs() << "LV: Invalidate candidate interleaved group due to "
1434                       "a reverse access with gaps.\n");
1435         releaseGroup(Group);
1436         continue;
1437       }
1438       LLVM_DEBUG(
1439           dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
1440       RequiresScalarEpilogue = true;
1441     }
1442   }
1443 
1444   for (auto *Group : StoreGroups) {
1445     // Case 1: A full group. We can skip the checks; for full groups, if the wide
1446     // store would wrap around the address space we would do a memory access at
1447     // nullptr even without the transformation.
1448     if (Group->getNumMembers() == Group->getFactor())
1449       continue;
1450 
1451     // An interleave-store-group with gaps is implemented using a masked wide store.
1452     // Remove interleaved store groups with gaps if
1453     // masked-interleaved-accesses are not enabled by the target.
1454     if (!EnablePredicatedInterleavedMemAccesses) {
1455       LLVM_DEBUG(
1456           dbgs() << "LV: Invalidate candidate interleaved store group due "
1457                     "to gaps.\n");
1458       releaseGroup(Group);
1459       continue;
1460     }
1461 
1462     // Case 2: If the first and last members of the group don't wrap, this
1463     // implies that all the pointers in the group don't wrap.
1464     // So we check only group member 0 (which is always guaranteed to exist),
1465     // and the last group member. Case 3 (scalar epilog) is not relevant for
1466     // stores with gaps, which are implemented with masked-store (rather than
1467     // speculative access, as in loads).
1468     if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
1469       continue;
1470     for (int Index = Group->getFactor() - 1; Index > 0; Index--)
1471       if (Group->getMember(Index)) {
1472         InvalidateGroupIfMemberMayWrap(Group, Index, std::string("last"));
1473         break;
1474       }
1475   }
1476 }
1477 
1478 void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
1479   // If no group had triggered the requirement to create an epilogue loop,
1480   // there is nothing to do.
1481   if (!requiresScalarEpilogue())
1482     return;
1483 
1484   // Release groups requiring scalar epilogues. Note that this also removes them
1485   // from InterleaveGroups.
1486   bool ReleasedGroup = InterleaveGroups.remove_if([&](auto *Group) {
1487     if (!Group->requiresScalarEpilogue())
1488       return false;
1489     LLVM_DEBUG(
1490         dbgs()
1491         << "LV: Invalidate candidate interleaved group due to gaps that "
1492            "require a scalar epilogue (not allowed under optsize) and cannot "
1493            "be masked (not enabled). \n");
1494     releaseGroupWithoutRemovingFromSet(Group);
1495     return true;
1496   });
1497   assert(ReleasedGroup && "At least one group must be invalidated, as a "
1498                           "scalar epilogue was required");
1499   (void)ReleasedGroup;
1500   RequiresScalarEpilogue = false;
1501 }
1502 
1503 template <typename InstT>
1504 void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
1505   llvm_unreachable("addMetadata can only be used for Instruction");
1506 }
1507 
1508 namespace llvm {
1509 template <>
1510 void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
1511   SmallVector<Value *, 4> VL;
1512   std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
1513                  [](std::pair<int, Instruction *> p) { return p.second; });
1514   propagateMetadata(NewInst, VL);
1515 }
1516 } // namespace llvm
1517