xref: /llvm-project/llvm/lib/Analysis/VectorUtils.cpp (revision 4ba1800be6c9294e21e2b87b64600daac12730c1)
1 //===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines vectorizer utilities.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/VectorUtils.h"
14 #include "llvm/ADT/EquivalenceClasses.h"
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/Analysis/DemandedBits.h"
17 #include "llvm/Analysis/LoopInfo.h"
18 #include "llvm/Analysis/LoopIterator.h"
19 #include "llvm/Analysis/ScalarEvolution.h"
20 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
21 #include "llvm/Analysis/TargetTransformInfo.h"
22 #include "llvm/Analysis/ValueTracking.h"
23 #include "llvm/IR/Constants.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/IRBuilder.h"
26 #include "llvm/IR/MemoryModelRelaxationAnnotations.h"
27 #include "llvm/IR/PatternMatch.h"
28 #include "llvm/IR/Value.h"
29 #include "llvm/Support/CommandLine.h"
30 
31 #define DEBUG_TYPE "vectorutils"
32 
33 using namespace llvm;
34 using namespace llvm::PatternMatch;
35 
36 /// Maximum factor for an interleaved memory access.
37 static cl::opt<unsigned> MaxInterleaveGroupFactor(
38     "max-interleave-group-factor", cl::Hidden,
39     cl::desc("Maximum factor for an interleaved access group (default = 8)"),
40     cl::init(8));
41 
42 /// Return true if all of the intrinsic's arguments and return type are scalars
43 /// for the scalar form of the intrinsic, and vectors for the vector form of the
44 /// intrinsic (except operands that are marked as always being scalar by
45 /// isVectorIntrinsicWithScalarOpAtArg).
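/// Illustrative sketch (added note, names are placeholders): the scalar call
///   %r = call float @llvm.fabs.f32(float %x)
/// has the trivially vectorizable vector form
///   %v = call <4 x float> @llvm.fabs.v4f32(<4 x float> %vx)
/// where all operands and the result are simply widened to vectors.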
46 bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
47   switch (ID) {
48   case Intrinsic::abs:   // Begin integer bit-manipulation.
49   case Intrinsic::bswap:
50   case Intrinsic::bitreverse:
51   case Intrinsic::ctpop:
52   case Intrinsic::ctlz:
53   case Intrinsic::cttz:
54   case Intrinsic::fshl:
55   case Intrinsic::fshr:
56   case Intrinsic::smax:
57   case Intrinsic::smin:
58   case Intrinsic::umax:
59   case Intrinsic::umin:
60   case Intrinsic::sadd_sat:
61   case Intrinsic::ssub_sat:
62   case Intrinsic::uadd_sat:
63   case Intrinsic::usub_sat:
64   case Intrinsic::smul_fix:
65   case Intrinsic::smul_fix_sat:
66   case Intrinsic::umul_fix:
67   case Intrinsic::umul_fix_sat:
68   case Intrinsic::sqrt: // Begin floating-point.
69   case Intrinsic::asin:
70   case Intrinsic::acos:
71   case Intrinsic::atan:
72   case Intrinsic::sin:
73   case Intrinsic::cos:
74   case Intrinsic::tan:
75   case Intrinsic::sinh:
76   case Intrinsic::cosh:
77   case Intrinsic::tanh:
78   case Intrinsic::exp:
79   case Intrinsic::exp2:
80   case Intrinsic::log:
81   case Intrinsic::log10:
82   case Intrinsic::log2:
83   case Intrinsic::fabs:
84   case Intrinsic::minnum:
85   case Intrinsic::maxnum:
86   case Intrinsic::minimum:
87   case Intrinsic::maximum:
88   case Intrinsic::copysign:
89   case Intrinsic::floor:
90   case Intrinsic::ceil:
91   case Intrinsic::trunc:
92   case Intrinsic::rint:
93   case Intrinsic::nearbyint:
94   case Intrinsic::round:
95   case Intrinsic::roundeven:
96   case Intrinsic::pow:
97   case Intrinsic::fma:
98   case Intrinsic::fmuladd:
99   case Intrinsic::is_fpclass:
100   case Intrinsic::powi:
101   case Intrinsic::canonicalize:
102   case Intrinsic::fptosi_sat:
103   case Intrinsic::fptoui_sat:
104   case Intrinsic::lrint:
105   case Intrinsic::llrint:
106   case Intrinsic::ucmp:
107   case Intrinsic::scmp:
108     return true;
109   default:
110     return false;
111   }
112 }
113 
114 /// Identifies if the vector form of the intrinsic has a scalar operand.
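/// For example, the exponent operand of llvm.powi (index 1) and the scale
/// operand of llvm.smul.fix (index 2) remain scalar in the vector forms, as
/// reflected in the switch below.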
115 bool llvm::isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
116                                               unsigned ScalarOpdIdx) {
117   switch (ID) {
118   case Intrinsic::abs:
119   case Intrinsic::ctlz:
120   case Intrinsic::cttz:
121   case Intrinsic::is_fpclass:
122   case Intrinsic::powi:
123     return (ScalarOpdIdx == 1);
124   case Intrinsic::smul_fix:
125   case Intrinsic::smul_fix_sat:
126   case Intrinsic::umul_fix:
127   case Intrinsic::umul_fix_sat:
128     return (ScalarOpdIdx == 2);
129   default:
130     return false;
131   }
132 }
133 
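/// Summarizing note (added for clarity): this returns true when the vector
/// form of the intrinsic is overloaded on the type of the operand at index
/// OpdIdx, or on the return type when OpdIdx is -1. For example,
/// llvm.fptosi.sat is overloaded on both its return type and operand 0.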
134 bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
135                                                   int OpdIdx) {
136   assert(ID != Intrinsic::not_intrinsic && "Not an intrinsic!");
137 
138   switch (ID) {
139   case Intrinsic::fptosi_sat:
140   case Intrinsic::fptoui_sat:
141   case Intrinsic::lrint:
142   case Intrinsic::llrint:
143   case Intrinsic::ucmp:
144   case Intrinsic::scmp:
145     return OpdIdx == -1 || OpdIdx == 0;
146   case Intrinsic::is_fpclass:
147     return OpdIdx == 0;
148   case Intrinsic::powi:
149     return OpdIdx == -1 || OpdIdx == 1;
150   default:
151     return OpdIdx == -1;
152   }
153 }
154 
155 /// Returns intrinsic ID for call.
156 /// For the given call instruction it finds the corresponding intrinsic and
157 /// returns its ID; if no mapping is found, it returns not_intrinsic.
158 Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
159                                                 const TargetLibraryInfo *TLI) {
160   Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
161   if (ID == Intrinsic::not_intrinsic)
162     return Intrinsic::not_intrinsic;
163 
164   if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
165       ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
166       ID == Intrinsic::experimental_noalias_scope_decl ||
167       ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
168     return ID;
169   return Intrinsic::not_intrinsic;
170 }
171 
172 /// Given a vector and an element number, see if the scalar value is
173 /// already around as a register, for example if it were inserted then extracted
174 /// from the vector.
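/// Illustrative example (placeholder values): given
///   %v = insertelement <4 x float> %w, float %x, i32 2
/// findScalarElement(%v, 2) returns %x, while findScalarElement(%v, 1)
/// recurses into %w.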
175 Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
176   assert(V->getType()->isVectorTy() && "Not looking at a vector?");
177   VectorType *VTy = cast<VectorType>(V->getType());
178   // For a fixed-length vector, return poison for an out-of-range access.
179   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
180     unsigned Width = FVTy->getNumElements();
181     if (EltNo >= Width)
182       return PoisonValue::get(FVTy->getElementType());
183   }
184 
185   if (Constant *C = dyn_cast<Constant>(V))
186     return C->getAggregateElement(EltNo);
187 
188   if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
189     // If this is an insert to a variable element, we don't know what it is.
190     if (!isa<ConstantInt>(III->getOperand(2)))
191       return nullptr;
192     unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();
193 
194     // If this is an insert to the element we are looking for, return the
195     // inserted value.
196     if (EltNo == IIElt)
197       return III->getOperand(1);
198 
199     // Guard against infinite loop on malformed, unreachable IR.
200     if (III == III->getOperand(0))
201       return nullptr;
202 
203     // Otherwise, the insertelement doesn't modify the value, recurse on its
204     // vector input.
205     return findScalarElement(III->getOperand(0), EltNo);
206   }
207 
208   ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
209   // Restrict the following transformation to fixed-length vectors.
210   if (SVI && isa<FixedVectorType>(SVI->getType())) {
211     unsigned LHSWidth =
212         cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
213     int InEl = SVI->getMaskValue(EltNo);
214     if (InEl < 0)
215       return PoisonValue::get(VTy->getElementType());
216     if (InEl < (int)LHSWidth)
217       return findScalarElement(SVI->getOperand(0), InEl);
218     return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
219   }
220 
221   // Extract a value from a vector add operation with a constant zero.
222   // TODO: Use getBinOpIdentity() to generalize this.
223   Value *Val; Constant *C;
224   if (match(V, m_Add(m_Value(Val), m_Constant(C))))
225     if (Constant *Elt = C->getAggregateElement(EltNo))
226       if (Elt->isNullValue())
227         return findScalarElement(Val, EltNo);
228 
229   // If the vector is a splat then we can trivially find the scalar element.
230   if (isa<ScalableVectorType>(VTy))
231     if (Value *Splat = getSplatValue(V))
232       if (EltNo < VTy->getElementCount().getKnownMinValue())
233         return Splat;
234 
235   // Otherwise, we don't know.
236   return nullptr;
237 }
238 
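// Illustrative examples: the mask <2, -1, 2, 2> is a splat of element 2, so
// getSplatIndex returns 2; the mask <0, 1> mixes indices, so it returns -1.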
239 int llvm::getSplatIndex(ArrayRef<int> Mask) {
240   int SplatIndex = -1;
241   for (int M : Mask) {
242     // Ignore invalid (undefined) mask elements.
243     if (M < 0)
244       continue;
245 
246     // There can be only 1 non-negative mask element value if this is a splat.
247     if (SplatIndex != -1 && SplatIndex != M)
248       return -1;
249 
250     // Initialize the splat index to the 1st non-negative mask element.
251     SplatIndex = M;
252   }
253   assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
254   return SplatIndex;
255 }
256 
257 /// Get splat value if the input is a splat vector or return nullptr.
258 /// This function is not fully general. It checks only 2 cases:
259 /// the input value is (1) a splat constant vector or (2) a sequence
260 /// of instructions that broadcasts a scalar at element 0.
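/// The broadcast sequence matched below looks like (illustrative):
///   %ins   = insertelement <4 x i32> poison, i32 %x, i64 0
///   %splat = shufflevector <4 x i32> %ins, <4 x i32> poison,
///                          <4 x i32> zeroinitializer
/// for which getSplatValue returns %x.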
261 Value *llvm::getSplatValue(const Value *V) {
262   if (isa<VectorType>(V->getType()))
263     if (auto *C = dyn_cast<Constant>(V))
264       return C->getSplatValue();
265 
266   // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
267   Value *Splat;
268   if (match(V,
269             m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
270                       m_Value(), m_ZeroMask())))
271     return Splat;
272 
273   return nullptr;
274 }
275 
276 bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
277   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
278 
279   if (isa<VectorType>(V->getType())) {
280     if (isa<UndefValue>(V))
281       return true;
282     // FIXME: We can allow undefs, but if Index was specified, we may want to
283     //        check that the constant is defined at that index.
284     if (auto *C = dyn_cast<Constant>(V))
285       return C->getSplatValue() != nullptr;
286   }
287 
288   if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
289     // FIXME: We can safely allow undefs here. If Index was specified, we will
290     //        check that the mask elt is defined at the required index.
291     if (!all_equal(Shuf->getShuffleMask()))
292       return false;
293 
294     // Match any index.
295     if (Index == -1)
296       return true;
297 
298     // Match a specific element. The mask should be defined at and match the
299     // specified index.
300     return Shuf->getMaskValue(Index) == Index;
301   }
302 
303   // The remaining tests are all recursive, so bail out if we hit the limit.
304   if (Depth++ == MaxAnalysisRecursionDepth)
305     return false;
306 
307   // If both operands of a binop are splats, the result is a splat.
308   Value *X, *Y, *Z;
309   if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
310     return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);
311 
312   // If all operands of a select are splats, the result is a splat.
313   if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
314     return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
315            isSplatValue(Z, Index, Depth);
316 
317   // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).
318 
319   return false;
320 }
321 
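// Illustrative example: with SrcWidth = 4, Mask = <0, 5, 2, 7> and all result
// elements demanded, DemandedLHS ends up as {0, 2} and DemandedRHS as {1, 3}.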
322 bool llvm::getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
323                                   const APInt &DemandedElts, APInt &DemandedLHS,
324                                   APInt &DemandedRHS, bool AllowUndefElts) {
325   DemandedLHS = DemandedRHS = APInt::getZero(SrcWidth);
326 
327   // Early out if we don't demand any elements.
328   if (DemandedElts.isZero())
329     return true;
330 
331   // Simple case of a shuffle with zeroinitializer.
332   if (all_of(Mask, [](int Elt) { return Elt == 0; })) {
333     DemandedLHS.setBit(0);
334     return true;
335   }
336 
337   for (unsigned I = 0, E = Mask.size(); I != E; ++I) {
338     int M = Mask[I];
339     assert((-1 <= M) && (M < (SrcWidth * 2)) &&
340            "Invalid shuffle mask constant");
341 
342     if (!DemandedElts[I] || (AllowUndefElts && (M < 0)))
343       continue;
344 
345     // For undef elements, we don't know anything about the common state of
346     // the shuffle result.
347     if (M < 0)
348       return false;
349 
350     if (M < SrcWidth)
351       DemandedLHS.setBit(M);
352     else
353       DemandedRHS.setBit(M - SrcWidth);
354   }
355 
356   return true;
357 }
358 
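// Illustrative example: with Scale = 2 the mask <0, -1, 3> is narrowed to
// <0, 1, -1, -1, 6, 7>, i.e. each source element is split into Scale pieces.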
359 void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
360                                  SmallVectorImpl<int> &ScaledMask) {
361   assert(Scale > 0 && "Unexpected scaling factor");
362 
363   // Fast-path: if no scaling, then it is just a copy.
364   if (Scale == 1) {
365     ScaledMask.assign(Mask.begin(), Mask.end());
366     return;
367   }
368 
369   ScaledMask.clear();
370   for (int MaskElt : Mask) {
371     if (MaskElt >= 0) {
372       assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
373              "Overflowed 32-bits");
374     }
375     for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
376       ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
377   }
378 }
379 
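// Illustrative example: with Scale = 2 the mask <0, 1, 6, 7> widens to <0, 3>,
// whereas <0, 1, 6, 5> has a non-consecutive slice and the function returns
// false.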
380 bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
381                                 SmallVectorImpl<int> &ScaledMask) {
382   assert(Scale > 0 && "Unexpected scaling factor");
383 
384   // Fast-path: if no scaling, then it is just a copy.
385   if (Scale == 1) {
386     ScaledMask.assign(Mask.begin(), Mask.end());
387     return true;
388   }
389 
390   // We must map the original elements down evenly to a type with fewer elements.
391   int NumElts = Mask.size();
392   if (NumElts % Scale != 0)
393     return false;
394 
395   ScaledMask.clear();
396   ScaledMask.reserve(NumElts / Scale);
397 
398   // Step through the input mask by splitting into Scale-sized slices.
399   do {
400     ArrayRef<int> MaskSlice = Mask.take_front(Scale);
401     assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice.");
402 
403     // The first element of the slice determines how we evaluate this slice.
404     int SliceFront = MaskSlice.front();
405     if (SliceFront < 0) {
406       // Negative values (undef or other "sentinel" values) must be equal across
407       // the entire slice.
408       if (!all_equal(MaskSlice))
409         return false;
410       ScaledMask.push_back(SliceFront);
411     } else {
412       // A positive mask element must be cleanly divisible.
413       if (SliceFront % Scale != 0)
414         return false;
415       // Elements of the slice must be consecutive.
416       for (int i = 1; i < Scale; ++i)
417         if (MaskSlice[i] != SliceFront + i)
418           return false;
419       ScaledMask.push_back(SliceFront / Scale);
420     }
421     Mask = Mask.drop_front(Scale);
422   } while (!Mask.empty());
423 
424   assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask");
425 
426   // All elements of the original mask can be scaled down to map to the elements
427   // of a mask with wider elements.
428   return true;
429 }
430 
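// Illustrative example: with NumDstElts = 4 and Mask = <0, 1> (two source
// elements), Scale = 2 and narrowShuffleMaskElts produces <0, 1, 2, 3>.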
431 bool llvm::scaleShuffleMaskElts(unsigned NumDstElts, ArrayRef<int> Mask,
432                                 SmallVectorImpl<int> &ScaledMask) {
433   unsigned NumSrcElts = Mask.size();
434   assert(NumSrcElts > 0 && NumDstElts > 0 && "Unexpected scaling factor");
435 
436   // Fast-path: if no scaling, then it is just a copy.
437   if (NumSrcElts == NumDstElts) {
438     ScaledMask.assign(Mask.begin(), Mask.end());
439     return true;
440   }
441 
442   // Ensure we can find a whole scale factor.
443   assert(((NumSrcElts % NumDstElts) == 0 || (NumDstElts % NumSrcElts) == 0) &&
444          "Unexpected scaling factor");
445 
446   if (NumSrcElts > NumDstElts) {
447     int Scale = NumSrcElts / NumDstElts;
448     return widenShuffleMaskElts(Scale, Mask, ScaledMask);
449   }
450 
451   int Scale = NumDstElts / NumSrcElts;
452   narrowShuffleMaskElts(Scale, Mask, ScaledMask);
453   return true;
454 }
455 
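// Illustrative example: the mask <2, 3, 0, 1> can be widened once (Scale = 2)
// to <1, 0> but no further, which becomes the result; <0, 1, 2, 3> collapses
// all the way down to <0>.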
456 void llvm::getShuffleMaskWithWidestElts(ArrayRef<int> Mask,
457                                         SmallVectorImpl<int> &ScaledMask) {
458   std::array<SmallVector<int, 16>, 2> TmpMasks;
459   SmallVectorImpl<int> *Output = &TmpMasks[0], *Tmp = &TmpMasks[1];
460   ArrayRef<int> InputMask = Mask;
461   for (unsigned Scale = 2; Scale <= InputMask.size(); ++Scale) {
462     while (widenShuffleMaskElts(Scale, InputMask, *Output)) {
463       InputMask = *Output;
464       std::swap(Output, Tmp);
465     }
466   }
467   ScaledMask.assign(InputMask.begin(), InputMask.end());
468 }
469 
470 void llvm::processShuffleMasks(
471     ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
472     unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
473     function_ref<void(ArrayRef<int>, unsigned, unsigned)> SingleInputAction,
474     function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction) {
475   SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs);
476   // Try to perform better estimation of the permutation.
477   // 1. Split the source/destination vectors into real registers.
478   // 2. Do the mask analysis to identify which real registers are
479   // permuted.
480   int Sz = Mask.size();
481   unsigned SzDest = Sz / NumOfDestRegs;
482   unsigned SzSrc = Sz / NumOfSrcRegs;
483   for (unsigned I = 0; I < NumOfDestRegs; ++I) {
484     auto &RegMasks = Res[I];
485     RegMasks.assign(NumOfSrcRegs, {});
486     // For each destination register, determine which source register each of
487     // its values comes from.
488     for (unsigned K = 0; K < SzDest; ++K) {
489       int Idx = I * SzDest + K;
490       if (Idx == Sz)
491         break;
492       if (Mask[Idx] >= Sz || Mask[Idx] == PoisonMaskElem)
493         continue;
494       int SrcRegIdx = Mask[Idx] / SzSrc;
495       // Add a cost of PermuteTwoSrc for each new source register permute,
496       // if we have more than one source register.
497       if (RegMasks[SrcRegIdx].empty())
498         RegMasks[SrcRegIdx].assign(SzDest, PoisonMaskElem);
499       RegMasks[SrcRegIdx][K] = Mask[Idx] % SzSrc;
500     }
501   }
502   // Process split mask.
503   for (unsigned I = 0; I < NumOfUsedRegs; ++I) {
504     auto &Dest = Res[I];
505     int NumSrcRegs =
506         count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
507     switch (NumSrcRegs) {
508     case 0:
509       // No input vectors were used!
510       NoInputAction();
511       break;
512     case 1: {
513       // Find the single non-empty source mask.
514       auto *It =
515           find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
516       unsigned SrcReg = std::distance(Dest.begin(), It);
517       SingleInputAction(*It, SrcReg, I);
518       break;
519     }
520     default: {
521       // The first mask is a permutation of a single register. Since we have at
522       // least two input registers to shuffle, we merge the masks of the first
523       // two registers and generate a shuffle of two registers rather than
524       // reordering the first register and then shuffling it with the second.
525       // Next, we generate shuffles of the resulting register with each of the
526       // remaining registers in the list.
527       auto &&CombineMasks = [](MutableArrayRef<int> FirstMask,
528                                ArrayRef<int> SecondMask) {
529         for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) {
530           if (SecondMask[Idx] != PoisonMaskElem) {
531             assert(FirstMask[Idx] == PoisonMaskElem &&
532                    "Expected undefined mask element.");
533             FirstMask[Idx] = SecondMask[Idx] + VF;
534           }
535         }
536       };
537       auto &&NormalizeMask = [](MutableArrayRef<int> Mask) {
538         for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
539           if (Mask[Idx] != PoisonMaskElem)
540             Mask[Idx] = Idx;
541         }
542       };
543       int SecondIdx;
544       do {
545         int FirstIdx = -1;
546         SecondIdx = -1;
547         MutableArrayRef<int> FirstMask, SecondMask;
548         for (unsigned I = 0; I < NumOfDestRegs; ++I) {
549           SmallVectorImpl<int> &RegMask = Dest[I];
550           if (RegMask.empty())
551             continue;
552 
553           if (FirstIdx == SecondIdx) {
554             FirstIdx = I;
555             FirstMask = RegMask;
556             continue;
557           }
558           SecondIdx = I;
559           SecondMask = RegMask;
560           CombineMasks(FirstMask, SecondMask);
561           ManyInputsAction(FirstMask, FirstIdx, SecondIdx);
562           NormalizeMask(FirstMask);
563           RegMask.clear();
564           SecondMask = FirstMask;
565           SecondIdx = FirstIdx;
566         }
567         if (FirstIdx != SecondIdx && SecondIdx >= 0) {
568           CombineMasks(SecondMask, FirstMask);
569           ManyInputsAction(SecondMask, SecondIdx, FirstIdx);
570           Dest[FirstIdx].clear();
571           NormalizeMask(SecondMask);
572         }
573       } while (SecondIdx >= 0);
574       break;
575     }
576     }
577   }
578 }
579 
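// Illustrative example: for a 128-bit vector with 4 elements (one lane,
// HalfEltsPerLane = 2), demanding result element 1 maps to LHS element 2 and
// demanding result element 2 maps to RHS element 0, matching the pairwise
// operand layout of horizontal operations.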
580 void llvm::getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth,
581                                                const APInt &DemandedElts,
582                                                APInt &DemandedLHS,
583                                                APInt &DemandedRHS) {
584   assert(VectorBitWidth >= 128 && "Vectors smaller than 128 bit not supported");
585   int NumLanes = VectorBitWidth / 128;
586   int NumElts = DemandedElts.getBitWidth();
587   int NumEltsPerLane = NumElts / NumLanes;
588   int HalfEltsPerLane = NumEltsPerLane / 2;
589 
590   DemandedLHS = APInt::getZero(NumElts);
591   DemandedRHS = APInt::getZero(NumElts);
592 
593   // Map DemandedElts to the horizontal operands.
594   for (int Idx = 0; Idx != NumElts; ++Idx) {
595     if (!DemandedElts[Idx])
596       continue;
597     int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
598     int LocalIdx = Idx % NumEltsPerLane;
599     if (LocalIdx < HalfEltsPerLane) {
600       DemandedLHS.setBit(LaneIdx + 2 * LocalIdx);
601     } else {
602       LocalIdx -= HalfEltsPerLane;
603       DemandedRHS.setBit(LaneIdx + 2 * LocalIdx);
604     }
605   }
606 }
607 
608 MapVector<Instruction *, uint64_t>
609 llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
610                                const TargetTransformInfo *TTI) {
611 
612   // DemandedBits will give us every value's live-out bits. But we want
613   // to ensure no extra casts would need to be inserted, so every DAG
614   // of connected values must have the same minimum bitwidth.
615   EquivalenceClasses<Value *> ECs;
616   SmallVector<Value *, 16> Worklist;
617   SmallPtrSet<Value *, 4> Roots;
618   SmallPtrSet<Value *, 16> Visited;
619   DenseMap<Value *, uint64_t> DBits;
620   SmallPtrSet<Instruction *, 4> InstructionSet;
621   MapVector<Instruction *, uint64_t> MinBWs;
622 
623   // Determine the roots. We work bottom-up, from truncs or icmps.
624   bool SeenExtFromIllegalType = false;
625   for (auto *BB : Blocks)
626     for (auto &I : *BB) {
627       InstructionSet.insert(&I);
628 
629       if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
630           !TTI->isTypeLegal(I.getOperand(0)->getType()))
631         SeenExtFromIllegalType = true;
632 
633       // Only deal with non-vector integers up to 64 bits wide.
634       if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
635           !I.getType()->isVectorTy() &&
636           I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
637         // Don't make work for ourselves. If we know the loaded type is legal,
638         // don't add it to the worklist.
639         if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
640           continue;
641 
642         Worklist.push_back(&I);
643         Roots.insert(&I);
644       }
645     }
646   // Early exit.
647   if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
648     return MinBWs;
649 
650   // Now proceed breadth-first, unioning values together.
651   while (!Worklist.empty()) {
652     Value *Val = Worklist.pop_back_val();
653     Value *Leader = ECs.getOrInsertLeaderValue(Val);
654 
655     if (!Visited.insert(Val).second)
656       continue;
657 
658     // Non-instructions terminate a chain successfully.
659     if (!isa<Instruction>(Val))
660       continue;
661     Instruction *I = cast<Instruction>(Val);
662 
663     // If we encounter a type that is larger than 64 bits, we can't represent
664     // it so bail out.
665     if (DB.getDemandedBits(I).getBitWidth() > 64)
666       return MapVector<Instruction *, uint64_t>();
667 
668     uint64_t V = DB.getDemandedBits(I).getZExtValue();
669     DBits[Leader] |= V;
670     DBits[I] = V;
671 
672     // Casts, loads and instructions outside of our range terminate a chain
673     // successfully.
674     if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
675         !InstructionSet.count(I))
676       continue;
677 
678     // Unsafe casts terminate a chain unsuccessfully. We can't do anything
679     // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
680     // transform anything that relies on them.
681     if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
682         !I->getType()->isIntegerTy()) {
683       DBits[Leader] |= ~0ULL;
684       continue;
685     }
686 
687     // We don't modify the types of PHIs. Reductions will already have been
688     // truncated if possible, and inductions' sizes will have been chosen by
689     // indvars.
690     if (isa<PHINode>(I))
691       continue;
692 
693     if (DBits[Leader] == ~0ULL)
694       // All bits demanded, no point continuing.
695       continue;
696 
697     for (Value *O : cast<User>(I)->operands()) {
698       ECs.unionSets(Leader, O);
699       Worklist.push_back(O);
700     }
701   }
702 
703   // Now we've discovered all values, walk them to see if there are
704   // any users we didn't see. If there are, we can't optimize that
705   // chain.
706   for (auto &I : DBits)
707     for (auto *U : I.first->users())
708       if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
709         DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;
710 
711   for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
712     uint64_t LeaderDemandedBits = 0;
713     for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
714       LeaderDemandedBits |= DBits[M];
715 
716     uint64_t MinBW = llvm::bit_width(LeaderDemandedBits);
717     // Round up to a power of 2
718     MinBW = llvm::bit_ceil(MinBW);
719 
720     // We don't modify the types of PHIs. Reductions will already have been
721     // truncated if possible, and inductions' sizes will have been chosen by
722     // indvars.
723     // If we are required to shrink a PHI, abandon this entire equivalence class.
724     bool Abort = false;
725     for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
726       if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) {
727         Abort = true;
728         break;
729       }
730     if (Abort)
731       continue;
732 
733     for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) {
734       auto *MI = dyn_cast<Instruction>(M);
735       if (!MI)
736         continue;
737       Type *Ty = M->getType();
738       if (Roots.count(M))
739         Ty = MI->getOperand(0)->getType();
740 
741       if (MinBW >= Ty->getScalarSizeInBits())
742         continue;
743 
744       // If any of M's operands demand more bits than MinBW then M cannot be
745       // performed safely in MinBW.
746       if (any_of(MI->operands(), [&DB, MinBW](Use &U) {
747             auto *CI = dyn_cast<ConstantInt>(U);
748             // For constant shift amounts, check if the shift would result in
749             // poison.
750             if (CI &&
751                 isa<ShlOperator, LShrOperator, AShrOperator>(U.getUser()) &&
752                 U.getOperandNo() == 1)
753               return CI->uge(MinBW);
754             uint64_t BW = bit_width(DB.getDemandedBits(&U).getZExtValue());
755             return bit_ceil(BW) > MinBW;
756           }))
757         continue;
758 
759       MinBWs[MI] = MinBW;
760     }
761   }
762 
763   return MinBWs;
764 }
765 
766 /// Add all access groups in @p AccGroups to @p List.
767 template <typename ListT>
768 static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
769   // Interpret an access group as a list containing itself.
770   if (AccGroups->getNumOperands() == 0) {
771     assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
772     List.insert(AccGroups);
773     return;
774   }
775 
776   for (const auto &AccGroupListOp : AccGroups->operands()) {
777     auto *Item = cast<MDNode>(AccGroupListOp.get());
778     assert(isValidAsAccessGroup(Item) && "List item must be an access group");
779     List.insert(Item);
780   }
781 }
782 
783 MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
784   if (!AccGroups1)
785     return AccGroups2;
786   if (!AccGroups2)
787     return AccGroups1;
788   if (AccGroups1 == AccGroups2)
789     return AccGroups1;
790 
791   SmallSetVector<Metadata *, 4> Union;
792   addToAccessGroupList(Union, AccGroups1);
793   addToAccessGroupList(Union, AccGroups2);
794 
795   if (Union.size() == 0)
796     return nullptr;
797   if (Union.size() == 1)
798     return cast<MDNode>(Union.front());
799 
800   LLVMContext &Ctx = AccGroups1->getContext();
801   return MDNode::get(Ctx, Union.getArrayRef());
802 }
803 
804 MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
805                                     const Instruction *Inst2) {
806   bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
807   bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();
808 
809   if (!MayAccessMem1 && !MayAccessMem2)
810     return nullptr;
811   if (!MayAccessMem1)
812     return Inst2->getMetadata(LLVMContext::MD_access_group);
813   if (!MayAccessMem2)
814     return Inst1->getMetadata(LLVMContext::MD_access_group);
815 
816   MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
817   MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
818   if (!MD1 || !MD2)
819     return nullptr;
820   if (MD1 == MD2)
821     return MD1;
822 
823   // Use set for scalable 'contains' check.
824   SmallPtrSet<Metadata *, 4> AccGroupSet2;
825   addToAccessGroupList(AccGroupSet2, MD2);
826 
827   SmallVector<Metadata *, 4> Intersection;
828   if (MD1->getNumOperands() == 0) {
829     assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
830     if (AccGroupSet2.count(MD1))
831       Intersection.push_back(MD1);
832   } else {
833     for (const MDOperand &Node : MD1->operands()) {
834       auto *Item = cast<MDNode>(Node.get());
835       assert(isValidAsAccessGroup(Item) && "List item must be an access group");
836       if (AccGroupSet2.count(Item))
837         Intersection.push_back(Item);
838     }
839   }
840 
841   if (Intersection.size() == 0)
842     return nullptr;
843   if (Intersection.size() == 1)
844     return cast<MDNode>(Intersection.front());
845 
846   LLVMContext &Ctx = Inst1->getContext();
847   return MDNode::get(Ctx, Intersection);
848 }
849 
850 /// \returns \p I after propagating metadata from \p VL.
851 Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
852   if (VL.empty())
853     return Inst;
854   Instruction *I0 = cast<Instruction>(VL[0]);
855   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
856   I0->getAllMetadataOtherThanDebugLoc(Metadata);
857 
858   for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
859                     LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
860                     LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
861                     LLVMContext::MD_access_group, LLVMContext::MD_mmra}) {
862     MDNode *MD = I0->getMetadata(Kind);
863     for (int J = 1, E = VL.size(); MD && J != E; ++J) {
864       const Instruction *IJ = cast<Instruction>(VL[J]);
865       MDNode *IMD = IJ->getMetadata(Kind);
866 
867       switch (Kind) {
868       case LLVMContext::MD_mmra: {
869         MD = MMRAMetadata::combine(Inst->getContext(), MD, IMD);
870         break;
871       }
872       case LLVMContext::MD_tbaa:
873         MD = MDNode::getMostGenericTBAA(MD, IMD);
874         break;
875       case LLVMContext::MD_alias_scope:
876         MD = MDNode::getMostGenericAliasScope(MD, IMD);
877         break;
878       case LLVMContext::MD_fpmath:
879         MD = MDNode::getMostGenericFPMath(MD, IMD);
880         break;
881       case LLVMContext::MD_noalias:
882       case LLVMContext::MD_nontemporal:
883       case LLVMContext::MD_invariant_load:
884         MD = MDNode::intersect(MD, IMD);
885         break;
886       case LLVMContext::MD_access_group:
887         MD = intersectAccessGroups(Inst, IJ);
888         break;
889       default:
890         llvm_unreachable("unhandled metadata");
891       }
892     }
893 
894     Inst->setMetadata(Kind, MD);
895   }
896 
897   return Inst;
898 }
899 
900 Constant *
901 llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
902                            const InterleaveGroup<Instruction> &Group) {
903   // All 1's means mask is not needed.
904   if (Group.getNumMembers() == Group.getFactor())
905     return nullptr;
906 
907   // TODO: support reversed access.
908   assert(!Group.isReverse() && "Reversed group not supported.");
909 
910   SmallVector<Constant *, 16> Mask;
911   for (unsigned i = 0; i < VF; i++)
912     for (unsigned j = 0; j < Group.getFactor(); ++j) {
913       unsigned HasMember = Group.getMember(j) ? 1 : 0;
914       Mask.push_back(Builder.getInt1(HasMember));
915     }
916 
917   return ConstantVector::get(Mask);
918 }
919 
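// Illustrative example: createReplicatedMask(3, 2) returns <0, 0, 0, 1, 1, 1>.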
920 llvm::SmallVector<int, 16>
921 llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) {
922   SmallVector<int, 16> MaskVec;
923   for (unsigned i = 0; i < VF; i++)
924     for (unsigned j = 0; j < ReplicationFactor; j++)
925       MaskVec.push_back(i);
926 
927   return MaskVec;
928 }
929 
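// Illustrative example: createInterleaveMask(4, 2) returns
// <0, 4, 1, 5, 2, 6, 3, 7>, interleaving the lanes of two 4-element vectors.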
930 llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF,
931                                                       unsigned NumVecs) {
932   SmallVector<int, 16> Mask;
933   for (unsigned i = 0; i < VF; i++)
934     for (unsigned j = 0; j < NumVecs; j++)
935       Mask.push_back(j * VF + i);
936 
937   return Mask;
938 }
939 
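// Illustrative example: createStrideMask(0, 2, 4) returns <0, 2, 4, 6>,
// selecting every second element starting at index 0.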
940 llvm::SmallVector<int, 16>
941 llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) {
942   SmallVector<int, 16> Mask;
943   for (unsigned i = 0; i < VF; i++)
944     Mask.push_back(Start + i * Stride);
945 
946   return Mask;
947 }
948 
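// Illustrative example: createSequentialMask(0, 4, 2) returns
// <0, 1, 2, 3, -1, -1>, i.e. four sequential indices followed by two undefs.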
949 llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start,
950                                                       unsigned NumInts,
951                                                       unsigned NumUndefs) {
952   SmallVector<int, 16> Mask;
953   for (unsigned i = 0; i < NumInts; i++)
954     Mask.push_back(Start + i);
955 
956   for (unsigned i = 0; i < NumUndefs; i++)
957     Mask.push_back(-1);
958 
959   return Mask;
960 }
961 
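// Illustrative example: with NumElts = 4, the binary mask <1, 5, 2, 6> becomes
// the unary mask <1, 1, 2, 2>.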
962 llvm::SmallVector<int, 16> llvm::createUnaryMask(ArrayRef<int> Mask,
963                                                  unsigned NumElts) {
964   // Avoid casts in the loop and make sure we have a reasonable number.
965   int NumEltsSigned = NumElts;
966   assert(NumEltsSigned > 0 && "Expected a positive element count");
967 
968   // If the mask chooses an element from operand 1, reduce it to choose from the
969   // corresponding element of operand 0. Undef mask elements are unchanged.
970   SmallVector<int, 16> UnaryMask;
971   for (int MaskElt : Mask) {
972     assert((MaskElt < NumEltsSigned * 2) && "Expected valid shuffle mask");
973     int UnaryElt = MaskElt >= NumEltsSigned ? MaskElt - NumEltsSigned : MaskElt;
974     UnaryMask.push_back(UnaryElt);
975   }
976   return UnaryMask;
977 }
978 
979 /// A helper function for concatenating vectors. This function concatenates two
980 /// vectors having the same element type. If the second vector has fewer
981 /// elements than the first, it is padded with undefs.
982 static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
983                                     Value *V2) {
984   VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
985   VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
986   assert(VecTy1 && VecTy2 &&
987          VecTy1->getScalarType() == VecTy2->getScalarType() &&
988          "Expect two vectors with the same element type");
989 
990   unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements();
991   unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements();
992   assert(NumElts1 >= NumElts2 && "Unexpected: first vector has fewer elements");
993 
994   if (NumElts1 > NumElts2) {
995     // Extend with UNDEFs.
996     V2 = Builder.CreateShuffleVector(
997         V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
998   }
999 
1000   return Builder.CreateShuffleVector(
1001       V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0));
1002 }
1003 
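// Illustrative example: concatenating three <4 x i32> vectors first combines
// the first pair into an <8 x i32>, carries the third over, and then combines
// those two (padding the shorter one with undefs) into a <12 x i32> result.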
1004 Value *llvm::concatenateVectors(IRBuilderBase &Builder,
1005                                 ArrayRef<Value *> Vecs) {
1006   unsigned NumVecs = Vecs.size();
1007   assert(NumVecs > 1 && "Should be at least two vectors");
1008 
1009   SmallVector<Value *, 8> ResList;
1010   ResList.append(Vecs.begin(), Vecs.end());
1011   do {
1012     SmallVector<Value *, 8> TmpList;
1013     for (unsigned i = 0; i < NumVecs - 1; i += 2) {
1014       Value *V0 = ResList[i], *V1 = ResList[i + 1];
1015       assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
1016              "Only the last vector may have a different type");
1017 
1018       TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
1019     }
1020 
1021     // Push the last vector if the total number of vectors is odd.
1022     if (NumVecs % 2 != 0)
1023       TmpList.push_back(ResList[NumVecs - 1]);
1024 
1025     ResList = TmpList;
1026     NumVecs = ResList.size();
1027   } while (NumVecs > 1);
1028 
1029   return ResList[0];
1030 }
1031 
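// Illustrative example: maskIsAllZeroOrUndef returns true for
// <i1 0, i1 undef, i1 0> and false for <i1 0, i1 1> or any non-constant mask.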
1032 bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
1033   assert(isa<VectorType>(Mask->getType()) &&
1034          isa<IntegerType>(Mask->getType()->getScalarType()) &&
1035          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
1036              1 &&
1037          "Mask must be a vector of i1");
1038 
1039   auto *ConstMask = dyn_cast<Constant>(Mask);
1040   if (!ConstMask)
1041     return false;
1042   if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
1043     return true;
1044   if (isa<ScalableVectorType>(ConstMask->getType()))
1045     return false;
1046   for (unsigned
1047            I = 0,
1048            E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
1049        I != E; ++I) {
1050     if (auto *MaskElt = ConstMask->getAggregateElement(I))
1051       if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
1052         continue;
1053     return false;
1054   }
1055   return true;
1056 }
1057 
1058 bool llvm::maskIsAllOneOrUndef(Value *Mask) {
1059   assert(isa<VectorType>(Mask->getType()) &&
1060          isa<IntegerType>(Mask->getType()->getScalarType()) &&
1061          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
1062              1 &&
1063          "Mask must be a vector of i1");
1064 
1065   auto *ConstMask = dyn_cast<Constant>(Mask);
1066   if (!ConstMask)
1067     return false;
1068   if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
1069     return true;
1070   if (isa<ScalableVectorType>(ConstMask->getType()))
1071     return false;
1072   for (unsigned
1073            I = 0,
1074            E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
1075        I != E; ++I) {
1076     if (auto *MaskElt = ConstMask->getAggregateElement(I))
1077       if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
1078         continue;
1079     return false;
1080   }
1081   return true;
1082 }
1083 
1084 bool llvm::maskContainsAllOneOrUndef(Value *Mask) {
1085   assert(isa<VectorType>(Mask->getType()) &&
1086          isa<IntegerType>(Mask->getType()->getScalarType()) &&
1087          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
1088              1 &&
1089          "Mask must be a vector of i1");
1090 
1091   auto *ConstMask = dyn_cast<Constant>(Mask);
1092   if (!ConstMask)
1093     return false;
1094   if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
1095     return true;
1096   if (isa<ScalableVectorType>(ConstMask->getType()))
1097     return false;
1098   for (unsigned
1099            I = 0,
1100            E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
1101        I != E; ++I) {
1102     if (auto *MaskElt = ConstMask->getAggregateElement(I))
1103       if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
1104         return true;
1105   }
1106   return false;
1107 }
1108 
1109 /// TODO: This is a lot like known bits, but for
1110 /// vectors.  Is there something we can common this with?
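/// Illustrative example: for the constant mask <i1 1, i1 0, i1 1, i1 1> the
/// result has bits {0, 2, 3} set; a non-constant mask conservatively demands
/// all elements.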
1111 APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
1112   assert(isa<FixedVectorType>(Mask->getType()) &&
1113          isa<IntegerType>(Mask->getType()->getScalarType()) &&
1114          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
1115              1 &&
1116          "Mask must be a fixed width vector of i1");
1117 
1118   const unsigned VWidth =
1119       cast<FixedVectorType>(Mask->getType())->getNumElements();
1120   APInt DemandedElts = APInt::getAllOnes(VWidth);
1121   if (auto *CV = dyn_cast<ConstantVector>(Mask))
1122     for (unsigned i = 0; i < VWidth; i++)
1123       if (CV->getAggregateElement(i)->isNullValue())
1124         DemandedElts.clearBit(i);
1125   return DemandedElts;
1126 }
1127 
1128 bool InterleavedAccessInfo::isStrided(int Stride) {
1129   unsigned Factor = std::abs(Stride);
1130   return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
1131 }
1132 
1133 void InterleavedAccessInfo::collectConstStrideAccesses(
1134     MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
1135     const DenseMap<Value*, const SCEV*> &Strides) {
1136   auto &DL = TheLoop->getHeader()->getDataLayout();
1137 
1138   // Since it's desired that the load/store instructions be maintained in
1139   // "program order" for the interleaved access analysis, we have to visit the
1140   // blocks in the loop in reverse postorder (i.e., in a topological order).
1141   // Such an ordering will ensure that any load/store that may be executed
1142   // before a second load/store will precede the second load/store in
1143   // AccessStrideInfo.
1144   LoopBlocksDFS DFS(TheLoop);
1145   DFS.perform(LI);
1146   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
1147     for (auto &I : *BB) {
1148       Value *Ptr = getLoadStorePointerOperand(&I);
1149       if (!Ptr)
1150         continue;
1151       Type *ElementTy = getLoadStoreType(&I);
1152 
1153       // Currently, codegen doesn't support cases where the type size doesn't
1154       // match the alloc size. Skip them for now.
1155       uint64_t Size = DL.getTypeAllocSize(ElementTy);
1156       if (Size * 8 != DL.getTypeSizeInBits(ElementTy))
1157         continue;
1158 
1159       // We don't check wrapping here because we don't know yet if Ptr will be
1160       // part of a full group or a group with gaps. Checking wrapping for all
1161       // pointers (even those that end up in groups with no gaps) will be overly
1162       // conservative. For full groups, wrapping should be ok since if we would
1163       // wrap around the address space we would do a memory access at nullptr
1164       // even without the transformation. The wrapping checks are therefore
1165       // deferred until after we've formed the interleaved groups.
1166       int64_t Stride =
1167         getPtrStride(PSE, ElementTy, Ptr, TheLoop, Strides,
1168                      /*Assume=*/true, /*ShouldCheckWrap=*/false).value_or(0);
1169 
1170       const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
1171       AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
1172                                               getLoadStoreAlignment(&I));
1173     }
1174 }
1175 
1176 // Analyze interleaved accesses and collect them into interleaved load and
1177 // store groups.
1178 //
1179 // When generating code for an interleaved load group, we effectively hoist all
1180 // loads in the group to the location of the first load in program order. When
1181 // generating code for an interleaved store group, we sink all stores to the
1182 // location of the last store. This code motion can change the order of load
1183 // and store instructions and may break dependences.
1184 //
1185 // The code generation strategy mentioned above ensures that we won't violate
1186 // any write-after-read (WAR) dependences.
1187 //
1188 // E.g., for the WAR dependence:  a = A[i];      // (1)
1189 //                                A[i] = b;      // (2)
1190 //
1191 // The store group of (2) is always inserted at or below (2), and the load
1192 // group of (1) is always inserted at or above (1). Thus, the instructions will
1193 // never be reordered. All other dependences are checked to ensure the
1194 // correctness of the instruction reordering.
1195 //
1196 // The algorithm visits all memory accesses in the loop in bottom-up program
1197 // order. Program order is established by traversing the blocks in the loop in
1198 // reverse postorder when collecting the accesses.
1199 //
1200 // We visit the memory accesses in bottom-up order because it can simplify the
1201 // construction of store groups in the presence of write-after-write (WAW)
1202 // dependences.
1203 //
1204 // E.g., for the WAW dependence:  A[i] = a;      // (1)
1205 //                                A[i] = b;      // (2)
1206 //                                A[i + 1] = c;  // (3)
1207 //
1208 // We will first create a store group with (3) and (2). (1) can't be added to
1209 // this group because it and (2) are dependent. However, (1) can be grouped
1210 // with other accesses that may precede it in program order. Note that a
1211 // bottom-up order does not imply that WAW dependences should not be checked.
1212 void InterleavedAccessInfo::analyzeInterleaving(
1213                                  bool EnablePredicatedInterleavedMemAccesses) {
1214   LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
1215   const auto &Strides = LAI->getSymbolicStrides();
1216 
1217   // Holds all accesses with a constant stride.
1218   MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
1219   collectConstStrideAccesses(AccessStrideInfo, Strides);
1220 
1221   if (AccessStrideInfo.empty())
1222     return;
1223 
1224   // Collect the dependences in the loop.
1225   collectDependences();
1226 
1227   // Holds all interleaved store groups temporarily.
1228   SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
1229   // Holds all interleaved load groups temporarily.
1230   SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;
1231   // Groups added to this set cannot have new members added.
1232   SmallPtrSet<InterleaveGroup<Instruction> *, 4> CompletedLoadGroups;
1233 
1234   // Search in bottom-up program order for pairs of accesses (A and B) that can
1235   // form interleaved load or store groups. In the algorithm below, access A
1236   // precedes access B in program order. We initialize a group for B in the
1237   // outer loop of the algorithm, and then in the inner loop, we attempt to
1238   // insert each A into B's group if:
1239   //
1240   //  1. A and B have the same stride,
1241   //  2. A and B have the same memory object size, and
1242   //  3. A belongs in B's group according to its distance from B.
1243   //
1244   // Special care is taken to ensure group formation will not break any
1245   // dependences.
1246   for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
1247        BI != E; ++BI) {
1248     Instruction *B = BI->first;
1249     StrideDescriptor DesB = BI->second;
1250 
1251     // Initialize a group for B if it has an allowable stride. Even if we don't
1252     // create a group for B, we continue with the bottom-up algorithm to ensure
1253     // we don't break any of B's dependences.
1254     InterleaveGroup<Instruction> *GroupB = nullptr;
1255     if (isStrided(DesB.Stride) &&
1256         (!isPredicated(B->getParent()) || EnablePredicatedInterleavedMemAccesses)) {
1257       GroupB = getInterleaveGroup(B);
1258       if (!GroupB) {
1259         LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
1260                           << '\n');
1261         GroupB = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
1262         if (B->mayWriteToMemory())
1263           StoreGroups.insert(GroupB);
1264         else
1265           LoadGroups.insert(GroupB);
1266       }
1267     }
1268 
1269     for (auto AI = std::next(BI); AI != E; ++AI) {
1270       Instruction *A = AI->first;
1271       StrideDescriptor DesA = AI->second;
1272 
1273       // Our code motion strategy implies that we can't have dependences
1274       // between accesses in an interleaved group and other accesses located
1275       // between the first and last member of the group. Note that this also
1276       // means that a group can't have more than one member at a given offset.
1277       // The accesses in a group can have dependences with other accesses, but
1278       // we must ensure we don't extend the boundaries of the group such that
1279       // we encompass those dependent accesses.
1280       //
1281       // For example, assume we have the sequence of accesses shown below in a
1282       // stride-2 loop:
1283       //
1284       //  (1, 2) is a group | A[i]   = a;  // (1)
1285       //                    | A[i-1] = b;  // (2) |
1286       //                      A[i-3] = c;  // (3)
1287       //                      A[i]   = d;  // (4) | (2, 4) is not a group
1288       //
1289       // Because accesses (2) and (3) are dependent, we can group (2) with (1)
1290       // but not with (4). If we did, the dependent access (3) would be within
1291       // the boundaries of the (2, 4) group.
1292       auto DependentMember = [&](InterleaveGroup<Instruction> *Group,
1293                                  StrideEntry *A) -> Instruction * {
1294         for (uint32_t Index = 0; Index < Group->getFactor(); ++Index) {
1295           Instruction *MemberOfGroupB = Group->getMember(Index);
1296           if (MemberOfGroupB && !canReorderMemAccessesForInterleavedGroups(
1297                                     A, &*AccessStrideInfo.find(MemberOfGroupB)))
1298             return MemberOfGroupB;
1299         }
1300         return nullptr;
1301       };
1302 
1303       auto GroupA = getInterleaveGroup(A);
1304       // If A is a load, dependencies are tolerable, so there's nothing to do here.
1305       // If both A and B belong to the same (store) group, they are independent,
1306       // even if dependencies have not been recorded.
1307       // If both GroupA and GroupB are null, there's nothing to do here.
1308       if (A->mayWriteToMemory() && GroupA != GroupB) {
1309         Instruction *DependentInst = nullptr;
1310         // If GroupB is a load group, we have to compare AI against all
1311         // members of GroupB because if any load within GroupB has a dependency
1312         // on AI, we need to mark GroupB as complete and also release the
1313         // store GroupA (if A belongs to one). The former prevents incorrect
1314         // hoisting of load B above store A while the latter prevents incorrect
1315         // sinking of store A below load B.
1316         if (GroupB && LoadGroups.contains(GroupB))
1317           DependentInst = DependentMember(GroupB, &*AI);
1318         else if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI))
1319           DependentInst = B;
1320 
1321         if (DependentInst) {
1322           // A has a store dependence on B (or on some load within GroupB) and
1323           // is part of a store group. Release A's group to prevent illegal
1324           // sinking of A below B. A will then be free to form another group
1325           // with instructions that precede it.
1326           if (GroupA && StoreGroups.contains(GroupA)) {
1327             LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
1328                                  "dependence between "
1329                               << *A << " and " << *DependentInst << '\n');
1330             StoreGroups.remove(GroupA);
1331             releaseGroup(GroupA);
1332           }
1333           // If B is a load and part of an interleave group, no earlier loads
1334           // can be added to B's interleave group, because this would mean the
1335           // DependentInst would move across store A. Mark the interleave group
1336           // as complete.
1337           if (GroupB && LoadGroups.contains(GroupB)) {
1338             LLVM_DEBUG(dbgs() << "LV: Marking interleave group for " << *B
1339                               << " as complete.\n");
1340             CompletedLoadGroups.insert(GroupB);
1341           }
1342         }
1343       }
1344       if (CompletedLoadGroups.contains(GroupB)) {
1345         // Skip trying to add A to B, continue to look for other conflicting A's
1346         // in groups to be released.
1347         continue;
1348       }
1349 
1350       // At this point, we've checked for illegal code motion. If either A or B
1351       // isn't strided, there's nothing left to do.
1352       if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
1353         continue;
1354 
1355       // Ignore A if it's already in a group or isn't the same kind of memory
1356       // operation as B.
1357       // Note that mayReadFromMemory() isn't mutually exclusive with
1358       // mayWriteToMemory() in the case of atomic loads. We shouldn't see those
1359       // here; canVectorizeMemory() should have returned false - except when we
1360       // asked for optimization remarks.
1361       if (isInterleaved(A) ||
1362           (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
1363           (A->mayWriteToMemory() != B->mayWriteToMemory()))
1364         continue;
1365 
1366       // Check rules 1 and 2. Ignore A if its stride or size is different from
1367       // that of B.
1368       if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
1369         continue;
1370 
1371       // Ignore A if the memory objects of A and B don't belong to the same
1372       // address space.
1373       if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
1374         continue;
1375 
1376       // Calculate the distance from A to B.
1377       const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
1378           PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
1379       if (!DistToB)
1380         continue;
1381       int64_t DistanceToB = DistToB->getAPInt().getSExtValue();
1382 
1383       // Check rule 3. Ignore A if its distance to B is not a multiple of the
1384       // size.
1385       if (DistanceToB % static_cast<int64_t>(DesB.Size))
1386         continue;
1387 
1388       // All members of a predicated interleave-group must have the same predicate,
1389       // and currently must reside in the same BB.
1390       BasicBlock *BlockA = A->getParent();
1391       BasicBlock *BlockB = B->getParent();
1392       if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
1393           (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
1394         continue;
1395 
1396       // The index of A is the index of B plus A's distance to B in multiples
1397       // of the size.
1398       int IndexA =
1399           GroupB->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);
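           // For example, with Size = 4, DistanceToB = 8, and B at index 1,
           // IndexA is 1 + 8 / 4 = 3, so A is tried at slot 3 of B's group.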
1400 
1401       // Try to insert A into B's group.
1402       if (GroupB->insertMember(A, IndexA, DesA.Alignment)) {
1403         LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
1404                           << "    into the interleave group with" << *B
1405                           << '\n');
1406         InterleaveGroupMap[A] = GroupB;
1407 
1408         // Set the first load in program order as the insert position.
1409         if (A->mayReadFromMemory())
1410           GroupB->setInsertPos(A);
1411       }
1412     } // Iteration over A accesses.
1413   }   // Iteration over B accesses.
1414 
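       // Helper used below: if the group member at Index may wrap around the
       // address space, release the whole group and return true; otherwise return
       // false.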
1415   auto InvalidateGroupIfMemberMayWrap = [&](InterleaveGroup<Instruction> *Group,
1416                                             int Index,
1417                                             const char *FirstOrLast) -> bool {
1418     Instruction *Member = Group->getMember(Index);
1419     assert(Member && "Group member does not exist");
1420     Value *MemberPtr = getLoadStorePointerOperand(Member);
1421     Type *AccessTy = getLoadStoreType(Member);
1422     if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides,
1423                      /*Assume=*/false, /*ShouldCheckWrap=*/true).value_or(0))
1424       return false;
1425     LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
1426                       << FirstOrLast
1427                       << " group member potentially pointer-wrapping.\n");
1428     releaseGroup(Group);
1429     return true;
1430   };
1431 
1432   // Remove interleaved groups with gaps whose memory accesses may wrap around.
1433   // We have to revisit the getPtrStride analysis, this time with
1434   // ShouldCheckWrap=true, since collectConstStrideAccesses does not check
1435   // wrapping (see documentation there).
1436   // For now we use Assume=false;
1437   // TODO: Change to Assume=true but making sure we don't exceed the threshold
1438   // of runtime SCEV assumptions checks (thereby potentially failing to
1439   // vectorize altogether).
1440   // Additional optional optimizations:
1441   // TODO: If we are peeling the loop and we know that the first pointer doesn't
1442   // wrap then we can deduce that all pointers in the group don't wrap.
1443   // This means that we can forcefully peel the loop in order to only have to
1444   // check the first pointer for no-wrap. Once we change to Assume=true, we'll
1445   // need at most one runtime check per interleaved group.
1446   for (auto *Group : LoadGroups) {
1447     // Case 1: A full group. We can skip the checks; for full groups, if the
1448     // wide load would wrap around the address space, we would do a memory
1449     // access at nullptr even without the transformation.
1450     if (Group->getNumMembers() == Group->getFactor())
1451       continue;
1452 
1453     // Case 2: If the first and last members of the group don't wrap, this
1454     // implies that all the pointers in the group don't wrap. So we check only
1455     // group member 0 (which is always guaranteed to exist) and group member
1456     // Factor - 1; if the latter doesn't exist, we rely on peeling (if it is a
1457     // non-reversed access -- see Case 3).
1458     if (InvalidateGroupIfMemberMayWrap(Group, 0, "first"))
1459       continue;
1460     if (Group->getMember(Group->getFactor() - 1))
1461       InvalidateGroupIfMemberMayWrap(Group, Group->getFactor() - 1, "last");
1462     else {
1463       // Case 3: A non-reversed interleaved load group with gaps: We need
1464       // to execute at least one scalar epilogue iteration. This will ensure
1465       // we don't speculatively access memory out-of-bounds. We only need
1466       // to look for a member at index factor - 1, since every group must have
1467       // a member at index zero.
1468       if (Group->isReverse()) {
1469         LLVM_DEBUG(
1470             dbgs() << "LV: Invalidate candidate interleaved group due to "
1471                       "a reverse access with gaps.\n");
1472         releaseGroup(Group);
1473         continue;
1474       }
1475       LLVM_DEBUG(
1476           dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
1477       RequiresScalarEpilogue = true;
1478     }
1479   }
1480 
1481   for (auto *Group : StoreGroups) {
1482     // Case 1: A full group. We can skip the checks; for full groups, if the
1483     // wide store would wrap around the address space, we would do a memory
1484     // access at nullptr even without the transformation.
1485     if (Group->getNumMembers() == Group->getFactor())
1486       continue;
1487 
1488     // An interleave-store-group with gaps is implemented using a masked wide
1489     // store. Remove interleaved store groups with gaps if masked interleaved
1490     // accesses are not enabled by the target.
1491     if (!EnablePredicatedInterleavedMemAccesses) {
1492       LLVM_DEBUG(
1493           dbgs() << "LV: Invalidate candidate interleaved store group due "
1494                     "to gaps.\n");
1495       releaseGroup(Group);
1496       continue;
1497     }
1498 
1499     // Case 2: If the first and last members of the group don't wrap, this
1500     // implies that all the pointers in the group don't wrap. So we check only
1501     // group member 0 (which is always guaranteed to exist) and the last group
1502     // member. Case 3 (scalar epilogue) is not relevant for stores with gaps,
1503     // which are implemented with a masked store (rather than a speculative
1504     // access, as in loads).
1505     if (InvalidateGroupIfMemberMayWrap(Group, 0, "first"))
1506       continue;
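         // The group has gaps, so search backwards from Factor - 1 for the last
         // member that actually exists and check it for potential wrapping.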
1507     for (int Index = Group->getFactor() - 1; Index > 0; Index--)
1508       if (Group->getMember(Index)) {
1509         InvalidateGroupIfMemberMayWrap(Group, Index, "last");
1510         break;
1511       }
1512   }
1513 }
1514 
1515 void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
1516   // If no group had triggered the requirement to create an epilogue loop,
1517   // there is nothing to do.
1518   if (!requiresScalarEpilogue())
1519     return;
1520 
1521   // Release groups requiring scalar epilogues. Note that this also removes them
1522   // from InterleaveGroups.
1523   bool ReleasedGroup = InterleaveGroups.remove_if([&](auto *Group) {
1524     if (!Group->requiresScalarEpilogue())
1525       return false;
1526     LLVM_DEBUG(
1527         dbgs()
1528         << "LV: Invalidate candidate interleaved group due to gaps that "
1529            "require a scalar epilogue (not allowed under optsize) and cannot "
1530            "be masked (not enabled). \n");
1531     releaseGroupWithoutRemovingFromSet(Group);
1532     return true;
1533   });
1534   assert(ReleasedGroup && "At least one group must be invalidated, as a "
1535                           "scalar epilogue was required");
1536   (void)ReleasedGroup;
1537   RequiresScalarEpilogue = false;
1538 }
1539 
1540 template <typename InstT>
1541 void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
1542   llvm_unreachable("addMetadata can only be used for Instruction");
1543 }
1544 
1545 namespace llvm {
1546 template <>
1547 void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
1548   SmallVector<Value *, 4> VL;
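       // Collect the member instructions so that propagateMetadata can attach to
       // NewInst only the metadata that is valid for all members of the group.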
1549   std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
1550                  [](std::pair<int, Instruction *> p) { return p.second; });
1551   propagateMetadata(NewInst, VL);
1552 }
1553 } // namespace llvm
1554