xref: /llvm-project/llvm/lib/Analysis/VectorUtils.cpp (revision bab7920fd7ea822543b8f1aa8037d489eea2cb73)
1 //===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines vectorizer utilities.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/VectorUtils.h"
14 #include "llvm/ADT/EquivalenceClasses.h"
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/Analysis/DemandedBits.h"
17 #include "llvm/Analysis/LoopInfo.h"
18 #include "llvm/Analysis/LoopIterator.h"
19 #include "llvm/Analysis/ScalarEvolution.h"
20 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
21 #include "llvm/Analysis/TargetTransformInfo.h"
22 #include "llvm/Analysis/ValueTracking.h"
23 #include "llvm/IR/Constants.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/IRBuilder.h"
26 #include "llvm/IR/MemoryModelRelaxationAnnotations.h"
27 #include "llvm/IR/PatternMatch.h"
28 #include "llvm/IR/Value.h"
29 #include "llvm/Support/CommandLine.h"
30 
31 #define DEBUG_TYPE "vectorutils"
32 
33 using namespace llvm;
34 using namespace llvm::PatternMatch;
35 
36 /// Maximum factor for an interleaved memory access.
37 static cl::opt<unsigned> MaxInterleaveGroupFactor(
38     "max-interleave-group-factor", cl::Hidden,
39     cl::desc("Maximum factor for an interleaved access group (default = 8)"),
40     cl::init(8));
41 
42 /// Return true if all of the intrinsic's arguments and return type are scalars
43 /// for the scalar form of the intrinsic, and vectors for the vector form of the
44 /// intrinsic (except operands that are marked as always being scalar by
45 /// isVectorIntrinsicWithScalarOpAtArg).
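/// For example, llvm.smax operates on i32 in its scalar form and on a vector
/// type such as <4 x i32> in its vector form; this predicate returns true for it.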
46 bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
47   switch (ID) {
48   case Intrinsic::abs:   // Begin integer bit-manipulation.
49   case Intrinsic::bswap:
50   case Intrinsic::bitreverse:
51   case Intrinsic::ctpop:
52   case Intrinsic::ctlz:
53   case Intrinsic::cttz:
54   case Intrinsic::fshl:
55   case Intrinsic::fshr:
56   case Intrinsic::smax:
57   case Intrinsic::smin:
58   case Intrinsic::umax:
59   case Intrinsic::umin:
60   case Intrinsic::sadd_sat:
61   case Intrinsic::ssub_sat:
62   case Intrinsic::uadd_sat:
63   case Intrinsic::usub_sat:
64   case Intrinsic::smul_fix:
65   case Intrinsic::smul_fix_sat:
66   case Intrinsic::umul_fix:
67   case Intrinsic::umul_fix_sat:
68   case Intrinsic::sqrt: // Begin floating-point.
69   case Intrinsic::asin:
70   case Intrinsic::acos:
71   case Intrinsic::atan:
72   case Intrinsic::atan2:
73   case Intrinsic::sin:
74   case Intrinsic::cos:
75   case Intrinsic::tan:
76   case Intrinsic::sinh:
77   case Intrinsic::cosh:
78   case Intrinsic::tanh:
79   case Intrinsic::exp:
80   case Intrinsic::exp10:
81   case Intrinsic::exp2:
82   case Intrinsic::log:
83   case Intrinsic::log10:
84   case Intrinsic::log2:
85   case Intrinsic::fabs:
86   case Intrinsic::minnum:
87   case Intrinsic::maxnum:
88   case Intrinsic::minimum:
89   case Intrinsic::maximum:
90   case Intrinsic::copysign:
91   case Intrinsic::floor:
92   case Intrinsic::ceil:
93   case Intrinsic::trunc:
94   case Intrinsic::rint:
95   case Intrinsic::nearbyint:
96   case Intrinsic::round:
97   case Intrinsic::roundeven:
98   case Intrinsic::pow:
99   case Intrinsic::fma:
100   case Intrinsic::fmuladd:
101   case Intrinsic::is_fpclass:
102   case Intrinsic::powi:
103   case Intrinsic::canonicalize:
104   case Intrinsic::fptosi_sat:
105   case Intrinsic::fptoui_sat:
106   case Intrinsic::lrint:
107   case Intrinsic::llrint:
108   case Intrinsic::ucmp:
109   case Intrinsic::scmp:
110     return true;
111   default:
112     return false;
113   }
114 }
115 
116 bool llvm::isTriviallyScalarizable(Intrinsic::ID ID,
117                                    const TargetTransformInfo *TTI) {
118   if (isTriviallyVectorizable(ID))
119     return true;
120 
121   if (TTI && Intrinsic::isTargetIntrinsic(ID))
122     return TTI->isTargetIntrinsicTriviallyScalarizable(ID);
123 
124   // TODO: Move frexp to isTriviallyVectorizable.
125   // https://github.com/llvm/llvm-project/issues/112408
126   switch (ID) {
127   case Intrinsic::frexp:
128     return true;
129   }
130   return false;
131 }
132 
133 /// Identifies if the vector form of the intrinsic has a scalar operand.
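/// For example, the second operand of llvm.ctlz (the i1 is-zero-poison flag)
/// and of llvm.powi (the i32 exponent) stays scalar in the vector form, so this
/// returns true for those intrinsics when ScalarOpdIdx == 1.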
134 bool llvm::isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
135                                               unsigned ScalarOpdIdx,
136                                               const TargetTransformInfo *TTI) {
137 
138   if (TTI && Intrinsic::isTargetIntrinsic(ID))
139     return TTI->isTargetIntrinsicWithScalarOpAtArg(ID, ScalarOpdIdx);
140 
141   switch (ID) {
142   case Intrinsic::abs:
143   case Intrinsic::vp_abs:
144   case Intrinsic::ctlz:
145   case Intrinsic::vp_ctlz:
146   case Intrinsic::cttz:
147   case Intrinsic::vp_cttz:
148   case Intrinsic::is_fpclass:
149   case Intrinsic::vp_is_fpclass:
150   case Intrinsic::powi:
151     return (ScalarOpdIdx == 1);
152   case Intrinsic::smul_fix:
153   case Intrinsic::smul_fix_sat:
154   case Intrinsic::umul_fix:
155   case Intrinsic::umul_fix_sat:
156     return (ScalarOpdIdx == 2);
157   default:
158     return false;
159   }
160 }
161 
162 bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(
163     Intrinsic::ID ID, int OpdIdx, const TargetTransformInfo *TTI) {
164   assert(ID != Intrinsic::not_intrinsic && "Not an intrinsic!");
165 
166   if (TTI && Intrinsic::isTargetIntrinsic(ID))
167     return TTI->isTargetIntrinsicWithOverloadTypeAtArg(ID, OpdIdx);
168 
169   if (VPCastIntrinsic::isVPCast(ID))
170     return OpdIdx == -1 || OpdIdx == 0;
171 
172   switch (ID) {
173   case Intrinsic::fptosi_sat:
174   case Intrinsic::fptoui_sat:
175   case Intrinsic::lrint:
176   case Intrinsic::llrint:
177   case Intrinsic::vp_lrint:
178   case Intrinsic::vp_llrint:
179   case Intrinsic::ucmp:
180   case Intrinsic::scmp:
181     return OpdIdx == -1 || OpdIdx == 0;
182   case Intrinsic::is_fpclass:
183   case Intrinsic::vp_is_fpclass:
184     return OpdIdx == 0;
185   case Intrinsic::powi:
186     return OpdIdx == -1 || OpdIdx == 1;
187   default:
188     return OpdIdx == -1;
189   }
190 }
191 
192 bool llvm::isVectorIntrinsicWithStructReturnOverloadAtField(
193     Intrinsic::ID ID, int RetIdx, const TargetTransformInfo *TTI) {
194 
195   if (TTI && Intrinsic::isTargetIntrinsic(ID))
196     return TTI->isTargetIntrinsicWithStructReturnOverloadAtField(ID, RetIdx);
197 
198   switch (ID) {
199   case Intrinsic::frexp:
200     return RetIdx == 0 || RetIdx == 1;
201   default:
202     return RetIdx == 0;
203   }
204 }
205 
206 /// Returns intrinsic ID for call.
207 /// For the input call instruction it finds the mapped intrinsic and returns
208 /// its ID; if no such mapping is found, it returns not_intrinsic.
209 Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
210                                                 const TargetLibraryInfo *TLI) {
211   Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
212   if (ID == Intrinsic::not_intrinsic)
213     return Intrinsic::not_intrinsic;
214 
215   if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
216       ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
217       ID == Intrinsic::experimental_noalias_scope_decl ||
218       ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
219     return ID;
220   return Intrinsic::not_intrinsic;
221 }
222 
223 /// Given a vector and an element number, see if the scalar value is
224 /// already available in a register, for example if it was inserted and then
225 /// extracted from the vector.
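/// For example, given %v = insertelement <4 x i32> %w, i32 %x, i32 1 (with
/// illustrative value names), findScalarElement(%v, 1) returns %x, and other
/// element numbers recurse into %w.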
226 Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
227   assert(V->getType()->isVectorTy() && "Not looking at a vector?");
228   VectorType *VTy = cast<VectorType>(V->getType());
229   // For fixed-length vectors, return poison for out-of-range accesses.
230   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
231     unsigned Width = FVTy->getNumElements();
232     if (EltNo >= Width)
233       return PoisonValue::get(FVTy->getElementType());
234   }
235 
236   if (Constant *C = dyn_cast<Constant>(V))
237     return C->getAggregateElement(EltNo);
238 
239   if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
240     // If this is an insert to a variable element, we don't know what it is.
241     if (!isa<ConstantInt>(III->getOperand(2)))
242       return nullptr;
243     unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();
244 
245     // If this is an insert to the element we are looking for, return the
246     // inserted value.
247     if (EltNo == IIElt)
248       return III->getOperand(1);
249 
250     // Guard against infinite loop on malformed, unreachable IR.
251     if (III == III->getOperand(0))
252       return nullptr;
253 
254     // Otherwise, the insertelement doesn't modify the value, recurse on its
255     // vector input.
256     return findScalarElement(III->getOperand(0), EltNo);
257   }
258 
259   ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
260   // Restrict the following transformation to fixed-length vectors.
261   if (SVI && isa<FixedVectorType>(SVI->getType())) {
262     unsigned LHSWidth =
263         cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
264     int InEl = SVI->getMaskValue(EltNo);
265     if (InEl < 0)
266       return PoisonValue::get(VTy->getElementType());
267     if (InEl < (int)LHSWidth)
268       return findScalarElement(SVI->getOperand(0), InEl);
269     return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
270   }
271 
272   // Extract a value from a vector add operation with a constant zero.
273   // TODO: Use getBinOpIdentity() to generalize this.
274   Value *Val; Constant *C;
275   if (match(V, m_Add(m_Value(Val), m_Constant(C))))
276     if (Constant *Elt = C->getAggregateElement(EltNo))
277       if (Elt->isNullValue())
278         return findScalarElement(Val, EltNo);
279 
280   // If the vector is a splat then we can trivially find the scalar element.
281   if (isa<ScalableVectorType>(VTy))
282     if (Value *Splat = getSplatValue(V))
283       if (EltNo < VTy->getElementCount().getKnownMinValue())
284         return Splat;
285 
286   // Otherwise, we don't know.
287   return nullptr;
288 }
289 
290 int llvm::getSplatIndex(ArrayRef<int> Mask) {
291   int SplatIndex = -1;
292   for (int M : Mask) {
293     // Ignore invalid (undefined) mask elements.
294     if (M < 0)
295       continue;
296 
297     // There can be only 1 non-negative mask element value if this is a splat.
298     if (SplatIndex != -1 && SplatIndex != M)
299       return -1;
300 
301     // Initialize the splat index to the 1st non-negative mask element.
302     SplatIndex = M;
303   }
304   assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
305   return SplatIndex;
306 }
307 
308 /// Get splat value if the input is a splat vector or return nullptr.
309 /// This function is not fully general. It checks only 2 cases:
310 /// the input value is (1) a splat constant vector or (2) a sequence
311 /// of instructions that broadcasts a scalar at element 0.
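/// A minimal IR sketch of case (2), with illustrative value names:
///   %i = insertelement <4 x i32> poison, i32 %x, i64 0
///   %s = shufflevector <4 x i32> %i, <4 x i32> poison, <4 x i32> zeroinitializer
/// getSplatValue(%s) returns %x.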
312 Value *llvm::getSplatValue(const Value *V) {
313   if (isa<VectorType>(V->getType()))
314     if (auto *C = dyn_cast<Constant>(V))
315       return C->getSplatValue();
316 
317   // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
318   Value *Splat;
319   if (match(V,
320             m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
321                       m_Value(), m_ZeroMask())))
322     return Splat;
323 
324   return nullptr;
325 }
326 
327 bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
328   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
329 
330   if (isa<VectorType>(V->getType())) {
331     if (isa<UndefValue>(V))
332       return true;
333     // FIXME: We can allow undefs, but if Index was specified, we may want to
334     //        check that the constant is defined at that index.
335     if (auto *C = dyn_cast<Constant>(V))
336       return C->getSplatValue() != nullptr;
337   }
338 
339   if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
340     // FIXME: We can safely allow undefs here. If Index was specified, we will
341     //        check that the mask elt is defined at the required index.
342     if (!all_equal(Shuf->getShuffleMask()))
343       return false;
344 
345     // Match any index.
346     if (Index == -1)
347       return true;
348 
349     // Match a specific element. The mask should be defined at and match the
350     // specified index.
351     return Shuf->getMaskValue(Index) == Index;
352   }
353 
354   // The remaining tests are all recursive, so bail out if we hit the limit.
355   if (Depth++ == MaxAnalysisRecursionDepth)
356     return false;
357 
358   // If both operands of a binop are splats, the result is a splat.
359   Value *X, *Y, *Z;
360   if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
361     return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);
362 
363   // If all operands of a select are splats, the result is a splat.
364   if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
365     return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
366            isSplatValue(Z, Index, Depth);
367 
368   // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).
369 
370   return false;
371 }
372 
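// For example, with SrcWidth = 4, Mask = {0, 5, 1, 7} and DemandedElts = 0b1010,
// only the second operand is demanded: DemandedLHS = 0 and DemandedRHS = 0b1010.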
373 bool llvm::getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
374                                   const APInt &DemandedElts, APInt &DemandedLHS,
375                                   APInt &DemandedRHS, bool AllowUndefElts) {
376   DemandedLHS = DemandedRHS = APInt::getZero(SrcWidth);
377 
378   // Early out if we don't demand any elements.
379   if (DemandedElts.isZero())
380     return true;
381 
382   // Simple case of a shuffle with zeroinitializer.
383   if (all_of(Mask, [](int Elt) { return Elt == 0; })) {
384     DemandedLHS.setBit(0);
385     return true;
386   }
387 
388   for (unsigned I = 0, E = Mask.size(); I != E; ++I) {
389     int M = Mask[I];
390     assert((-1 <= M) && (M < (SrcWidth * 2)) &&
391            "Invalid shuffle mask constant");
392 
393     if (!DemandedElts[I] || (AllowUndefElts && (M < 0)))
394       continue;
395 
396     // For undef elements, we don't know anything about the common state of
397     // the shuffle result.
398     if (M < 0)
399       return false;
400 
401     if (M < SrcWidth)
402       DemandedLHS.setBit(M);
403     else
404       DemandedRHS.setBit(M - SrcWidth);
405   }
406 
407   return true;
408 }
409 
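// For example, narrowShuffleMaskElts(2, {0, 2}, Out) produces {0, 1, 4, 5}: each
// original element M expands to the Scale consecutive elements starting at
// Scale * M, while negative (undef/sentinel) elements are simply repeated.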
410 void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
411                                  SmallVectorImpl<int> &ScaledMask) {
412   assert(Scale > 0 && "Unexpected scaling factor");
413 
414   // Fast-path: if no scaling, then it is just a copy.
415   if (Scale == 1) {
416     ScaledMask.assign(Mask.begin(), Mask.end());
417     return;
418   }
419 
420   ScaledMask.clear();
421   for (int MaskElt : Mask) {
422     if (MaskElt >= 0) {
423       assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
424              "Overflowed 32-bits");
425     }
426     for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
427       ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
428   }
429 }
430 
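// For example, widenShuffleMaskElts(2, {0, 1, 6, 7}, Out) succeeds with {0, 3},
// whereas a slice such as {6, 5} would fail because its elements are not
// consecutive.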
431 bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
432                                 SmallVectorImpl<int> &ScaledMask) {
433   assert(Scale > 0 && "Unexpected scaling factor");
434 
435   // Fast-path: if no scaling, then it is just a copy.
436   if (Scale == 1) {
437     ScaledMask.assign(Mask.begin(), Mask.end());
438     return true;
439   }
440 
441   // We must map the original elements down evenly to a type with fewer elements.
442   int NumElts = Mask.size();
443   if (NumElts % Scale != 0)
444     return false;
445 
446   ScaledMask.clear();
447   ScaledMask.reserve(NumElts / Scale);
448 
449   // Step through the input mask by splitting into Scale-sized slices.
450   do {
451     ArrayRef<int> MaskSlice = Mask.take_front(Scale);
452     assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice.");
453 
454     // The first element of the slice determines how we evaluate this slice.
455     int SliceFront = MaskSlice.front();
456     if (SliceFront < 0) {
457       // Negative values (undef or other "sentinel" values) must be equal across
458       // the entire slice.
459       if (!all_equal(MaskSlice))
460         return false;
461       ScaledMask.push_back(SliceFront);
462     } else {
463       // A positive mask element must be cleanly divisible.
464       if (SliceFront % Scale != 0)
465         return false;
466       // Elements of the slice must be consecutive.
467       for (int i = 1; i < Scale; ++i)
468         if (MaskSlice[i] != SliceFront + i)
469           return false;
470       ScaledMask.push_back(SliceFront / Scale);
471     }
472     Mask = Mask.drop_front(Scale);
473   } while (!Mask.empty());
474 
475   assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask");
476 
477   // All elements of the original mask can be scaled down to map to the elements
478   // of a mask with wider elements.
479   return true;
480 }
481 
482 bool llvm::widenShuffleMaskElts(ArrayRef<int> M,
483                                 SmallVectorImpl<int> &NewMask) {
484   unsigned NumElts = M.size();
485   if (NumElts % 2 != 0)
486     return false;
487 
488   NewMask.clear();
489   for (unsigned i = 0; i < NumElts; i += 2) {
490     int M0 = M[i];
491     int M1 = M[i + 1];
492 
493     // If both elements are undef, the new mask element is undef too.
494     if (M0 == -1 && M1 == -1) {
495       NewMask.push_back(-1);
496       continue;
497     }
498 
499     if (M0 == -1 && M1 != -1 && (M1 % 2) == 1) {
500       NewMask.push_back(M1 / 2);
501       continue;
502     }
503 
504     if (M0 != -1 && (M0 % 2) == 0 && ((M0 + 1) == M1 || M1 == -1)) {
505       NewMask.push_back(M0 / 2);
506       continue;
507     }
508 
509     NewMask.clear();
510     return false;
511   }
512 
513   assert(NewMask.size() == NumElts / 2 && "Incorrect size for mask!");
514   return true;
515 }
516 
517 bool llvm::scaleShuffleMaskElts(unsigned NumDstElts, ArrayRef<int> Mask,
518                                 SmallVectorImpl<int> &ScaledMask) {
519   unsigned NumSrcElts = Mask.size();
520   assert(NumSrcElts > 0 && NumDstElts > 0 && "Unexpected scaling factor");
521 
522   // Fast-path: if no scaling, then it is just a copy.
523   if (NumSrcElts == NumDstElts) {
524     ScaledMask.assign(Mask.begin(), Mask.end());
525     return true;
526   }
527 
528   // Ensure we can find a whole scale factor.
529   assert(((NumSrcElts % NumDstElts) == 0 || (NumDstElts % NumSrcElts) == 0) &&
530          "Unexpected scaling factor");
531 
532   if (NumSrcElts > NumDstElts) {
533     int Scale = NumSrcElts / NumDstElts;
534     return widenShuffleMaskElts(Scale, Mask, ScaledMask);
535   }
536 
537   int Scale = NumDstElts / NumSrcElts;
538   narrowShuffleMaskElts(Scale, Mask, ScaledMask);
539   return true;
540 }
541 
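// For example, the identity mask {0, 1, 2, 3} collapses all the way down to {0},
// while {1, 0, 3, 2} cannot be widened at all and is returned unchanged.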
542 void llvm::getShuffleMaskWithWidestElts(ArrayRef<int> Mask,
543                                         SmallVectorImpl<int> &ScaledMask) {
544   std::array<SmallVector<int, 16>, 2> TmpMasks;
545   SmallVectorImpl<int> *Output = &TmpMasks[0], *Tmp = &TmpMasks[1];
546   ArrayRef<int> InputMask = Mask;
547   for (unsigned Scale = 2; Scale <= InputMask.size(); ++Scale) {
548     while (widenShuffleMaskElts(Scale, InputMask, *Output)) {
549       InputMask = *Output;
550       std::swap(Output, Tmp);
551     }
552   }
553   ScaledMask.assign(InputMask.begin(), InputMask.end());
554 }
555 
556 void llvm::processShuffleMasks(
557     ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
558     unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
559     function_ref<void(ArrayRef<int>, unsigned, unsigned)> SingleInputAction,
560     function_ref<void(ArrayRef<int>, unsigned, unsigned, bool)>
561         ManyInputsAction) {
562   SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs);
563   // Try to perform better estimation of the permutation.
564   // 1. Split the source/destination vectors into real registers.
565   // 2. Do the mask analysis to identify which real registers are
566   // permuted.
567   int Sz = Mask.size();
568   unsigned SzDest = Sz / NumOfDestRegs;
569   unsigned SzSrc = Sz / NumOfSrcRegs;
570   for (unsigned I = 0; I < NumOfDestRegs; ++I) {
571     auto &RegMasks = Res[I];
572     RegMasks.assign(2 * NumOfSrcRegs, {});
573     // Determine which source registers the values in this dest register come
574     // from, building a sub-mask per source register.
575     for (unsigned K = 0; K < SzDest; ++K) {
576       int Idx = I * SzDest + K;
577       if (Idx == Sz)
578         break;
579       if (Mask[Idx] >= 2 * Sz || Mask[Idx] == PoisonMaskElem)
580         continue;
581       int MaskIdx = Mask[Idx] % Sz;
582       int SrcRegIdx = MaskIdx / SzSrc + (Mask[Idx] >= Sz ? NumOfSrcRegs : 0);
583       // Add a cost of PermuteTwoSrc for each new source register permute,
584       // if we have more than one source register.
585       if (RegMasks[SrcRegIdx].empty())
586         RegMasks[SrcRegIdx].assign(SzDest, PoisonMaskElem);
587       RegMasks[SrcRegIdx][K] = MaskIdx % SzSrc;
588     }
589   }
590   // Process split mask.
591   for (unsigned I : seq<unsigned>(NumOfUsedRegs)) {
592     auto &Dest = Res[I];
593     int NumSrcRegs =
594         count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
595     switch (NumSrcRegs) {
596     case 0:
597       // No input vectors were used!
598       NoInputAction();
599       break;
600     case 1: {
601       // Find the single non-empty source register mask.
602       auto *It =
603           find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
604       unsigned SrcReg = std::distance(Dest.begin(), It);
605       SingleInputAction(*It, SrcReg, I);
606       break;
607     }
608     default: {
609       // The first mask is a permutation of a single register. Since we have
610       // more than one input register to shuffle, we merge the masks of the
611       // first 2 registers and generate a shuffle of 2 registers rather than
612       // reordering the first register and then shuffling it with the second
613       // register. Next, generate the shuffles of the resulting register + the
614       // remaining registers from the list.
615       auto &&CombineMasks = [](MutableArrayRef<int> FirstMask,
616                                ArrayRef<int> SecondMask) {
617         for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) {
618           if (SecondMask[Idx] != PoisonMaskElem) {
619             assert(FirstMask[Idx] == PoisonMaskElem &&
620                    "Expected undefined mask element.");
621             FirstMask[Idx] = SecondMask[Idx] + VF;
622           }
623         }
624       };
625       auto &&NormalizeMask = [](MutableArrayRef<int> Mask) {
626         for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
627           if (Mask[Idx] != PoisonMaskElem)
628             Mask[Idx] = Idx;
629         }
630       };
631       int SecondIdx;
632       bool NewReg = true;
633       do {
634         int FirstIdx = -1;
635         SecondIdx = -1;
636         MutableArrayRef<int> FirstMask, SecondMask;
637         for (unsigned I : seq<unsigned>(2 * NumOfSrcRegs)) {
638           SmallVectorImpl<int> &RegMask = Dest[I];
639           if (RegMask.empty())
640             continue;
641 
642           if (FirstIdx == SecondIdx) {
643             FirstIdx = I;
644             FirstMask = RegMask;
645             continue;
646           }
647           SecondIdx = I;
648           SecondMask = RegMask;
649           CombineMasks(FirstMask, SecondMask);
650           ManyInputsAction(FirstMask, FirstIdx, SecondIdx, NewReg);
651           NewReg = false;
652           NormalizeMask(FirstMask);
653           RegMask.clear();
654           SecondMask = FirstMask;
655           SecondIdx = FirstIdx;
656         }
657         if (FirstIdx != SecondIdx && SecondIdx >= 0) {
658           CombineMasks(SecondMask, FirstMask);
659           ManyInputsAction(SecondMask, SecondIdx, FirstIdx, NewReg);
660           NewReg = false;
661           Dest[FirstIdx].clear();
662           NormalizeMask(SecondMask);
663         }
664       } while (SecondIdx >= 0);
665       break;
666     }
667     }
668   }
669 }
670 
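// For example, for a 256-bit vector with 8 elements (two 128-bit lanes of 4),
// demanding result element 1 demands element 2 of the LHS, and demanding result
// element 2 demands element 0 of the RHS, matching how horizontal operations
// pair adjacent elements within each 128-bit lane.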
671 void llvm::getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth,
672                                                const APInt &DemandedElts,
673                                                APInt &DemandedLHS,
674                                                APInt &DemandedRHS) {
675   assert(VectorBitWidth >= 128 && "Vectors smaller than 128 bit not supported");
676   int NumLanes = VectorBitWidth / 128;
677   int NumElts = DemandedElts.getBitWidth();
678   int NumEltsPerLane = NumElts / NumLanes;
679   int HalfEltsPerLane = NumEltsPerLane / 2;
680 
681   DemandedLHS = APInt::getZero(NumElts);
682   DemandedRHS = APInt::getZero(NumElts);
683 
684   // Map DemandedElts to the horizontal operands.
685   for (int Idx = 0; Idx != NumElts; ++Idx) {
686     if (!DemandedElts[Idx])
687       continue;
688     int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
689     int LocalIdx = Idx % NumEltsPerLane;
690     if (LocalIdx < HalfEltsPerLane) {
691       DemandedLHS.setBit(LaneIdx + 2 * LocalIdx);
692     } else {
693       LocalIdx -= HalfEltsPerLane;
694       DemandedRHS.setBit(LaneIdx + 2 * LocalIdx);
695     }
696   }
697 }
698 
699 MapVector<Instruction *, uint64_t>
700 llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
701                                const TargetTransformInfo *TTI) {
702 
703   // DemandedBits will give us every value's live-out bits. But we want
704   // to ensure no extra casts would need to be inserted, so every DAG
705   // of connected values must have the same minimum bitwidth.
706   EquivalenceClasses<Value *> ECs;
707   SmallVector<Value *, 16> Worklist;
708   SmallPtrSet<Value *, 4> Roots;
709   SmallPtrSet<Value *, 16> Visited;
710   DenseMap<Value *, uint64_t> DBits;
711   SmallPtrSet<Instruction *, 4> InstructionSet;
712   MapVector<Instruction *, uint64_t> MinBWs;
713 
714   // Determine the roots. We work bottom-up, from truncs or icmps.
715   bool SeenExtFromIllegalType = false;
716   for (auto *BB : Blocks)
717     for (auto &I : *BB) {
718       InstructionSet.insert(&I);
719 
720       if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
721           !TTI->isTypeLegal(I.getOperand(0)->getType()))
722         SeenExtFromIllegalType = true;
723 
724       // Only deal with non-vector integers up to 64-bits wide.
725       if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
726           !I.getType()->isVectorTy() &&
727           I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
728         // Don't make work for ourselves. If we know the result type of the
729         // trunc is already legal, don't add it to the worklist.
730         if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
731           continue;
732 
733         Worklist.push_back(&I);
734         Roots.insert(&I);
735       }
736     }
737   // Early exit.
738   if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
739     return MinBWs;
740 
741   // Now proceed breadth-first, unioning values together.
742   while (!Worklist.empty()) {
743     Value *Val = Worklist.pop_back_val();
744     Value *Leader = ECs.getOrInsertLeaderValue(Val);
745 
746     if (!Visited.insert(Val).second)
747       continue;
748 
749     // Non-instructions terminate a chain successfully.
750     if (!isa<Instruction>(Val))
751       continue;
752     Instruction *I = cast<Instruction>(Val);
753 
754     // If we encounter a type that is larger than 64 bits, we can't represent
755     // it so bail out.
756     if (DB.getDemandedBits(I).getBitWidth() > 64)
757       return MapVector<Instruction *, uint64_t>();
758 
759     uint64_t V = DB.getDemandedBits(I).getZExtValue();
760     DBits[Leader] |= V;
761     DBits[I] = V;
762 
763     // Casts, loads and instructions outside of our range terminate a chain
764     // successfully.
765     if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
766         !InstructionSet.count(I))
767       continue;
768 
769     // Unsafe casts terminate a chain unsuccessfully. We can't do anything
770     // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
771     // transform anything that relies on them.
772     if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
773         !I->getType()->isIntegerTy()) {
774       DBits[Leader] |= ~0ULL;
775       continue;
776     }
777 
778     // We don't modify the types of PHIs. Reductions will already have been
779     // truncated if possible, and inductions' sizes will have been chosen by
780     // indvars.
781     if (isa<PHINode>(I))
782       continue;
783 
784     if (DBits[Leader] == ~0ULL)
785       // All bits demanded, no point continuing.
786       continue;
787 
788     for (Value *O : cast<User>(I)->operands()) {
789       ECs.unionSets(Leader, O);
790       Worklist.push_back(O);
791     }
792   }
793 
794   // Now we've discovered all values, walk them to see if there are
795   // any users we didn't see. If there are, we can't optimize that
796   // chain.
797   for (auto &I : DBits)
798     for (auto *U : I.first->users())
799       if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
800         DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;
801 
802   for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
803     uint64_t LeaderDemandedBits = 0;
804     for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
805       LeaderDemandedBits |= DBits[M];
806 
807     uint64_t MinBW = llvm::bit_width(LeaderDemandedBits);
808     // Round up to a power of 2
809     MinBW = llvm::bit_ceil(MinBW);
810 
811     // We don't modify the types of PHIs. Reductions will already have been
812     // truncated if possible, and inductions' sizes will have been chosen by
813     // indvars.
814     // If we are required to shrink a PHI, abandon this entire equivalence class.
815     bool Abort = false;
816     for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
817       if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) {
818         Abort = true;
819         break;
820       }
821     if (Abort)
822       continue;
823 
824     for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) {
825       auto *MI = dyn_cast<Instruction>(M);
826       if (!MI)
827         continue;
828       Type *Ty = M->getType();
829       if (Roots.count(M))
830         Ty = MI->getOperand(0)->getType();
831 
832       if (MinBW >= Ty->getScalarSizeInBits())
833         continue;
834 
835       // If any of M's operands demand more bits than MinBW then M cannot be
836       // performed safely in MinBW.
837       if (any_of(MI->operands(), [&DB, MinBW](Use &U) {
838             auto *CI = dyn_cast<ConstantInt>(U);
839             // For constant shift amounts, check if the shift would result in
840             // poison.
841             if (CI &&
842                 isa<ShlOperator, LShrOperator, AShrOperator>(U.getUser()) &&
843                 U.getOperandNo() == 1)
844               return CI->uge(MinBW);
845             uint64_t BW = bit_width(DB.getDemandedBits(&U).getZExtValue());
846             return bit_ceil(BW) > MinBW;
847           }))
848         continue;
849 
850       MinBWs[MI] = MinBW;
851     }
852   }
853 
854   return MinBWs;
855 }
856 
857 /// Add all access groups in @p AccGroups to @p List.
858 template <typename ListT>
859 static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
860   // Interpret an access group as a list containing itself.
861   if (AccGroups->getNumOperands() == 0) {
862     assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
863     List.insert(AccGroups);
864     return;
865   }
866 
867   for (const auto &AccGroupListOp : AccGroups->operands()) {
868     auto *Item = cast<MDNode>(AccGroupListOp.get());
869     assert(isValidAsAccessGroup(Item) && "List item must be an access group");
870     List.insert(Item);
871   }
872 }
873 
874 MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
875   if (!AccGroups1)
876     return AccGroups2;
877   if (!AccGroups2)
878     return AccGroups1;
879   if (AccGroups1 == AccGroups2)
880     return AccGroups1;
881 
882   SmallSetVector<Metadata *, 4> Union;
883   addToAccessGroupList(Union, AccGroups1);
884   addToAccessGroupList(Union, AccGroups2);
885 
886   if (Union.size() == 0)
887     return nullptr;
888   if (Union.size() == 1)
889     return cast<MDNode>(Union.front());
890 
891   LLVMContext &Ctx = AccGroups1->getContext();
892   return MDNode::get(Ctx, Union.getArrayRef());
893 }
894 
895 MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
896                                     const Instruction *Inst2) {
897   bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
898   bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();
899 
900   if (!MayAccessMem1 && !MayAccessMem2)
901     return nullptr;
902   if (!MayAccessMem1)
903     return Inst2->getMetadata(LLVMContext::MD_access_group);
904   if (!MayAccessMem2)
905     return Inst1->getMetadata(LLVMContext::MD_access_group);
906 
907   MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
908   MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
909   if (!MD1 || !MD2)
910     return nullptr;
911   if (MD1 == MD2)
912     return MD1;
913 
914   // Use set for scalable 'contains' check.
915   SmallPtrSet<Metadata *, 4> AccGroupSet2;
916   addToAccessGroupList(AccGroupSet2, MD2);
917 
918   SmallVector<Metadata *, 4> Intersection;
919   if (MD1->getNumOperands() == 0) {
920     assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
921     if (AccGroupSet2.count(MD1))
922       Intersection.push_back(MD1);
923   } else {
924     for (const MDOperand &Node : MD1->operands()) {
925       auto *Item = cast<MDNode>(Node.get());
926       assert(isValidAsAccessGroup(Item) && "List item must be an access group");
927       if (AccGroupSet2.count(Item))
928         Intersection.push_back(Item);
929     }
930   }
931 
932   if (Intersection.size() == 0)
933     return nullptr;
934   if (Intersection.size() == 1)
935     return cast<MDNode>(Intersection.front());
936 
937   LLVMContext &Ctx = Inst1->getContext();
938   return MDNode::get(Ctx, Intersection);
939 }
940 
941 /// \returns \p I after propagating metadata from \p VL.
942 Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
943   if (VL.empty())
944     return Inst;
945   Instruction *I0 = cast<Instruction>(VL[0]);
946   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
947   I0->getAllMetadataOtherThanDebugLoc(Metadata);
948 
949   for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
950                     LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
951                     LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
952                     LLVMContext::MD_access_group, LLVMContext::MD_mmra}) {
953     MDNode *MD = I0->getMetadata(Kind);
954     for (int J = 1, E = VL.size(); MD && J != E; ++J) {
955       const Instruction *IJ = cast<Instruction>(VL[J]);
956       MDNode *IMD = IJ->getMetadata(Kind);
957 
958       switch (Kind) {
959       case LLVMContext::MD_mmra: {
960         MD = MMRAMetadata::combine(Inst->getContext(), MD, IMD);
961         break;
962       }
963       case LLVMContext::MD_tbaa:
964         MD = MDNode::getMostGenericTBAA(MD, IMD);
965         break;
966       case LLVMContext::MD_alias_scope:
967         MD = MDNode::getMostGenericAliasScope(MD, IMD);
968         break;
969       case LLVMContext::MD_fpmath:
970         MD = MDNode::getMostGenericFPMath(MD, IMD);
971         break;
972       case LLVMContext::MD_noalias:
973       case LLVMContext::MD_nontemporal:
974       case LLVMContext::MD_invariant_load:
975         MD = MDNode::intersect(MD, IMD);
976         break;
977       case LLVMContext::MD_access_group:
978         MD = intersectAccessGroups(Inst, IJ);
979         break;
980       default:
981         llvm_unreachable("unhandled metadata");
982       }
983     }
984 
985     Inst->setMetadata(Kind, MD);
986   }
987 
988   return Inst;
989 }
990 
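// For example, for an interleave group with factor 3 whose member at index 1 is
// missing, VF = 2 yields the gap mask <1, 0, 1, 1, 0, 1> (as i1 values).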
991 Constant *
992 llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
993                            const InterleaveGroup<Instruction> &Group) {
994   // All 1's means mask is not needed.
995   if (Group.getNumMembers() == Group.getFactor())
996     return nullptr;
997 
998   // TODO: support reversed access.
999   assert(!Group.isReverse() && "Reversed group not supported.");
1000 
1001   SmallVector<Constant *, 16> Mask;
1002   for (unsigned i = 0; i < VF; i++)
1003     for (unsigned j = 0; j < Group.getFactor(); ++j) {
1004       unsigned HasMember = Group.getMember(j) ? 1 : 0;
1005       Mask.push_back(Builder.getInt1(HasMember));
1006     }
1007 
1008   return ConstantVector::get(Mask);
1009 }
1010 
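// For example, createReplicatedMask(3, 2) returns <0, 0, 0, 1, 1, 1>.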
1011 llvm::SmallVector<int, 16>
1012 llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) {
1013   SmallVector<int, 16> MaskVec;
1014   for (unsigned i = 0; i < VF; i++)
1015     for (unsigned j = 0; j < ReplicationFactor; j++)
1016       MaskVec.push_back(i);
1017 
1018   return MaskVec;
1019 }
1020 
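// For example, createInterleaveMask(4, 2) returns <0, 4, 1, 5, 2, 6, 3, 7>,
// interleaving the corresponding lanes of two 4-element vectors.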
1021 llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF,
1022                                                       unsigned NumVecs) {
1023   SmallVector<int, 16> Mask;
1024   for (unsigned i = 0; i < VF; i++)
1025     for (unsigned j = 0; j < NumVecs; j++)
1026       Mask.push_back(j * VF + i);
1027 
1028   return Mask;
1029 }
1030 
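// For example, createStrideMask(1, 3, 4) returns <1, 4, 7, 10>.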
1031 llvm::SmallVector<int, 16>
1032 llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) {
1033   SmallVector<int, 16> Mask;
1034   for (unsigned i = 0; i < VF; i++)
1035     Mask.push_back(Start + i * Stride);
1036 
1037   return Mask;
1038 }
1039 
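// For example, createSequentialMask(2, 3, 2) returns <2, 3, 4, -1, -1>.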
1040 llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start,
1041                                                       unsigned NumInts,
1042                                                       unsigned NumUndefs) {
1043   SmallVector<int, 16> Mask;
1044   for (unsigned i = 0; i < NumInts; i++)
1045     Mask.push_back(Start + i);
1046 
1047   for (unsigned i = 0; i < NumUndefs; i++)
1048     Mask.push_back(-1);
1049 
1050   return Mask;
1051 }
1052 
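// For example, with NumElts = 4 the two-operand mask <0, 5, 2, 7> becomes the
// unary mask <0, 1, 2, 3>.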
1053 llvm::SmallVector<int, 16> llvm::createUnaryMask(ArrayRef<int> Mask,
1054                                                  unsigned NumElts) {
1055   // Avoid casts in the loop and make sure we have a reasonable number.
1056   int NumEltsSigned = NumElts;
1057   assert(NumEltsSigned > 0 && "Expected smaller or non-zero element count");
1058 
1059   // If the mask chooses an element from operand 1, reduce it to choose from the
1060   // corresponding element of operand 0. Undef mask elements are unchanged.
1061   SmallVector<int, 16> UnaryMask;
1062   for (int MaskElt : Mask) {
1063     assert((MaskElt < NumEltsSigned * 2) && "Expected valid shuffle mask");
1064     int UnaryElt = MaskElt >= NumEltsSigned ? MaskElt - NumEltsSigned : MaskElt;
1065     UnaryMask.push_back(UnaryElt);
1066   }
1067   return UnaryMask;
1068 }
1069 
1070 /// A helper function for concatenating vectors. This function concatenates two
1071 /// vectors having the same element type. If the second vector has fewer
1072 /// elements than the first, it is padded with undefs.
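/// For example, concatenating a <4 x float> with a <2 x float> first widens the
/// second vector to <4 x float> with undef padding and then emits a single
/// shuffle yielding a <6 x float> whose last two lanes come from the shorter
/// vector.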
1073 static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
1074                                     Value *V2) {
1075   VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
1076   VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
1077   assert(VecTy1 && VecTy2 &&
1078          VecTy1->getScalarType() == VecTy2->getScalarType() &&
1079          "Expect two vectors with the same element type");
1080 
1081   unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements();
1082   unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements();
1083   assert(NumElts1 >= NumElts2 && "Unexpected: first vector has fewer elements");
1084 
1085   if (NumElts1 > NumElts2) {
1086     // Extend with UNDEFs.
1087     V2 = Builder.CreateShuffleVector(
1088         V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
1089   }
1090 
1091   return Builder.CreateShuffleVector(
1092       V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0));
1093 }
1094 
1095 Value *llvm::concatenateVectors(IRBuilderBase &Builder,
1096                                 ArrayRef<Value *> Vecs) {
1097   unsigned NumVecs = Vecs.size();
1098   assert(NumVecs > 1 && "Should be at least two vectors");
1099 
1100   SmallVector<Value *, 8> ResList;
1101   ResList.append(Vecs.begin(), Vecs.end());
1102   do {
1103     SmallVector<Value *, 8> TmpList;
1104     for (unsigned i = 0; i < NumVecs - 1; i += 2) {
1105       Value *V0 = ResList[i], *V1 = ResList[i + 1];
1106       assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
1107              "Only the last vector may have a different type");
1108 
1109       TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
1110     }
1111 
1112     // Push the last vector if the total number of vectors is odd.
1113     if (NumVecs % 2 != 0)
1114       TmpList.push_back(ResList[NumVecs - 1]);
1115 
1116     ResList = TmpList;
1117     NumVecs = ResList.size();
1118   } while (NumVecs > 1);
1119 
1120   return ResList[0];
1121 }
1122 
1123 bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
1124   assert(isa<VectorType>(Mask->getType()) &&
1125          isa<IntegerType>(Mask->getType()->getScalarType()) &&
1126          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
1127              1 &&
1128          "Mask must be a vector of i1");
1129 
1130   auto *ConstMask = dyn_cast<Constant>(Mask);
1131   if (!ConstMask)
1132     return false;
1133   if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
1134     return true;
1135   if (isa<ScalableVectorType>(ConstMask->getType()))
1136     return false;
1137   for (unsigned
1138            I = 0,
1139            E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
1140        I != E; ++I) {
1141     if (auto *MaskElt = ConstMask->getAggregateElement(I))
1142       if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
1143         continue;
1144     return false;
1145   }
1146   return true;
1147 }
1148 
1149 bool llvm::maskIsAllOneOrUndef(Value *Mask) {
1150   assert(isa<VectorType>(Mask->getType()) &&
1151          isa<IntegerType>(Mask->getType()->getScalarType()) &&
1152          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
1153              1 &&
1154          "Mask must be a vector of i1");
1155 
1156   auto *ConstMask = dyn_cast<Constant>(Mask);
1157   if (!ConstMask)
1158     return false;
1159   if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
1160     return true;
1161   if (isa<ScalableVectorType>(ConstMask->getType()))
1162     return false;
1163   for (unsigned
1164            I = 0,
1165            E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
1166        I != E; ++I) {
1167     if (auto *MaskElt = ConstMask->getAggregateElement(I))
1168       if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
1169         continue;
1170     return false;
1171   }
1172   return true;
1173 }
1174 
1175 bool llvm::maskContainsAllOneOrUndef(Value *Mask) {
1176   assert(isa<VectorType>(Mask->getType()) &&
1177          isa<IntegerType>(Mask->getType()->getScalarType()) &&
1178          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
1179              1 &&
1180          "Mask must be a vector of i1");
1181 
1182   auto *ConstMask = dyn_cast<Constant>(Mask);
1183   if (!ConstMask)
1184     return false;
1185   if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
1186     return true;
1187   if (isa<ScalableVectorType>(ConstMask->getType()))
1188     return false;
1189   for (unsigned
1190            I = 0,
1191            E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
1192        I != E; ++I) {
1193     if (auto *MaskElt = ConstMask->getAggregateElement(I))
1194       if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
1195         return true;
1196   }
1197   return false;
1198 }
1199 
1200 /// TODO: This is a lot like known bits, but for
1201 /// vectors.  Is there something we can common this with?
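/// For example, the constant mask <i1 1, i1 0, i1 1, i1 0> yields an APInt with
/// bits 0 and 2 set; a non-constant mask conservatively yields all ones.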
1202 APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
1203   assert(isa<FixedVectorType>(Mask->getType()) &&
1204          isa<IntegerType>(Mask->getType()->getScalarType()) &&
1205          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
1206              1 &&
1207          "Mask must be a fixed width vector of i1");
1208 
1209   const unsigned VWidth =
1210       cast<FixedVectorType>(Mask->getType())->getNumElements();
1211   APInt DemandedElts = APInt::getAllOnes(VWidth);
1212   if (auto *CV = dyn_cast<ConstantVector>(Mask))
1213     for (unsigned i = 0; i < VWidth; i++)
1214       if (CV->getAggregateElement(i)->isNullValue())
1215         DemandedElts.clearBit(i);
1216   return DemandedElts;
1217 }
1218 
1219 bool InterleavedAccessInfo::isStrided(int Stride) {
1220   unsigned Factor = std::abs(Stride);
1221   return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
1222 }
1223 
1224 void InterleavedAccessInfo::collectConstStrideAccesses(
1225     MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
1226     const DenseMap<Value*, const SCEV*> &Strides) {
1227   auto &DL = TheLoop->getHeader()->getDataLayout();
1228 
1229   // Since it's desired that the load/store instructions be maintained in
1230   // "program order" for the interleaved access analysis, we have to visit the
1231   // blocks in the loop in reverse postorder (i.e., in a topological order).
1232   // Such an ordering will ensure that any load/store that may be executed
1233   // before a second load/store will precede the second load/store in
1234   // AccessStrideInfo.
1235   LoopBlocksDFS DFS(TheLoop);
1236   DFS.perform(LI);
1237   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
1238     for (auto &I : *BB) {
1239       Value *Ptr = getLoadStorePointerOperand(&I);
1240       if (!Ptr)
1241         continue;
1242       Type *ElementTy = getLoadStoreType(&I);
1243 
1244       // Currently, codegen doesn't support cases where the type size doesn't
1245       // match the alloc size. Skip them for now.
1246       uint64_t Size = DL.getTypeAllocSize(ElementTy);
1247       if (Size * 8 != DL.getTypeSizeInBits(ElementTy))
1248         continue;
1249 
1250       // We don't check wrapping here because we don't know yet if Ptr will be
1251       // part of a full group or a group with gaps. Checking wrapping for all
1252       // pointers (even those that end up in groups with no gaps) will be overly
1253       // conservative. For full groups, wrapping should be ok since if we would
1254       // wrap around the address space we would do a memory access at nullptr
1255       // even without the transformation. The wrapping checks are therefore
1256       // deferred until after we've formed the interleaved groups.
1257       int64_t Stride =
1258         getPtrStride(PSE, ElementTy, Ptr, TheLoop, Strides,
1259                      /*Assume=*/true, /*ShouldCheckWrap=*/false).value_or(0);
1260 
1261       const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
1262       AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
1263                                               getLoadStoreAlignment(&I));
1264     }
1265 }
1266 
1267 // Analyze interleaved accesses and collect them into interleaved load and
1268 // store groups.
1269 //
1270 // When generating code for an interleaved load group, we effectively hoist all
1271 // loads in the group to the location of the first load in program order. When
1272 // generating code for an interleaved store group, we sink all stores to the
1273 // location of the last store. This code motion can change the order of load
1274 // and store instructions and may break dependences.
1275 //
1276 // The code generation strategy mentioned above ensures that we won't violate
1277 // any write-after-read (WAR) dependences.
1278 //
1279 // E.g., for the WAR dependence:  a = A[i];      // (1)
1280 //                                A[i] = b;      // (2)
1281 //
1282 // The store group of (2) is always inserted at or below (2), and the load
1283 // group of (1) is always inserted at or above (1). Thus, the instructions will
1284 // never be reordered. All other dependences are checked to ensure the
1285 // correctness of the instruction reordering.
1286 //
1287 // The algorithm visits all memory accesses in the loop in bottom-up program
1288 // order. Program order is established by traversing the blocks in the loop in
1289 // reverse postorder when collecting the accesses.
1290 //
1291 // We visit the memory accesses in bottom-up order because it can simplify the
1292 // construction of store groups in the presence of write-after-write (WAW)
1293 // dependences.
1294 //
1295 // E.g., for the WAW dependence:  A[i] = a;      // (1)
1296 //                                A[i] = b;      // (2)
1297 //                                A[i + 1] = c;  // (3)
1298 //
1299 // We will first create a store group with (3) and (2). (1) can't be added to
1300 // this group because it and (2) are dependent. However, (1) can be grouped
1301 // with other accesses that may precede it in program order. Note that a
1302 // bottom-up order does not imply that WAW dependences should not be checked.
1303 void InterleavedAccessInfo::analyzeInterleaving(
1304                                  bool EnablePredicatedInterleavedMemAccesses) {
1305   LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
1306   const auto &Strides = LAI->getSymbolicStrides();
1307 
1308   // Holds all accesses with a constant stride.
1309   MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
1310   collectConstStrideAccesses(AccessStrideInfo, Strides);
1311 
1312   if (AccessStrideInfo.empty())
1313     return;
1314 
1315   // Collect the dependences in the loop.
1316   collectDependences();
1317 
1318   // Holds all interleaved store groups temporarily.
1319   SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
1320   // Holds all interleaved load groups temporarily.
1321   SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;
1322   // Groups added to this set cannot have new members added.
1323   SmallPtrSet<InterleaveGroup<Instruction> *, 4> CompletedLoadGroups;
1324 
1325   // Search in bottom-up program order for pairs of accesses (A and B) that can
1326   // form interleaved load or store groups. In the algorithm below, access A
1327   // precedes access B in program order. We initialize a group for B in the
1328   // outer loop of the algorithm, and then in the inner loop, we attempt to
1329   // insert each A into B's group if:
1330   //
1331   //  1. A and B have the same stride,
1332   //  2. A and B have the same memory object size, and
1333   //  3. A belongs in B's group according to its distance from B.
1334   //
1335   // Special care is taken to ensure group formation will not break any
1336   // dependences.
1337   for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
1338        BI != E; ++BI) {
1339     Instruction *B = BI->first;
1340     StrideDescriptor DesB = BI->second;
1341 
1342     // Initialize a group for B if it has an allowable stride. Even if we don't
1343     // create a group for B, we continue with the bottom-up algorithm to ensure
1344     // we don't break any of B's dependences.
1345     InterleaveGroup<Instruction> *GroupB = nullptr;
1346     if (isStrided(DesB.Stride) &&
1347         (!isPredicated(B->getParent()) || EnablePredicatedInterleavedMemAccesses)) {
1348       GroupB = getInterleaveGroup(B);
1349       if (!GroupB) {
1350         LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
1351                           << '\n');
1352         GroupB = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
1353         if (B->mayWriteToMemory())
1354           StoreGroups.insert(GroupB);
1355         else
1356           LoadGroups.insert(GroupB);
1357       }
1358     }
1359 
1360     for (auto AI = std::next(BI); AI != E; ++AI) {
1361       Instruction *A = AI->first;
1362       StrideDescriptor DesA = AI->second;
1363 
1364       // Our code motion strategy implies that we can't have dependences
1365       // between accesses in an interleaved group and other accesses located
1366       // between the first and last member of the group. Note that this also
1367       // means that a group can't have more than one member at a given offset.
1368       // The accesses in a group can have dependences with other accesses, but
1369       // we must ensure we don't extend the boundaries of the group such that
1370       // we encompass those dependent accesses.
1371       //
1372       // For example, assume we have the sequence of accesses shown below in a
1373       // stride-2 loop:
1374       //
1375       //  (1, 2) is a group | A[i]   = a;  // (1)
1376       //                    | A[i-1] = b;  // (2) |
1377       //                      A[i-3] = c;  // (3)
1378       //                      A[i]   = d;  // (4) | (2, 4) is not a group
1379       //
1380       // Because accesses (2) and (3) are dependent, we can group (2) with (1)
1381       // but not with (4). If we did, the dependent access (3) would be within
1382       // the boundaries of the (2, 4) group.
1383       auto DependentMember = [&](InterleaveGroup<Instruction> *Group,
1384                                  StrideEntry *A) -> Instruction * {
1385         for (uint32_t Index = 0; Index < Group->getFactor(); ++Index) {
1386           Instruction *MemberOfGroupB = Group->getMember(Index);
1387           if (MemberOfGroupB && !canReorderMemAccessesForInterleavedGroups(
1388                                     A, &*AccessStrideInfo.find(MemberOfGroupB)))
1389             return MemberOfGroupB;
1390         }
1391         return nullptr;
1392       };
1393 
1394       auto GroupA = getInterleaveGroup(A);
1395       // If A is a load, dependences are tolerable, so there's nothing to do
1396       // here. If both A and B belong to the same (store) group, they are
1397       // independent, even if the dependences have not been recorded. If both
1398       // GroupA and GroupB are null, there's nothing to do here.
1399       if (A->mayWriteToMemory() && GroupA != GroupB) {
1400         Instruction *DependentInst = nullptr;
1401         // If GroupB is a load group, we have to compare AI against all
1402         // members of GroupB because if any load within GroupB has a dependency
1403         // on AI, we need to mark GroupB as complete and also release the
1404         // store GroupA (if A belongs to one). The former prevents incorrect
1405         // hoisting of load B above store A while the latter prevents incorrect
1406         // sinking of store A below load B.
1407         if (GroupB && LoadGroups.contains(GroupB))
1408           DependentInst = DependentMember(GroupB, &*AI);
1409         else if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI))
1410           DependentInst = B;
1411 
1412         if (DependentInst) {
1413           // A has a store dependence on B (or on some load within GroupB) and
1414           // is part of a store group. Release A's group to prevent illegal
1415           // sinking of A below B. A will then be free to form another group
1416           // with instructions that precede it.
1417           if (GroupA && StoreGroups.contains(GroupA)) {
1418             LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
1419                                  "dependence between "
1420                               << *A << " and " << *DependentInst << '\n');
1421             StoreGroups.remove(GroupA);
1422             releaseGroup(GroupA);
1423           }
1424           // If B is a load and part of an interleave group, no earlier loads
1425           // can be added to B's interleave group, because this would mean the
1426           // DependentInst would move across store A. Mark the interleave group
1427           // as complete.
1428           if (GroupB && LoadGroups.contains(GroupB)) {
1429             LLVM_DEBUG(dbgs() << "LV: Marking interleave group for " << *B
1430                               << " as complete.\n");
1431             CompletedLoadGroups.insert(GroupB);
1432           }
1433         }
1434       }
1435       if (CompletedLoadGroups.contains(GroupB)) {
1436         // Skip trying to add A to B; continue to look for other conflicting
1437         // A's in groups to be released.
1438         continue;
1439       }
1440 
1441       // At this point, we've checked for illegal code motion. If either A or B
1442       // isn't strided, there's nothing left to do.
1443       if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
1444         continue;
1445 
1446       // Ignore A if it's already in a group or isn't the same kind of memory
1447       // operation as B.
1448       // Note that mayReadFromMemory() isn't mutually exclusive with
1449       // mayWriteToMemory() in the case of atomic loads. We shouldn't see
1450       // those here; canVectorizeMemory() should have returned false, except
1451       // when we were asked for optimization remarks.
1452       if (isInterleaved(A) ||
1453           (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
1454           (A->mayWriteToMemory() != B->mayWriteToMemory()))
1455         continue;
1456 
1457       // Check rules 1 and 2. Ignore A if its stride or size is different from
1458       // that of B.
1459       if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
1460         continue;
1461 
1462       // Ignore A if the memory objects of A and B don't belong to the same
1463       // address space.
1464       if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
1465         continue;
1466 
1467       // Calculate the distance from A to B.
1468       const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
1469           PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
1470       if (!DistToB)
1471         continue;
1472       int64_t DistanceToB = DistToB->getAPInt().getSExtValue();
1473 
1474       // Check rule 3. Ignore A if its distance to B is not a multiple of the
1475       // size.
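           // For example, with i32 members (DesB.Size == 4), DistanceToB == 8
           // is fine (exactly two elements), but DistanceToB == 6 is rejected
           // because A would not land on an element boundary of B's group.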
1476       if (DistanceToB % static_cast<int64_t>(DesB.Size))
1477         continue;
1478 
1479       // All members of a predicated interleave-group must have the same
1480       // predicate, and currently must reside in the same BB.
1481       BasicBlock *BlockA = A->getParent();
1482       BasicBlock *BlockB = B->getParent();
1483       if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
1484           (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
1485         continue;
1486 
1487       // The index of A is the index of B plus A's distance to B in multiples
1488       // of the size.
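           // For example, in a stride-2 loop over i32 (DesB.Size == 4), if B
           // accesses A[2*i] at index 0 and A accesses A[2*i + 1], then
           // DistanceToB == 4, so A is placed at index 0 + 4 / 4 == 1.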
1489       int IndexA =
1490           GroupB->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);
1491 
1492       // Try to insert A into B's group.
1493       if (GroupB->insertMember(A, IndexA, DesA.Alignment)) {
1494         LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
1495                           << "    into the interleave group with" << *B
1496                           << '\n');
1497         InterleaveGroupMap[A] = GroupB;
1498 
1499         // Set the first load in program order as the insert position.
1500         if (A->mayReadFromMemory())
1501           GroupB->setInsertPos(A);
1502       }
1503     } // Iteration over A accesses.
1504   }   // Iteration over B accesses.
1505 
1506   auto InvalidateGroupIfMemberMayWrap = [&](InterleaveGroup<Instruction> *Group,
1507                                             int Index,
1508                                             const char *FirstOrLast) -> bool {
1509     Instruction *Member = Group->getMember(Index);
1510     assert(Member && "Group member does not exist");
1511     Value *MemberPtr = getLoadStorePointerOperand(Member);
1512     Type *AccessTy = getLoadStoreType(Member);
1513     if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides,
1514                      /*Assume=*/false, /*ShouldCheckWrap=*/true).value_or(0))
1515       return false;
1516     LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
1517                       << FirstOrLast
1518                       << " group member potentially pointer-wrapping.\n");
1519     releaseGroup(Group);
1520     return true;
1521   };
1522 
1523   // Remove interleaved groups with gaps whose memory
1524   // accesses may wrap around. We have to revisit the getPtrStride analysis,
1525   // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
1526   // not check wrapping (see documentation there).
1527   // For now we use Assume=false;
1528   // TODO: Change to Assume=true, making sure we don't exceed the threshold
1529   // of runtime SCEV assumption checks (thereby potentially failing to
1530   // vectorize altogether).
1531   // Additional optional optimizations:
1532   // TODO: If we are peeling the loop and we know that the first pointer
1533   // doesn't wrap, then we can deduce that all pointers in the group don't
1534   // wrap. This means that we can forcefully peel the loop in order to only
1535   // have to check the first pointer for no-wrap. Once we change to
1536   // Assume=true, we'll need at most one runtime check per interleaved group.
1537   for (auto *Group : LoadGroups) {
1538     // Case 1: A full group. We can skip the checks; for full groups, if the
1539     // wide load would wrap around the address space, we would do a memory
1540     // access at nullptr even without the transformation.
1541     if (Group->getNumMembers() == Group->getFactor())
1542       continue;
1543 
1544     // Case 2: If the first and last members of the group don't wrap, this
1545     // implies that all the pointers in the group don't wrap.
1546     // So we check only group member 0 (which is always guaranteed to exist)
1547     // and group member Factor - 1; if the latter doesn't exist, we rely on
1548     // peeling (if it is a non-reversed access -- see Case 3).
1549     if (InvalidateGroupIfMemberMayWrap(Group, 0, "first"))
1550       continue;
1551     if (Group->getMember(Group->getFactor() - 1))
1552       InvalidateGroupIfMemberMayWrap(Group, Group->getFactor() - 1, "last");
1553     else {
1554       // Case 3: A non-reversed interleaved load group with gaps: We need
1555       // to execute at least one scalar epilogue iteration. This will ensure
1556       // we don't speculatively access memory out-of-bounds. We only need
1557       // to look for a member at index factor - 1, since every group must have
1558       // a member at index zero.
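           // For example, a stride-2 i32 group that only loads A[2*i] (a gap
           // at index 1) would, when widened, also read A[2*i + 1]; keeping
           // the last iteration in a scalar epilogue ensures that final extra
           // read stays within the range the original loop accesses.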
1559       if (Group->isReverse()) {
1560         LLVM_DEBUG(
1561             dbgs() << "LV: Invalidate candidate interleaved group due to "
1562                       "a reverse access with gaps.\n");
1563         releaseGroup(Group);
1564         continue;
1565       }
1566       LLVM_DEBUG(
1567           dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
1568       RequiresScalarEpilogue = true;
1569     }
1570   }
1571 
1572   for (auto *Group : StoreGroups) {
1573     // Case 1: A full group. We can skip the checks; for full groups, if the
1574     // wide store would wrap around the address space, we would do a memory
1575     // access at nullptr even without the transformation.
1576     if (Group->getNumMembers() == Group->getFactor())
1577       continue;
1578 
1579     // Interleave-store-group with gaps is implemented using masked wide store.
1580     // Remove interleaved store groups with gaps if
1581     // masked-interleaved-accesses are not enabled by the target.
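         // For example, a stride-2 group that only stores A[2*i] (a gap at
         // index 1) would be emitted as a wide store with the odd lanes masked
         // off; without that target support, the group is released instead.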
1582     if (!EnablePredicatedInterleavedMemAccesses) {
1583       LLVM_DEBUG(
1584           dbgs() << "LV: Invalidate candidate interleaved store group due "
1585                     "to gaps.\n");
1586       releaseGroup(Group);
1587       continue;
1588     }
1589 
1590     // Case 2: If the first and last members of the group don't wrap, this
1591     // implies that all the pointers in the group don't wrap.
1592     // So we check only group member 0 (which is always guaranteed to exist)
1593     // and the last group member. Case 3 (scalar epilogue) is not relevant
1594     // for stores with gaps, which are implemented with a masked store
1595     // (rather than speculative accesses, as in loads).
1596     if (InvalidateGroupIfMemberMayWrap(Group, 0, "first"))
1597       continue;
1598     for (int Index = Group->getFactor() - 1; Index > 0; Index--)
1599       if (Group->getMember(Index)) {
1600         InvalidateGroupIfMemberMayWrap(Group, Index, "last");
1601         break;
1602       }
1603   }
1604 }
1605 
1606 void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
1607   // If no group had triggered the requirement to create an epilogue loop,
1608   // there is nothing to do.
1609   if (!requiresScalarEpilogue())
1610     return;
1611 
1612   // Release groups requiring scalar epilogues. Note that this also removes them
1613   // from InterleaveGroups.
1614   bool ReleasedGroup = InterleaveGroups.remove_if([&](auto *Group) {
1615     if (!Group->requiresScalarEpilogue())
1616       return false;
1617     LLVM_DEBUG(
1618         dbgs()
1619         << "LV: Invalidate candidate interleaved group due to gaps that "
1620            "require a scalar epilogue (not allowed under optsize) and cannot "
1621            "be masked (not enabled).\n");
1622     releaseGroupWithoutRemovingFromSet(Group);
1623     return true;
1624   });
1625   assert(ReleasedGroup && "At least one group must be invalidated, as a "
1626                           "scalar epilogue was required");
1627   (void)ReleasedGroup;
1628   RequiresScalarEpilogue = false;
1629 }
1630 
1631 template <typename InstT>
1632 void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
1633   llvm_unreachable("addMetadata can only be used for Instruction");
1634 }
1635 
1636 namespace llvm {
1637 template <>
1638 void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
1639   SmallVector<Value *, 4> VL;
1640   std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
1641                  [](std::pair<int, Instruction *> p) { return p.second; });
1642   propagateMetadata(NewInst, VL);
1643 }
1644 } // namespace llvm
1645