//===------- VectorCombine.cpp - Optimize partial vector operations -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass optimizes scalar/vector interactions using target cost models. The
// transforms implemented here may not fit in traditional loop-based or SLP
// vectorization passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/VectorCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Vectorize.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "vector-combine"
STATISTIC(NumVecLoad, "Number of vector loads formed");
STATISTIC(NumVecCmp, "Number of vector compares formed");
STATISTIC(NumVecBO, "Number of vector binops formed");
STATISTIC(NumVecCmpBO, "Number of vector compare + binop formed");
STATISTIC(NumShufOfBitcast, "Number of shuffles moved after bitcast");
STATISTIC(NumScalarBO, "Number of scalar binops formed");
STATISTIC(NumScalarCmp, "Number of scalar compares formed");

static cl::opt<bool> DisableVectorCombine(
    "disable-vector-combine", cl::init(false), cl::Hidden,
    cl::desc("Disable all vector combine transforms"));

static cl::opt<bool> DisableBinopExtractShuffle(
    "disable-binop-extract-shuffle", cl::init(false), cl::Hidden,
    cl::desc("Disable binop extract to shuffle transforms"));

static const unsigned InvalidIndex = std::numeric_limits<unsigned>::max();

namespace {
class VectorCombine {
public:
  VectorCombine(Function &F, const TargetTransformInfo &TTI,
                const DominatorTree &DT)
      : F(F), Builder(F.getContext()), TTI(TTI), DT(DT) {}

  bool run();

private:
  Function &F;
  IRBuilder<> Builder;
  const TargetTransformInfo &TTI;
  const DominatorTree &DT;

  bool vectorizeLoadInsert(Instruction &I);
  ExtractElementInst *getShuffleExtract(ExtractElementInst *Ext0,
                                        ExtractElementInst *Ext1,
                                        unsigned PreferredExtractIndex) const;
  bool isExtractExtractCheap(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                             unsigned Opcode,
                             ExtractElementInst *&ConvertToShuffle,
                             unsigned PreferredExtractIndex);
  void foldExtExtCmp(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                     Instruction &I);
  void foldExtExtBinop(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                       Instruction &I);
  bool foldExtractExtract(Instruction &I);
  bool foldBitcastShuf(Instruction &I);
  bool scalarizeBinopOrCmp(Instruction &I);
  bool foldExtractedCmps(Instruction &I);
};
} // namespace

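/// Replace all uses of \p Old with \p New, and transfer the old value's name
/// to the new value so the rewritten IR keeps a readable name.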
static void replaceValue(Value &Old, Value &New) {
  Old.replaceAllUsesWith(&New);
  New.takeName(&Old);
}

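/// Try to replace "insert a scalar load into a vector" with a single wider
/// vector load. A sketch of the basic rewrite (types and element counts are
/// illustrative only):
///   %s = load float, float* %p
///   %i = insertelement <4 x float> undef, float %s, i32 0
///     --> %v = load <4 x float>, <4 x float>* (bitcast %p)
///         %i = shufflevector %v, undef, <0, undef, undef, undef>
/// The code below also looks through an optional extractelement feeding the
/// insert and through constant gep offsets on the source pointer.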
bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
  // Match insert into fixed vector of scalar value.
  // TODO: Handle non-zero insert index.
  auto *Ty = dyn_cast<FixedVectorType>(I.getType());
  Value *Scalar;
  if (!Ty || !match(&I, m_InsertElt(m_Undef(), m_Value(Scalar), m_ZeroInt())) ||
      !Scalar->hasOneUse())
    return false;

  // Optionally match an extract from another vector.
  Value *X;
  bool HasExtract = match(Scalar, m_ExtractElt(m_Value(X), m_ZeroInt()));
  if (!HasExtract)
    X = Scalar;

  // Match source value as load of scalar or vector.
  // Do not vectorize scalar load (widening) if atomic/volatile or under
  // asan/hwasan/memtag/tsan. The widened load may load data from dirty regions
  // or create data races non-existent in the source.
  auto *Load = dyn_cast<LoadInst>(X);
  if (!Load || !Load->isSimple() || !Load->hasOneUse() ||
      Load->getFunction()->hasFnAttribute(Attribute::SanitizeMemTag) ||
      mustSuppressSpeculation(*Load))
    return false;

  const DataLayout &DL = I.getModule()->getDataLayout();
  Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts();
  assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type");

  // If original AS != Load's AS, we can't bitcast the original pointer and have
  // to use Load's operand instead. Ideally we would want to strip pointer casts
  // without changing AS, but there's no API to do that ATM.
  unsigned AS = Load->getPointerAddressSpace();
  if (AS != SrcPtr->getType()->getPointerAddressSpace())
    SrcPtr = Load->getPointerOperand();

  // We are potentially transforming byte-sized (8-bit) memory accesses, so make
  // sure we have all of our type-based constraints in place for this target.
  Type *ScalarTy = Scalar->getType();
  uint64_t ScalarSize = ScalarTy->getPrimitiveSizeInBits();
  unsigned MinVectorSize = TTI.getMinVectorRegisterBitWidth();
  if (!ScalarSize || !MinVectorSize || MinVectorSize % ScalarSize != 0 ||
      ScalarSize % 8 != 0)
    return false;

  // Check safety of replacing the scalar load with a larger vector load.
  // We use minimal alignment (maximum flexibility) because we only care about
  // the dereferenceable region. When calculating cost and creating a new op,
  // we may use a larger value based on alignment attributes.
  unsigned MinVecNumElts = MinVectorSize / ScalarSize;
  auto *MinVecTy = VectorType::get(ScalarTy, MinVecNumElts, false);
  unsigned OffsetEltIndex = 0;
  Align Alignment = Load->getAlign();
  if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT)) {
    // It is not safe to load directly from the pointer, but we can still peek
    // through gep offsets and check whether it is safe to load from a base
    // address with updated alignment. If it is, we can shuffle the element(s)
    // into place after loading.
    unsigned OffsetBitWidth = DL.getIndexTypeSizeInBits(SrcPtr->getType());
    APInt Offset(OffsetBitWidth, 0);
    SrcPtr = SrcPtr->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

    // We want to shuffle the result down from a high element of a vector, so
    // the offset must be positive.
    if (Offset.isNegative())
      return false;

    // The offset must be a multiple of the scalar element size so that the
    // loaded element can be shuffled cleanly into place.
    uint64_t ScalarSizeInBytes = ScalarSize / 8;
    if (Offset.urem(ScalarSizeInBytes) != 0)
      return false;

    // If we load MinVecNumElts, will our target element still be loaded?
    OffsetEltIndex = Offset.udiv(ScalarSizeInBytes).getZExtValue();
    if (OffsetEltIndex >= MinVecNumElts)
      return false;

    if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT))
      return false;

    // Update alignment with offset value. Note that the offset could be negated
    // to more accurately represent "(new) SrcPtr - Offset = (old) SrcPtr", but
    // negation does not change the result of the alignment calculation.
    Alignment = commonAlignment(Alignment, Offset.getZExtValue());
  }

  // Original pattern: insertelt undef, load [free casts of] PtrOp, 0
  // Use the greater of the alignment on the load or its source pointer.
  Alignment = std::max(SrcPtr->getPointerAlignment(DL), Alignment);
  Type *LoadTy = Load->getType();
  InstructionCost OldCost =
      TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS);
  APInt DemandedElts = APInt::getOneBitSet(MinVecNumElts, 0);
  OldCost += TTI.getScalarizationOverhead(MinVecTy, DemandedElts,
                                          /* Insert */ true, HasExtract);

  // New pattern: load VecPtr
  InstructionCost NewCost =
      TTI.getMemoryOpCost(Instruction::Load, MinVecTy, Alignment, AS);
  // Optionally, we are shuffling the loaded vector element(s) into place.
  if (OffsetEltIndex)
    NewCost += TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, MinVecTy);

  // We can aggressively convert to the vector form because the backend can
  // invert this transform if it does not result in a performance win.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // It is safe and potentially profitable to load a vector directly:
  // inselt undef, load Scalar, 0 --> load VecPtr
  IRBuilder<> Builder(Load);
  Value *CastedPtr = Builder.CreateBitCast(SrcPtr, MinVecTy->getPointerTo(AS));
  Value *VecLd = Builder.CreateAlignedLoad(MinVecTy, CastedPtr, Alignment);

  // Set everything but element 0 to undef to prevent poison from propagating
  // from the extra loaded memory. This will also optionally shrink/grow the
  // vector from the loaded size to the output size.
  // We assume this operation has no cost in codegen if there was no offset.
  // Note that we could use freeze to avoid poison problems, but then we might
  // still need a shuffle to change the vector size.
  unsigned OutputNumElts = Ty->getNumElements();
  SmallVector<int, 16> Mask(OutputNumElts, UndefMaskElem);
  assert(OffsetEltIndex < MinVecNumElts && "Address offset too big");
  Mask[0] = OffsetEltIndex;
  VecLd = Builder.CreateShuffleVector(VecLd, Mask);

  replaceValue(I, *VecLd);
  ++NumVecLoad;
  return true;
}

/// Determine which, if any, of the inputs should be replaced by a shuffle
/// followed by extract from a different index.
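/// For example, given extracts from lane 0 and lane 3 with equal costs, the
/// lane-3 extract is the one to rewrite as a shuffle of lane 3 down to lane 0
/// followed by an extract from lane 0, so both sides read the same lane.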
ExtractElementInst *VectorCombine::getShuffleExtract(
    ExtractElementInst *Ext0, ExtractElementInst *Ext1,
    unsigned PreferredExtractIndex = InvalidIndex) const {
  assert(isa<ConstantInt>(Ext0->getIndexOperand()) &&
         isa<ConstantInt>(Ext1->getIndexOperand()) &&
         "Expected constant extract indexes");

  unsigned Index0 = cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue();
  unsigned Index1 = cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue();

  // If the extract indexes are identical, no shuffle is needed.
  if (Index0 == Index1)
    return nullptr;

  Type *VecTy = Ext0->getVectorOperand()->getType();
  assert(VecTy == Ext1->getVectorOperand()->getType() && "Need matching types");
  InstructionCost Cost0 =
      TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
  InstructionCost Cost1 =
      TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);

  // If both costs are invalid, no shuffle is needed.
  if (!Cost0.isValid() && !Cost1.isValid())
    return nullptr;

  // We are extracting from 2 different indexes, so one operand must be shuffled
  // before performing a vector operation and/or extract. The more expensive
  // extract will be replaced by a shuffle.
  if (Cost0 > Cost1)
    return Ext0;
  if (Cost1 > Cost0)
    return Ext1;

  // If the costs are equal and there is a preferred extract index, shuffle the
  // opposite operand.
  if (PreferredExtractIndex == Index0)
    return Ext1;
  if (PreferredExtractIndex == Index1)
    return Ext0;

  // Otherwise, replace the extract with the higher index.
  return Index0 > Index1 ? Ext0 : Ext1;
}

/// Compare the relative costs of 2 extracts followed by scalar operation vs.
/// vector operation(s) followed by extract. Return true if the existing
/// instructions are cheaper than a vector alternative. Otherwise, return false
/// and if one of the extracts should be transformed to a shufflevector, set
/// \p ConvertToShuffle to that extract instruction.
bool VectorCombine::isExtractExtractCheap(ExtractElementInst *Ext0,
                                          ExtractElementInst *Ext1,
                                          unsigned Opcode,
                                          ExtractElementInst *&ConvertToShuffle,
                                          unsigned PreferredExtractIndex) {
  assert(isa<ConstantInt>(Ext0->getOperand(1)) &&
         isa<ConstantInt>(Ext1->getOperand(1)) &&
         "Expected constant extract indexes");
  Type *ScalarTy = Ext0->getType();
  auto *VecTy = cast<VectorType>(Ext0->getOperand(0)->getType());
  InstructionCost ScalarOpCost, VectorOpCost;

  // Get cost estimates for scalar and vector versions of the operation.
  bool IsBinOp = Instruction::isBinaryOp(Opcode);
  if (IsBinOp) {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  } else {
    assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
           "Expected a compare");
    ScalarOpCost = TTI.getCmpSelInstrCost(Opcode, ScalarTy,
                                          CmpInst::makeCmpResultType(ScalarTy));
    VectorOpCost = TTI.getCmpSelInstrCost(Opcode, VecTy,
                                          CmpInst::makeCmpResultType(VecTy));
  }

  // Get cost estimates for the extract elements. These costs will factor into
  // both sequences.
  unsigned Ext0Index = cast<ConstantInt>(Ext0->getOperand(1))->getZExtValue();
  unsigned Ext1Index = cast<ConstantInt>(Ext1->getOperand(1))->getZExtValue();

  InstructionCost Extract0Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext0Index);
  InstructionCost Extract1Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext1Index);

  // A more expensive extract will always be replaced by a splat shuffle.
  // For example, if Ext0 is more expensive:
  // opcode (extelt V0, Ext0), (ext V1, Ext1) -->
  // extelt (opcode (splat V0, Ext0), V1), Ext1
  // TODO: Evaluate whether that always results in lowest cost. Alternatively,
  //       check the cost of creating a broadcast shuffle and shuffling both
  //       operands to element 0.
  InstructionCost CheapExtractCost = std::min(Extract0Cost, Extract1Cost);

  // Extra uses of the extracts mean that we include those costs in the
  // vector total because those instructions will not be eliminated.
  InstructionCost OldCost, NewCost;
  if (Ext0->getOperand(0) == Ext1->getOperand(0) && Ext0Index == Ext1Index) {
    // Handle a special case. If the 2 extracts are identical, adjust the
    // formulas to account for that. The extra use charge allows for either the
    // CSE'd pattern or an unoptimized form with identical values:
    // opcode (extelt V, C), (extelt V, C) --> extelt (opcode V, V), C
    bool HasUseTax = Ext0 == Ext1 ? !Ext0->hasNUses(2)
                                  : !Ext0->hasOneUse() || !Ext1->hasOneUse();
    OldCost = CheapExtractCost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost + HasUseTax * CheapExtractCost;
  } else {
    // Handle the general case. Each extract is actually a different value:
    // opcode (extelt V0, C0), (extelt V1, C1) --> extelt (opcode V0, V1), C
    OldCost = Extract0Cost + Extract1Cost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost +
              !Ext0->hasOneUse() * Extract0Cost +
              !Ext1->hasOneUse() * Extract1Cost;
  }

  ConvertToShuffle = getShuffleExtract(Ext0, Ext1, PreferredExtractIndex);
  if (ConvertToShuffle) {
    if (IsBinOp && DisableBinopExtractShuffle)
      return true;

    // If we are extracting from 2 different indexes, then one operand must be
    // shuffled before performing the vector operation. The shuffle mask is
    // undefined except for 1 lane that is being translated to the remaining
    // extraction lane. Therefore, it is a splat shuffle. Ex:
    // ShufMask = { undef, undef, 0, undef }
    // TODO: The cost model has an option for a "broadcast" shuffle
    //       (splat-from-element-0), but no option for a more general splat.
    NewCost +=
        TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
  }

  // Aggressively form a vector op if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  return OldCost < NewCost;
}

/// Create a shuffle that translates (shifts) 1 element from the input vector
/// to a new element location.
static Value *createShiftShuffle(Value *Vec, unsigned OldIndex,
                                 unsigned NewIndex, IRBuilder<> &Builder) {
  // The shuffle mask is undefined except for 1 lane that is being translated
  // to the new element index. Example for OldIndex == 2 and NewIndex == 0:
  // ShufMask = { 2, undef, undef, undef }
  auto *VecTy = cast<FixedVectorType>(Vec->getType());
  SmallVector<int, 32> ShufMask(VecTy->getNumElements(), UndefMaskElem);
  ShufMask[NewIndex] = OldIndex;
  return Builder.CreateShuffleVector(Vec, ShufMask, "shift");
}

/// Given an extract element instruction with constant index operand, shuffle
/// the source vector (shift the scalar element) to a NewIndex for extraction.
/// Return null if the input can be constant folded, so that we are not creating
/// unnecessary instructions.
static ExtractElementInst *translateExtract(ExtractElementInst *ExtElt,
                                            unsigned NewIndex,
                                            IRBuilder<> &Builder) {
  // If the extract can be constant-folded, the surrounding IR has not been
  // simplified yet. Defer to other passes to handle that.
  Value *X = ExtElt->getVectorOperand();
  Value *C = ExtElt->getIndexOperand();
  assert(isa<ConstantInt>(C) && "Expected a constant index operand");
  if (isa<Constant>(X))
    return nullptr;

  Value *Shuf = createShiftShuffle(X, cast<ConstantInt>(C)->getZExtValue(),
                                   NewIndex, Builder);
  return cast<ExtractElementInst>(Builder.CreateExtractElement(Shuf, NewIndex));
}

/// Try to reduce extract element costs by converting scalar compares to vector
/// compares followed by extract.
/// cmp (ext0 V0, C), (ext1 V1, C)
void VectorCombine::foldExtExtCmp(ExtractElementInst *Ext0,
                                  ExtractElementInst *Ext1, Instruction &I) {
  assert(isa<CmpInst>(&I) && "Expected a compare");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // cmp Pred (extelt V0, C), (extelt V1, C) --> extelt (cmp Pred V0, V1), C
  ++NumVecCmp;
  CmpInst::Predicate Pred = cast<CmpInst>(&I)->getPredicate();
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecCmp = Builder.CreateCmp(Pred, V0, V1);
  Value *NewExt = Builder.CreateExtractElement(VecCmp, Ext0->getIndexOperand());
  replaceValue(I, *NewExt);
}

/// Try to reduce extract element costs by converting scalar binops to vector
/// binops followed by extract.
/// bo (ext0 V0, C), (ext1 V1, C)
void VectorCombine::foldExtExtBinop(ExtractElementInst *Ext0,
                                    ExtractElementInst *Ext1, Instruction &I) {
  assert(isa<BinaryOperator>(&I) && "Expected a binary operator");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // bo (extelt V0, C), (extelt V1, C) --> extelt (bo V0, V1), C
  ++NumVecBO;
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecBO =
      Builder.CreateBinOp(cast<BinaryOperator>(&I)->getOpcode(), V0, V1);

  // All IR flags are safe to back-propagate because any potential poison
  // created in unused vector elements is discarded by the extract.
  if (auto *VecBOInst = dyn_cast<Instruction>(VecBO))
    VecBOInst->copyIRFlags(&I);

  Value *NewExt = Builder.CreateExtractElement(VecBO, Ext0->getIndexOperand());
  replaceValue(I, *NewExt);
}

/// Match an instruction with extracted vector operands.
bool VectorCombine::foldExtractExtract(Instruction &I) {
  // It is not safe to transform things like div, urem, etc. because we may
  // create undefined behavior when executing those on unknown vector elements.
  if (!isSafeToSpeculativelyExecute(&I))
    return false;

  Instruction *I0, *I1;
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!match(&I, m_Cmp(Pred, m_Instruction(I0), m_Instruction(I1))) &&
      !match(&I, m_BinOp(m_Instruction(I0), m_Instruction(I1))))
    return false;

  Value *V0, *V1;
  uint64_t C0, C1;
  if (!match(I0, m_ExtractElt(m_Value(V0), m_ConstantInt(C0))) ||
      !match(I1, m_ExtractElt(m_Value(V1), m_ConstantInt(C1))) ||
      V0->getType() != V1->getType())
    return false;

  // If the scalar value 'I' is going to be re-inserted into a vector, then try
  // to create an extract to that same element. The extract/insert can be
  // reduced to a "select shuffle".
  // TODO: If we add a larger pattern match that starts from an insert, this
  //       probably becomes unnecessary.
  auto *Ext0 = cast<ExtractElementInst>(I0);
  auto *Ext1 = cast<ExtractElementInst>(I1);
  uint64_t InsertIndex = InvalidIndex;
  if (I.hasOneUse())
    match(I.user_back(),
          m_InsertElt(m_Value(), m_Value(), m_ConstantInt(InsertIndex)));

  ExtractElementInst *ExtractToChange;
  if (isExtractExtractCheap(Ext0, Ext1, I.getOpcode(), ExtractToChange,
                            InsertIndex))
    return false;

  if (ExtractToChange) {
    unsigned CheapExtractIdx = ExtractToChange == Ext0 ? C1 : C0;
    ExtractElementInst *NewExtract =
        translateExtract(ExtractToChange, CheapExtractIdx, Builder);
    if (!NewExtract)
      return false;
    if (ExtractToChange == Ext0)
      Ext0 = NewExtract;
    else
      Ext1 = NewExtract;
  }

  if (Pred != CmpInst::BAD_ICMP_PREDICATE)
    foldExtExtCmp(Ext0, Ext1, I);
  else
    foldExtExtBinop(Ext0, Ext1, I);

  return true;
}

/// If this is a bitcast of a shuffle, try to bitcast the source vector to the
/// destination type followed by shuffle. This can enable further transforms by
/// moving bitcasts or shuffles together.
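/// A small illustrative example of the wide-to-narrow element case handled
/// below (types and mask values are examples only):
///   bitcast (shuffle <2 x i64> %v, <2 x i64> undef, <1, 0>) to <4 x i32>
///     --> shuffle (bitcast <2 x i64> %v to <4 x i32>), undef, <2, 3, 0, 1>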
bool VectorCombine::foldBitcastShuf(Instruction &I) {
  Value *V;
  ArrayRef<int> Mask;
  if (!match(&I, m_BitCast(
                     m_OneUse(m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))))))
    return false;

  // 1) Do not fold a bitcast of a shuffle for scalable types. First, the
  // shuffle cost for a scalable type is unknown; second, we cannot tell
  // whether the narrowed shuffle mask for a scalable type would be a splat.
  // 2) Disallow non-vector casts and length-changing shuffles.
  // TODO: We could allow any shuffle.
  auto *DestTy = dyn_cast<FixedVectorType>(I.getType());
  auto *SrcTy = dyn_cast<FixedVectorType>(V->getType());
  if (!SrcTy || !DestTy || I.getOperand(0)->getType() != SrcTy)
    return false;

  // The new shuffle must not cost more than the old shuffle. The bitcast is
  // moved ahead of the shuffle, so assume that it has the same cost as before.
  InstructionCost DestCost =
      TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, DestTy);
  InstructionCost SrcCost =
      TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, SrcTy);
  if (DestCost > SrcCost || !DestCost.isValid())
    return false;

  unsigned DestNumElts = DestTy->getNumElements();
  unsigned SrcNumElts = SrcTy->getNumElements();
  SmallVector<int, 16> NewMask;
  if (SrcNumElts <= DestNumElts) {
    // The bitcast is from wide to narrow/equal elements. The shuffle mask can
    // always be expanded to the equivalent form choosing narrower elements.
    assert(DestNumElts % SrcNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = DestNumElts / SrcNumElts;
    narrowShuffleMaskElts(ScaleFactor, Mask, NewMask);
  } else {
    // The bitcast is from narrow elements to wide elements. The shuffle mask
    // must choose consecutive elements to allow casting first.
    assert(SrcNumElts % DestNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = SrcNumElts / DestNumElts;
    if (!widenShuffleMaskElts(ScaleFactor, Mask, NewMask))
      return false;
  }
  // bitcast (shuf V, MaskC) --> shuf (bitcast V), MaskC'
  ++NumShufOfBitcast;
  Value *CastV = Builder.CreateBitCast(V, DestTy);
  Value *Shuf = Builder.CreateShuffleVector(CastV, NewMask);
  replaceValue(I, *Shuf);
  return true;
}

/// Match a vector binop or compare instruction with at least one inserted
/// scalar operand and convert to scalar binop/cmp followed by insertelement.
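/// A minimal sketch of the intended rewrite (names and widths are illustrative):
///   %ins = insertelement <4 x i32> VecC0, i32 %x, i32 0
///   %r   = add <4 x i32> %ins, VecC1
///     --> %s = add i32 %x, (element 0 of VecC1)
///         %r = insertelement <4 x i32> (add VecC0, VecC1), i32 %s, i32 0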
bool VectorCombine::scalarizeBinopOrCmp(Instruction &I) {
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  Value *Ins0, *Ins1;
  if (!match(&I, m_BinOp(m_Value(Ins0), m_Value(Ins1))) &&
      !match(&I, m_Cmp(Pred, m_Value(Ins0), m_Value(Ins1))))
    return false;

  // Do not convert the vector condition of a vector select into a scalar
  // condition. That may cause problems for codegen because of differences in
  // boolean formats and register-file transfers.
  // TODO: Can we account for that in the cost model?
  bool IsCmp = Pred != CmpInst::Predicate::BAD_ICMP_PREDICATE;
  if (IsCmp)
    for (User *U : I.users())
      if (match(U, m_Select(m_Specific(&I), m_Value(), m_Value())))
        return false;

  // Match against one or both scalar values being inserted into constant
  // vectors:
  // vec_op VecC0, (inselt VecC1, V1, Index)
  // vec_op (inselt VecC0, V0, Index), VecC1
  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index)
  // TODO: Deal with mismatched index constants and variable indexes?
  Constant *VecC0 = nullptr, *VecC1 = nullptr;
  Value *V0 = nullptr, *V1 = nullptr;
  uint64_t Index0 = 0, Index1 = 0;
  if (!match(Ins0, m_InsertElt(m_Constant(VecC0), m_Value(V0),
                               m_ConstantInt(Index0))) &&
      !match(Ins0, m_Constant(VecC0)))
    return false;
  if (!match(Ins1, m_InsertElt(m_Constant(VecC1), m_Value(V1),
                               m_ConstantInt(Index1))) &&
      !match(Ins1, m_Constant(VecC1)))
    return false;

  bool IsConst0 = !V0;
  bool IsConst1 = !V1;
  if (IsConst0 && IsConst1)
    return false;
  if (!IsConst0 && !IsConst1 && Index0 != Index1)
    return false;

  // Bail out for a single insertion if the inserted value is a load.
  // TODO: Handle this once getVectorInstrCost can account for loads/stores.
  auto *I0 = dyn_cast_or_null<Instruction>(V0);
  auto *I1 = dyn_cast_or_null<Instruction>(V1);
  if ((IsConst0 && I1 && I1->mayReadFromMemory()) ||
      (IsConst1 && I0 && I0->mayReadFromMemory()))
    return false;

  uint64_t Index = IsConst0 ? Index1 : Index0;
  Type *ScalarTy = IsConst0 ? V1->getType() : V0->getType();
  Type *VecTy = I.getType();
  assert(VecTy->isVectorTy() &&
         (IsConst0 || IsConst1 || V0->getType() == V1->getType()) &&
         (ScalarTy->isIntegerTy() || ScalarTy->isFloatingPointTy() ||
          ScalarTy->isPointerTy()) &&
         "Unexpected types for insert element into binop or cmp");

  unsigned Opcode = I.getOpcode();
  InstructionCost ScalarOpCost, VectorOpCost;
  if (IsCmp) {
    ScalarOpCost = TTI.getCmpSelInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getCmpSelInstrCost(Opcode, VecTy);
  } else {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  }

  // Get cost estimate for the insert element. This cost will factor into
  // both sequences.
  InstructionCost InsertCost =
      TTI.getVectorInstrCost(Instruction::InsertElement, VecTy, Index);
  InstructionCost OldCost =
      (IsConst0 ? 0 : InsertCost) + (IsConst1 ? 0 : InsertCost) + VectorOpCost;
  InstructionCost NewCost = ScalarOpCost + InsertCost +
                            (IsConst0 ? 0 : !Ins0->hasOneUse() * InsertCost) +
                            (IsConst1 ? 0 : !Ins1->hasOneUse() * InsertCost);

  // We want to scalarize unless the vector variant actually has lower cost.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index) -->
  // inselt NewVecC, (scalar_op V0, V1), Index
  if (IsCmp)
    ++NumScalarCmp;
  else
    ++NumScalarBO;

  // For constant cases, extract the scalar element; this should constant-fold.
  if (IsConst0)
    V0 = ConstantExpr::getExtractElement(VecC0, Builder.getInt64(Index));
  if (IsConst1)
    V1 = ConstantExpr::getExtractElement(VecC1, Builder.getInt64(Index));

  Value *Scalar =
      IsCmp ? Builder.CreateCmp(Pred, V0, V1)
            : Builder.CreateBinOp((Instruction::BinaryOps)Opcode, V0, V1);

  Scalar->setName(I.getName() + ".scalar");

  // All IR flags are safe to back-propagate. There is no potential for extra
  // poison to be created by the scalar instruction.
  if (auto *ScalarInst = dyn_cast<Instruction>(Scalar))
    ScalarInst->copyIRFlags(&I);

  // Fold the vector constants in the original vectors into a new base vector.
  Constant *NewVecC = IsCmp ? ConstantExpr::getCompare(Pred, VecC0, VecC1)
                            : ConstantExpr::get(Opcode, VecC0, VecC1);
  Value *Insert = Builder.CreateInsertElement(NewVecC, Scalar, Index);
  replaceValue(I, *Insert);
  return true;
}

/// Try to combine a scalar binop + 2 scalar compares of extracted elements of
/// a vector into vector operations followed by extract. Note: The SLP pass
/// may miss this pattern because of implementation problems.
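/// A sketch of the pattern and rewrite (indexes, types, and predicate below
/// are illustrative only):
///   %e0 = extractelement <4 x i32> %x, i32 0
///   %e1 = extractelement <4 x i32> %x, i32 3
///   %c0 = icmp sgt i32 %e0, 42
///   %c1 = icmp sgt i32 %e1, 7
///   %r  = and i1 %c0, %c1
///     --> %vc = icmp sgt <4 x i32> %x, <i32 42, i32 undef, i32 undef, i32 7>
///         %vb = and <4 x i1> %vc, (shuffle %vc, lane 3 -> lane 0)
///         %r  = extractelement <4 x i1> %vb, i32 0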
bool VectorCombine::foldExtractedCmps(Instruction &I) {
  // We are looking for a scalar binop of booleans.
  // binop i1 (cmp Pred I0, C0), (cmp Pred I1, C1)
  if (!I.isBinaryOp() || !I.getType()->isIntegerTy(1))
    return false;

  // The compare predicates should match, and each compare should have a
  // constant operand.
  // TODO: Relax the one-use constraints.
  Value *B0 = I.getOperand(0), *B1 = I.getOperand(1);
  Instruction *I0, *I1;
  Constant *C0, *C1;
  CmpInst::Predicate P0, P1;
  if (!match(B0, m_OneUse(m_Cmp(P0, m_Instruction(I0), m_Constant(C0)))) ||
      !match(B1, m_OneUse(m_Cmp(P1, m_Instruction(I1), m_Constant(C1)))) ||
      P0 != P1)
    return false;

  // The compare operands must be extracts of the same vector with constant
  // extract indexes.
  // TODO: Relax the one-use constraints.
  Value *X;
  uint64_t Index0, Index1;
  if (!match(I0, m_OneUse(m_ExtractElt(m_Value(X), m_ConstantInt(Index0)))) ||
      !match(I1, m_OneUse(m_ExtractElt(m_Specific(X), m_ConstantInt(Index1)))))
    return false;

  auto *Ext0 = cast<ExtractElementInst>(I0);
  auto *Ext1 = cast<ExtractElementInst>(I1);
  ExtractElementInst *ConvertToShuf = getShuffleExtract(Ext0, Ext1);
  if (!ConvertToShuf)
    return false;

  // The original scalar pattern is:
  // binop i1 (cmp Pred (ext X, Index0), C0), (cmp Pred (ext X, Index1), C1)
  CmpInst::Predicate Pred = P0;
  unsigned CmpOpcode = CmpInst::isFPPredicate(Pred) ? Instruction::FCmp
                                                    : Instruction::ICmp;
  auto *VecTy = dyn_cast<FixedVectorType>(X->getType());
  if (!VecTy)
    return false;

  InstructionCost OldCost =
      TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
  OldCost += TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);
  OldCost += TTI.getCmpSelInstrCost(CmpOpcode, I0->getType()) * 2;
  OldCost += TTI.getArithmeticInstrCost(I.getOpcode(), I.getType());

  // The proposed vector pattern is:
  // vcmp = cmp Pred X, VecC
  // ext (binop vNi1 vcmp, (shuffle vcmp, Index1)), Index0
  int CheapIndex = ConvertToShuf == Ext0 ? Index1 : Index0;
  int ExpensiveIndex = ConvertToShuf == Ext0 ? Index0 : Index1;
  auto *CmpTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(X->getType()));
  InstructionCost NewCost = TTI.getCmpSelInstrCost(CmpOpcode, X->getType());
  NewCost +=
      TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, CmpTy);
  NewCost += TTI.getArithmeticInstrCost(I.getOpcode(), CmpTy);
  NewCost += TTI.getVectorInstrCost(Ext0->getOpcode(), CmpTy, CheapIndex);

  // Aggressively form vector ops if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // Create a vector constant from the 2 scalar constants.
  SmallVector<Constant *, 32> CmpC(VecTy->getNumElements(),
                                   UndefValue::get(VecTy->getElementType()));
  CmpC[Index0] = C0;
  CmpC[Index1] = C1;
  Value *VCmp = Builder.CreateCmp(Pred, X, ConstantVector::get(CmpC));

  Value *Shuf = createShiftShuffle(VCmp, ExpensiveIndex, CheapIndex, Builder);
  Value *VecLogic = Builder.CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                        VCmp, Shuf);
  Value *NewExt = Builder.CreateExtractElement(VecLogic, CheapIndex);
  replaceValue(I, *NewExt);
  ++NumVecCmpBO;
  return true;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
bool VectorCombine::run() {
  if (DisableVectorCombine)
    return false;

  // Don't attempt vectorization if the target does not support vectors.
  if (!TTI.getNumberOfRegisters(TTI.getRegisterClassForType(/*Vector*/ true)))
    return false;

  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    // Do not delete instructions inside this loop; that would invalidate the
    // iterator.
    // Walk the block forwards to enable simple iterative chains of transforms.
    // TODO: It could be more efficient to remove dead instructions
    //       iteratively in this loop rather than waiting until the end.
    for (Instruction &I : BB) {
      if (isa<DbgInfoIntrinsic>(I))
        continue;
      Builder.SetInsertPoint(&I);
      MadeChange |= vectorizeLoadInsert(I);
      MadeChange |= foldExtractExtract(I);
      MadeChange |= foldBitcastShuf(I);
      MadeChange |= scalarizeBinopOrCmp(I);
      MadeChange |= foldExtractedCmps(I);
    }
  }

  // We're done with transforms, so remove dead instructions.
  if (MadeChange)
    for (BasicBlock &BB : F)
      SimplifyInstructionsInBlock(&BB);

  return MadeChange;
}

// Pass manager boilerplate below here.

namespace {
class VectorCombineLegacyPass : public FunctionPass {
public:
  static char ID;
  VectorCombineLegacyPass() : FunctionPass(ID) {
    initializeVectorCombineLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    FunctionPass::getAnalysisUsage(AU);
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    VectorCombine Combiner(F, TTI, DT);
    return Combiner.run();
  }
};
} // namespace

char VectorCombineLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(VectorCombineLegacyPass, "vector-combine",
                      "Optimize scalar/vector ops", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(VectorCombineLegacyPass, "vector-combine",
                    "Optimize scalar/vector ops", false, false)
Pass *llvm::createVectorCombinePass() {
  return new VectorCombineLegacyPass();
}

PreservedAnalyses VectorCombinePass::run(Function &F,
                                         FunctionAnalysisManager &FAM) {
  TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
  DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  VectorCombine Combiner(F, TTI, DT);
  if (!Combiner.run())
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<AAManager>();
  PA.preserve<BasicAA>();
  return PA;
}
851