//===------- VectorCombine.cpp - Optimize partial vector operations -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass optimizes scalar/vector interactions using target cost models. The
// transforms implemented here may not fit in traditional loop-based or SLP
// vectorization passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/VectorCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Vectorize.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "vector-combine"
STATISTIC(NumVecLoad, "Number of vector loads formed");
STATISTIC(NumVecCmp, "Number of vector compares formed");
STATISTIC(NumVecBO, "Number of vector binops formed");
STATISTIC(NumVecCmpBO, "Number of vector compare + binop formed");
STATISTIC(NumShufOfBitcast, "Number of shuffles moved after bitcast");
STATISTIC(NumScalarBO, "Number of scalar binops formed");
STATISTIC(NumScalarCmp, "Number of scalar compares formed");

static cl::opt<bool> DisableVectorCombine(
    "disable-vector-combine", cl::init(false), cl::Hidden,
    cl::desc("Disable all vector combine transforms"));

static cl::opt<bool> DisableBinopExtractShuffle(
    "disable-binop-extract-shuffle", cl::init(false), cl::Hidden,
    cl::desc("Disable binop extract to shuffle transforms"));

static cl::opt<unsigned> MaxInstrsToScan(
    "vector-combine-max-scan-instrs", cl::init(30), cl::Hidden,
    cl::desc("Max number of instructions to scan for vector combining."));

static const unsigned InvalidIndex = std::numeric_limits<unsigned>::max();

namespace {
class VectorCombine {
public:
  VectorCombine(Function &F, const TargetTransformInfo &TTI,
                const DominatorTree &DT, AAResults &AA)
      : F(F), Builder(F.getContext()), TTI(TTI), DT(DT), AA(AA) {}

  bool run();

private:
  Function &F;
  IRBuilder<> Builder;
  const TargetTransformInfo &TTI;
  const DominatorTree &DT;
  AAResults &AA;

  bool vectorizeLoadInsert(Instruction &I);
  ExtractElementInst *getShuffleExtract(ExtractElementInst *Ext0,
                                        ExtractElementInst *Ext1,
                                        unsigned PreferredExtractIndex) const;
  bool isExtractExtractCheap(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                             unsigned Opcode,
                             ExtractElementInst *&ConvertToShuffle,
                             unsigned PreferredExtractIndex);
  void foldExtExtCmp(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                     Instruction &I);
  void foldExtExtBinop(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                       Instruction &I);
  bool foldExtractExtract(Instruction &I);
  bool foldBitcastShuf(Instruction &I);
  bool scalarizeBinopOrCmp(Instruction &I);
  bool foldExtractedCmps(Instruction &I);
  bool foldSingleElementStore(Instruction &I);
};
} // namespace

static void replaceValue(Value &Old, Value &New) {
  Old.replaceAllUsesWith(&New);
  New.takeName(&Old);
}

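// A minimal sketch of the widening performed below (illustrative types and
// alignment; assumes the target's minimum vector register width covers
// <4 x float> and that %p is dereferenceable for the full vector). The final
// shuffle keeps only lane 0 so poison from the extra loaded memory does not
// propagate:
//   %s = load float, float* %p, align 4
//   %r = insertelement <4 x float> undef, float %s, i32 0
// -->
//   %bc = bitcast float* %p to <4 x float>*
//   %ld = load <4 x float>, <4 x float>* %bc, align 4
//   %r = shufflevector <4 x float> %ld, <4 x float> undef,
//                      <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>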
bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
  // Match insert into fixed vector of scalar value.
  // TODO: Handle non-zero insert index.
  auto *Ty = dyn_cast<FixedVectorType>(I.getType());
  Value *Scalar;
  if (!Ty || !match(&I, m_InsertElt(m_Undef(), m_Value(Scalar), m_ZeroInt())) ||
      !Scalar->hasOneUse())
    return false;

  // Optionally match an extract from another vector.
  Value *X;
  bool HasExtract = match(Scalar, m_ExtractElt(m_Value(X), m_ZeroInt()));
  if (!HasExtract)
    X = Scalar;

  // Match source value as load of scalar or vector.
  // Do not vectorize scalar load (widening) if atomic/volatile or under
  // asan/hwasan/memtag/tsan. The widened load may load data from dirty regions
  // or create data races non-existent in the source.
  auto *Load = dyn_cast<LoadInst>(X);
  if (!Load || !Load->isSimple() || !Load->hasOneUse() ||
      Load->getFunction()->hasFnAttribute(Attribute::SanitizeMemTag) ||
      mustSuppressSpeculation(*Load))
    return false;

  const DataLayout &DL = I.getModule()->getDataLayout();
  Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts();
  assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type");

  // If original AS != Load's AS, we can't bitcast the original pointer and have
  // to use Load's operand instead. Ideally we would want to strip pointer casts
  // without changing AS, but there's no API to do that ATM.
  unsigned AS = Load->getPointerAddressSpace();
  if (AS != SrcPtr->getType()->getPointerAddressSpace())
    SrcPtr = Load->getPointerOperand();

  // We are potentially transforming byte-sized (8-bit) memory accesses, so make
  // sure we have all of our type-based constraints in place for this target.
  Type *ScalarTy = Scalar->getType();
  uint64_t ScalarSize = ScalarTy->getPrimitiveSizeInBits();
  unsigned MinVectorSize = TTI.getMinVectorRegisterBitWidth();
  if (!ScalarSize || !MinVectorSize || MinVectorSize % ScalarSize != 0 ||
      ScalarSize % 8 != 0)
    return false;

  // Check safety of replacing the scalar load with a larger vector load.
  // We use minimal alignment (maximum flexibility) because we only care about
  // the dereferenceable region. When calculating cost and creating a new op,
  // we may use a larger value based on alignment attributes.
  unsigned MinVecNumElts = MinVectorSize / ScalarSize;
  auto *MinVecTy = VectorType::get(ScalarTy, MinVecNumElts, false);
  unsigned OffsetEltIndex = 0;
  Align Alignment = Load->getAlign();
  if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT)) {
    // It is not safe to load directly from the pointer, but we can still peek
    // through gep offsets and check if it is safe to load from a base address
    // with updated alignment. If it is, we can shuffle the element(s) into
    // place after loading.
    unsigned OffsetBitWidth = DL.getIndexTypeSizeInBits(SrcPtr->getType());
    APInt Offset(OffsetBitWidth, 0);
    SrcPtr = SrcPtr->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

    // We want to shuffle the result down from a high element of a vector, so
    // the offset must be positive.
    if (Offset.isNegative())
      return false;

    // The offset must be a multiple of the scalar element size so that the
    // loaded value can be shuffled cleanly into place.
    uint64_t ScalarSizeInBytes = ScalarSize / 8;
    if (Offset.urem(ScalarSizeInBytes) != 0)
      return false;

    // If we load MinVecNumElts, will our target element still be loaded?
    OffsetEltIndex = Offset.udiv(ScalarSizeInBytes).getZExtValue();
    if (OffsetEltIndex >= MinVecNumElts)
      return false;

    if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT))
      return false;

    // Update alignment with offset value. Note that the offset could be negated
    // to more accurately represent "(new) SrcPtr - Offset = (old) SrcPtr", but
    // negation does not change the result of the alignment calculation.
    Alignment = commonAlignment(Alignment, Offset.getZExtValue());
  }

  // Original pattern: insertelt undef, load [free casts of] PtrOp, 0
  // Use the greater of the alignment on the load or its source pointer.
  Alignment = std::max(SrcPtr->getPointerAlignment(DL), Alignment);
  Type *LoadTy = Load->getType();
  InstructionCost OldCost =
      TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS);
  APInt DemandedElts = APInt::getOneBitSet(MinVecNumElts, 0);
  OldCost += TTI.getScalarizationOverhead(MinVecTy, DemandedElts,
                                          /* Insert */ true, HasExtract);

  // New pattern: load VecPtr
  InstructionCost NewCost =
      TTI.getMemoryOpCost(Instruction::Load, MinVecTy, Alignment, AS);
  // Optionally, we are shuffling the loaded vector element(s) into place.
  // For the mask, set everything but element 0 to undef to prevent poison from
  // propagating from the extra loaded memory. This will also optionally
  // shrink/grow the vector from the loaded size to the output size.
  // We assume this operation has no cost in codegen if there was no offset.
  // Note that we could use freeze to avoid poison problems, but then we might
  // still need a shuffle to change the vector size.
  unsigned OutputNumElts = Ty->getNumElements();
  SmallVector<int, 16> Mask(OutputNumElts, UndefMaskElem);
  assert(OffsetEltIndex < MinVecNumElts && "Address offset too big");
  Mask[0] = OffsetEltIndex;
  if (OffsetEltIndex)
    NewCost += TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, MinVecTy, Mask);

  // We can aggressively convert to the vector form because the backend can
  // invert this transform if it does not result in a performance win.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // It is safe and potentially profitable to load a vector directly:
  // inselt undef, load Scalar, 0 --> load VecPtr
  IRBuilder<> Builder(Load);
  Value *CastedPtr = Builder.CreateBitCast(SrcPtr, MinVecTy->getPointerTo(AS));
  Value *VecLd = Builder.CreateAlignedLoad(MinVecTy, CastedPtr, Alignment);
  VecLd = Builder.CreateShuffleVector(VecLd, Mask);

  replaceValue(I, *VecLd);
  ++NumVecLoad;
  return true;
}

/// Determine which, if any, of the inputs should be replaced by a shuffle
/// followed by extract from a different index.
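/// For example (illustrative; assumes equal per-index extract costs and no
/// preferred index), with extracts at indexes 0 and 2, the extract at the
/// higher index 2 is returned, and the caller can rewrite it as a shuffle plus
/// an extract from lane 0:
///   %e1 = extractelement <4 x i32> %v1, i32 2
/// -->
///   %s = shufflevector <4 x i32> %v1, <4 x i32> undef,
///                      <4 x i32> <i32 2, i32 undef, i32 undef, i32 undef>
///   %e1 = extractelement <4 x i32> %s, i32 0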
ExtractElementInst *VectorCombine::getShuffleExtract(
    ExtractElementInst *Ext0, ExtractElementInst *Ext1,
    unsigned PreferredExtractIndex = InvalidIndex) const {
  assert(isa<ConstantInt>(Ext0->getIndexOperand()) &&
         isa<ConstantInt>(Ext1->getIndexOperand()) &&
         "Expected constant extract indexes");

  unsigned Index0 = cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue();
  unsigned Index1 = cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue();

  // If the extract indexes are identical, no shuffle is needed.
  if (Index0 == Index1)
    return nullptr;

  Type *VecTy = Ext0->getVectorOperand()->getType();
  assert(VecTy == Ext1->getVectorOperand()->getType() && "Need matching types");
  InstructionCost Cost0 =
      TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
  InstructionCost Cost1 =
      TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);

  // If both costs are invalid, no shuffle is needed.
  if (!Cost0.isValid() && !Cost1.isValid())
    return nullptr;

  // We are extracting from 2 different indexes, so one operand must be shuffled
  // before performing a vector operation and/or extract. The more expensive
  // extract will be replaced by a shuffle.
  if (Cost0 > Cost1)
    return Ext0;
  if (Cost1 > Cost0)
    return Ext1;

  // If the costs are equal and there is a preferred extract index, shuffle the
  // opposite operand.
  if (PreferredExtractIndex == Index0)
    return Ext1;
  if (PreferredExtractIndex == Index1)
    return Ext0;

  // Otherwise, replace the extract with the higher index.
  return Index0 > Index1 ? Ext0 : Ext1;
}

/// Compare the relative costs of 2 extracts followed by scalar operation vs.
/// vector operation(s) followed by extract. Return true if the existing
/// instructions are cheaper than a vector alternative. Otherwise, return false
/// and if one of the extracts should be transformed to a shufflevector, set
/// \p ConvertToShuffle to that extract instruction.
bool VectorCombine::isExtractExtractCheap(ExtractElementInst *Ext0,
                                          ExtractElementInst *Ext1,
                                          unsigned Opcode,
                                          ExtractElementInst *&ConvertToShuffle,
                                          unsigned PreferredExtractIndex) {
  assert(isa<ConstantInt>(Ext0->getOperand(1)) &&
         isa<ConstantInt>(Ext1->getOperand(1)) &&
         "Expected constant extract indexes");
  Type *ScalarTy = Ext0->getType();
  auto *VecTy = cast<VectorType>(Ext0->getOperand(0)->getType());
  InstructionCost ScalarOpCost, VectorOpCost;

  // Get cost estimates for scalar and vector versions of the operation.
  bool IsBinOp = Instruction::isBinaryOp(Opcode);
  if (IsBinOp) {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  } else {
    assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
           "Expected a compare");
    ScalarOpCost = TTI.getCmpSelInstrCost(Opcode, ScalarTy,
                                          CmpInst::makeCmpResultType(ScalarTy));
    VectorOpCost = TTI.getCmpSelInstrCost(Opcode, VecTy,
                                          CmpInst::makeCmpResultType(VecTy));
  }

  // Get cost estimates for the extract elements. These costs will factor into
  // both sequences.
  unsigned Ext0Index = cast<ConstantInt>(Ext0->getOperand(1))->getZExtValue();
  unsigned Ext1Index = cast<ConstantInt>(Ext1->getOperand(1))->getZExtValue();

  InstructionCost Extract0Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext0Index);
  InstructionCost Extract1Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext1Index);

  // A more expensive extract will always be replaced by a splat shuffle.
  // For example, if Ext0 is more expensive:
  // opcode (extelt V0, Ext0), (ext V1, Ext1) -->
  // extelt (opcode (splat V0, Ext0), V1), Ext1
  // TODO: Evaluate whether that always results in lowest cost. Alternatively,
  //       check the cost of creating a broadcast shuffle and shuffling both
  //       operands to element 0.
  InstructionCost CheapExtractCost = std::min(Extract0Cost, Extract1Cost);

  // Extra uses of the extracts mean that we include those costs in the
  // vector total because those instructions will not be eliminated.
  InstructionCost OldCost, NewCost;
  if (Ext0->getOperand(0) == Ext1->getOperand(0) && Ext0Index == Ext1Index) {
    // Handle a special case. If the 2 extracts are identical, adjust the
    // formulas to account for that. The extra use charge allows for either the
    // CSE'd pattern or an unoptimized form with identical values:
    // opcode (extelt V, C), (extelt V, C) --> extelt (opcode V, V), C
    bool HasUseTax = Ext0 == Ext1 ? !Ext0->hasNUses(2)
                                  : !Ext0->hasOneUse() || !Ext1->hasOneUse();
    OldCost = CheapExtractCost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost + HasUseTax * CheapExtractCost;
  } else {
    // Handle the general case. Each extract is actually a different value:
    // opcode (extelt V0, C0), (extelt V1, C1) --> extelt (opcode V0, V1), C
    OldCost = Extract0Cost + Extract1Cost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost +
              !Ext0->hasOneUse() * Extract0Cost +
              !Ext1->hasOneUse() * Extract1Cost;
  }

  ConvertToShuffle = getShuffleExtract(Ext0, Ext1, PreferredExtractIndex);
  if (ConvertToShuffle) {
    if (IsBinOp && DisableBinopExtractShuffle)
      return true;

    // If we are extracting from 2 different indexes, then one operand must be
    // shuffled before performing the vector operation. The shuffle mask is
    // undefined except for 1 lane that is being translated to the remaining
    // extraction lane. Therefore, it is a splat shuffle. Ex:
    // ShufMask = { undef, undef, 0, undef }
    // TODO: The cost model has an option for a "broadcast" shuffle
    //       (splat-from-element-0), but no option for a more general splat.
    NewCost +=
        TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
  }

  // Aggressively form a vector op if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  return OldCost < NewCost;
}

/// Create a shuffle that translates (shifts) 1 element from the input vector
/// to a new element location.
static Value *createShiftShuffle(Value *Vec, unsigned OldIndex,
                                 unsigned NewIndex, IRBuilder<> &Builder) {
  // The shuffle mask is undefined except for 1 lane that is being translated
  // to the new element index. Example for OldIndex == 2 and NewIndex == 0:
  // ShufMask = { 2, undef, undef, undef }
  auto *VecTy = cast<FixedVectorType>(Vec->getType());
  SmallVector<int, 32> ShufMask(VecTy->getNumElements(), UndefMaskElem);
  ShufMask[NewIndex] = OldIndex;
  return Builder.CreateShuffleVector(Vec, ShufMask, "shift");
}

/// Given an extract element instruction with constant index operand, shuffle
/// the source vector (shift the scalar element) to a NewIndex for extraction.
/// Return null if the input can be constant folded, so that we are not creating
/// unnecessary instructions.
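/// A minimal sketch in IR (illustrative; translating an extract at index 2 of
/// a <4 x i32> source to index 0):
///   %e = extractelement <4 x i32> %x, i32 2
/// -->
///   %shift = shufflevector <4 x i32> %x, <4 x i32> undef,
///                          <4 x i32> <i32 2, i32 undef, i32 undef, i32 undef>
///   %e = extractelement <4 x i32> %shift, i32 0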
static ExtractElementInst *translateExtract(ExtractElementInst *ExtElt,
                                            unsigned NewIndex,
                                            IRBuilder<> &Builder) {
  // If the extract can be constant-folded, this code is unsimplified. Defer
  // to other passes to handle that.
  Value *X = ExtElt->getVectorOperand();
  Value *C = ExtElt->getIndexOperand();
  assert(isa<ConstantInt>(C) && "Expected a constant index operand");
  if (isa<Constant>(X))
    return nullptr;

  Value *Shuf = createShiftShuffle(X, cast<ConstantInt>(C)->getZExtValue(),
                                   NewIndex, Builder);
  return cast<ExtractElementInst>(Builder.CreateExtractElement(Shuf, NewIndex));
}

/// Try to reduce extract element costs by converting scalar compares to vector
/// compares followed by extract.
/// cmp (ext0 V0, C), (ext1 V1, C)
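/// For example (illustrative types and predicate; both extracts use index 1):
///   %e0 = extractelement <4 x i32> %v0, i32 1
///   %e1 = extractelement <4 x i32> %v1, i32 1
///   %c = icmp sgt i32 %e0, %e1
/// -->
///   %vc = icmp sgt <4 x i32> %v0, %v1
///   %c = extractelement <4 x i1> %vc, i32 1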
void VectorCombine::foldExtExtCmp(ExtractElementInst *Ext0,
                                  ExtractElementInst *Ext1, Instruction &I) {
  assert(isa<CmpInst>(&I) && "Expected a compare");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // cmp Pred (extelt V0, C), (extelt V1, C) --> extelt (cmp Pred V0, V1), C
  ++NumVecCmp;
  CmpInst::Predicate Pred = cast<CmpInst>(&I)->getPredicate();
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecCmp = Builder.CreateCmp(Pred, V0, V1);
  Value *NewExt = Builder.CreateExtractElement(VecCmp, Ext0->getIndexOperand());
  replaceValue(I, *NewExt);
}

/// Try to reduce extract element costs by converting scalar binops to vector
/// binops followed by extract.
/// bo (ext0 V0, C), (ext1 V1, C)
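/// For example (illustrative; an fadd of <2 x double> lanes at index 0):
///   %e0 = extractelement <2 x double> %v0, i32 0
///   %e1 = extractelement <2 x double> %v1, i32 0
///   %r = fadd double %e0, %e1
/// -->
///   %vbo = fadd <2 x double> %v0, %v1
///   %r = extractelement <2 x double> %vbo, i32 0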
void VectorCombine::foldExtExtBinop(ExtractElementInst *Ext0,
                                    ExtractElementInst *Ext1, Instruction &I) {
  assert(isa<BinaryOperator>(&I) && "Expected a binary operator");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // bo (extelt V0, C), (extelt V1, C) --> extelt (bo V0, V1), C
  ++NumVecBO;
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecBO =
      Builder.CreateBinOp(cast<BinaryOperator>(&I)->getOpcode(), V0, V1);

  // All IR flags are safe to back-propagate because any potential poison
  // created in unused vector elements is discarded by the extract.
  if (auto *VecBOInst = dyn_cast<Instruction>(VecBO))
    VecBOInst->copyIRFlags(&I);

  Value *NewExt = Builder.CreateExtractElement(VecBO, Ext0->getIndexOperand());
  replaceValue(I, *NewExt);
}

/// Match an instruction with extracted vector operands.
bool VectorCombine::foldExtractExtract(Instruction &I) {
  // It is not safe to transform things like div, urem, etc. because we may
  // create undefined behavior when executing those on unknown vector elements.
  if (!isSafeToSpeculativelyExecute(&I))
    return false;

  Instruction *I0, *I1;
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!match(&I, m_Cmp(Pred, m_Instruction(I0), m_Instruction(I1))) &&
      !match(&I, m_BinOp(m_Instruction(I0), m_Instruction(I1))))
    return false;

  Value *V0, *V1;
  uint64_t C0, C1;
  if (!match(I0, m_ExtractElt(m_Value(V0), m_ConstantInt(C0))) ||
      !match(I1, m_ExtractElt(m_Value(V1), m_ConstantInt(C1))) ||
      V0->getType() != V1->getType())
    return false;

  // If the scalar value 'I' is going to be re-inserted into a vector, then try
  // to create an extract to that same element. The extract/insert can be
  // reduced to a "select shuffle".
  // TODO: If we add a larger pattern match that starts from an insert, this
  //       probably becomes unnecessary.
  auto *Ext0 = cast<ExtractElementInst>(I0);
  auto *Ext1 = cast<ExtractElementInst>(I1);
  uint64_t InsertIndex = InvalidIndex;
  if (I.hasOneUse())
    match(I.user_back(),
          m_InsertElt(m_Value(), m_Value(), m_ConstantInt(InsertIndex)));

  ExtractElementInst *ExtractToChange;
  if (isExtractExtractCheap(Ext0, Ext1, I.getOpcode(), ExtractToChange,
                            InsertIndex))
    return false;

  if (ExtractToChange) {
    unsigned CheapExtractIdx = ExtractToChange == Ext0 ? C1 : C0;
    ExtractElementInst *NewExtract =
        translateExtract(ExtractToChange, CheapExtractIdx, Builder);
    if (!NewExtract)
      return false;
    if (ExtractToChange == Ext0)
      Ext0 = NewExtract;
    else
      Ext1 = NewExtract;
  }

  if (Pred != CmpInst::BAD_ICMP_PREDICATE)
    foldExtExtCmp(Ext0, Ext1, I);
  else
    foldExtExtBinop(Ext0, Ext1, I);

  return true;
}

/// If this is a bitcast of a shuffle, try to bitcast the source vector to the
/// destination type followed by shuffle. This can enable further transforms by
/// moving bitcasts or shuffles together.
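/// Example with a widening bitcast (illustrative; in this direction the mask
/// must select consecutive narrow elements so it can be rewritten over the
/// wide elements):
///   %shuf = shufflevector <4 x i16> %v, <4 x i16> undef,
///                         <4 x i32> <i32 2, i32 3, i32 0, i32 1>
///   %bc = bitcast <4 x i16> %shuf to <2 x i32>
/// -->
///   %bc = bitcast <4 x i16> %v to <2 x i32>
///   %shuf = shufflevector <2 x i32> %bc, <2 x i32> undef, <2 x i32> <i32 1, i32 0>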
bool VectorCombine::foldBitcastShuf(Instruction &I) {
  Value *V;
  ArrayRef<int> Mask;
  if (!match(&I, m_BitCast(
                     m_OneUse(m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))))))
    return false;

  // 1) Do not fold a bitcast of a shuffle for a scalable type. First, the
  // shuffle cost for a scalable type is unknown; second, we cannot reason
  // about whether the narrowed shuffle mask for a scalable type is a splat.
  // 2) Disallow non-vector casts and length-changing shuffles.
  // TODO: We could allow any shuffle.
  auto *DestTy = dyn_cast<FixedVectorType>(I.getType());
  auto *SrcTy = dyn_cast<FixedVectorType>(V->getType());
  if (!SrcTy || !DestTy || I.getOperand(0)->getType() != SrcTy)
    return false;

  unsigned DestNumElts = DestTy->getNumElements();
  unsigned SrcNumElts = SrcTy->getNumElements();
  SmallVector<int, 16> NewMask;
  if (SrcNumElts <= DestNumElts) {
    // The bitcast is from wide to narrow/equal elements. The shuffle mask can
    // always be expanded to the equivalent form choosing narrower elements.
    assert(DestNumElts % SrcNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = DestNumElts / SrcNumElts;
    narrowShuffleMaskElts(ScaleFactor, Mask, NewMask);
  } else {
    // The bitcast is from narrow elements to wide elements. The shuffle mask
    // must choose consecutive elements to allow casting first.
    assert(SrcNumElts % DestNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = SrcNumElts / DestNumElts;
    if (!widenShuffleMaskElts(ScaleFactor, Mask, NewMask))
      return false;
  }

  // The new shuffle must not cost more than the old shuffle. The bitcast is
  // moved ahead of the shuffle, so assume that it has the same cost as before.
  InstructionCost DestCost = TTI.getShuffleCost(
      TargetTransformInfo::SK_PermuteSingleSrc, DestTy, NewMask);
  InstructionCost SrcCost =
      TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, SrcTy, Mask);
  if (DestCost > SrcCost || !DestCost.isValid())
    return false;

  // bitcast (shuf V, MaskC) --> shuf (bitcast V), MaskC'
  ++NumShufOfBitcast;
  Value *CastV = Builder.CreateBitCast(V, DestTy);
  Value *Shuf = Builder.CreateShuffleVector(CastV, NewMask);
  replaceValue(I, *Shuf);
  return true;
}

/// Match a vector binop or compare instruction with at least one inserted
/// scalar operand and convert to scalar binop/cmp followed by insertelement.
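/// For example (illustrative constants; the base vector of the result is the
/// constant-folded "add" of the two original constant vectors):
///   %ins = insertelement <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i32 %x, i32 0
///   %r = add <4 x i32> %ins, <i32 10, i32 20, i32 30, i32 40>
/// -->
///   %s = add i32 %x, 10
///   %r = insertelement <4 x i32> <i32 11, i32 22, i32 33, i32 44>, i32 %s, i32 0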
bool VectorCombine::scalarizeBinopOrCmp(Instruction &I) {
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  Value *Ins0, *Ins1;
  if (!match(&I, m_BinOp(m_Value(Ins0), m_Value(Ins1))) &&
      !match(&I, m_Cmp(Pred, m_Value(Ins0), m_Value(Ins1))))
    return false;

  // Do not convert the vector condition of a vector select into a scalar
  // condition. That may cause problems for codegen because of differences in
  // boolean formats and register-file transfers.
  // TODO: Can we account for that in the cost model?
  bool IsCmp = Pred != CmpInst::Predicate::BAD_ICMP_PREDICATE;
  if (IsCmp)
    for (User *U : I.users())
      if (match(U, m_Select(m_Specific(&I), m_Value(), m_Value())))
        return false;

  // Match against one or both scalar values being inserted into constant
  // vectors:
  // vec_op VecC0, (inselt VecC1, V1, Index)
  // vec_op (inselt VecC0, V0, Index), VecC1
  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index)
  // TODO: Deal with mismatched index constants and variable indexes?
  Constant *VecC0 = nullptr, *VecC1 = nullptr;
  Value *V0 = nullptr, *V1 = nullptr;
  uint64_t Index0 = 0, Index1 = 0;
  if (!match(Ins0, m_InsertElt(m_Constant(VecC0), m_Value(V0),
                               m_ConstantInt(Index0))) &&
      !match(Ins0, m_Constant(VecC0)))
    return false;
  if (!match(Ins1, m_InsertElt(m_Constant(VecC1), m_Value(V1),
                               m_ConstantInt(Index1))) &&
      !match(Ins1, m_Constant(VecC1)))
    return false;

  bool IsConst0 = !V0;
  bool IsConst1 = !V1;
  if (IsConst0 && IsConst1)
    return false;
  if (!IsConst0 && !IsConst1 && Index0 != Index1)
    return false;

  // Bail for single insertion if it is a load.
  // TODO: Handle this once getVectorInstrCost can cost for load/stores.
  auto *I0 = dyn_cast_or_null<Instruction>(V0);
  auto *I1 = dyn_cast_or_null<Instruction>(V1);
  if ((IsConst0 && I1 && I1->mayReadFromMemory()) ||
      (IsConst1 && I0 && I0->mayReadFromMemory()))
    return false;

  uint64_t Index = IsConst0 ? Index1 : Index0;
  Type *ScalarTy = IsConst0 ? V1->getType() : V0->getType();
  Type *VecTy = I.getType();
  assert(VecTy->isVectorTy() &&
         (IsConst0 || IsConst1 || V0->getType() == V1->getType()) &&
         (ScalarTy->isIntegerTy() || ScalarTy->isFloatingPointTy() ||
          ScalarTy->isPointerTy()) &&
         "Unexpected types for insert element into binop or cmp");

  unsigned Opcode = I.getOpcode();
  InstructionCost ScalarOpCost, VectorOpCost;
  if (IsCmp) {
    ScalarOpCost = TTI.getCmpSelInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getCmpSelInstrCost(Opcode, VecTy);
  } else {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  }

  // Get cost estimate for the insert element. This cost will factor into
  // both sequences.
  InstructionCost InsertCost =
      TTI.getVectorInstrCost(Instruction::InsertElement, VecTy, Index);
  InstructionCost OldCost =
      (IsConst0 ? 0 : InsertCost) + (IsConst1 ? 0 : InsertCost) + VectorOpCost;
  InstructionCost NewCost = ScalarOpCost + InsertCost +
                            (IsConst0 ? 0 : !Ins0->hasOneUse() * InsertCost) +
                            (IsConst1 ? 0 : !Ins1->hasOneUse() * InsertCost);

  // We want to scalarize unless the vector variant actually has lower cost.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index) -->
  // inselt NewVecC, (scalar_op V0, V1), Index
  if (IsCmp)
    ++NumScalarCmp;
  else
    ++NumScalarBO;

  // For the constant cases, extract the scalar element; this should
  // constant-fold.
  if (IsConst0)
    V0 = ConstantExpr::getExtractElement(VecC0, Builder.getInt64(Index));
  if (IsConst1)
    V1 = ConstantExpr::getExtractElement(VecC1, Builder.getInt64(Index));

  Value *Scalar =
      IsCmp ? Builder.CreateCmp(Pred, V0, V1)
            : Builder.CreateBinOp((Instruction::BinaryOps)Opcode, V0, V1);

  Scalar->setName(I.getName() + ".scalar");

  // All IR flags are safe to back-propagate. There is no potential for extra
  // poison to be created by the scalar instruction.
  if (auto *ScalarInst = dyn_cast<Instruction>(Scalar))
    ScalarInst->copyIRFlags(&I);

  // Fold the vector constants in the original vectors into a new base vector.
  Constant *NewVecC = IsCmp ? ConstantExpr::getCompare(Pred, VecC0, VecC1)
                            : ConstantExpr::get(Opcode, VecC0, VecC1);
  Value *Insert = Builder.CreateInsertElement(NewVecC, Scalar, Index);
  replaceValue(I, *Insert);
  return true;
}

/// Try to combine a scalar binop + 2 scalar compares of extracted elements of
/// a vector into vector operations followed by extract. Note: The SLP pass
/// may miss this pattern because of implementation problems.
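/// A minimal sketch in IR (illustrative; assumes the extract at index 3 is the
/// more expensive one, so it is the extract converted to a shuffle):
///   %e0 = extractelement <4 x i32> %x, i32 0
///   %e1 = extractelement <4 x i32> %x, i32 3
///   %c0 = icmp sgt i32 %e0, 42
///   %c1 = icmp sgt i32 %e1, 99
///   %r = and i1 %c0, %c1
/// -->
///   %vc = icmp sgt <4 x i32> %x, <i32 42, i32 undef, i32 undef, i32 99>
///   %shift = shufflevector <4 x i1> %vc, <4 x i1> undef,
///                          <4 x i32> <i32 3, i32 undef, i32 undef, i32 undef>
///   %vand = and <4 x i1> %vc, %shift
///   %r = extractelement <4 x i1> %vand, i32 0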
bool VectorCombine::foldExtractedCmps(Instruction &I) {
  // We are looking for a scalar binop of booleans.
  // binop i1 (cmp Pred I0, C0), (cmp Pred I1, C1)
  if (!I.isBinaryOp() || !I.getType()->isIntegerTy(1))
    return false;

  // The compare predicates should match, and each compare should have a
  // constant operand.
  // TODO: Relax the one-use constraints.
  Value *B0 = I.getOperand(0), *B1 = I.getOperand(1);
  Instruction *I0, *I1;
  Constant *C0, *C1;
  CmpInst::Predicate P0, P1;
  if (!match(B0, m_OneUse(m_Cmp(P0, m_Instruction(I0), m_Constant(C0)))) ||
      !match(B1, m_OneUse(m_Cmp(P1, m_Instruction(I1), m_Constant(C1)))) ||
      P0 != P1)
    return false;

  // The compare operands must be extracts of the same vector with constant
  // extract indexes.
  // TODO: Relax the one-use constraints.
  Value *X;
  uint64_t Index0, Index1;
  if (!match(I0, m_OneUse(m_ExtractElt(m_Value(X), m_ConstantInt(Index0)))) ||
      !match(I1, m_OneUse(m_ExtractElt(m_Specific(X), m_ConstantInt(Index1)))))
    return false;

  auto *Ext0 = cast<ExtractElementInst>(I0);
  auto *Ext1 = cast<ExtractElementInst>(I1);
  ExtractElementInst *ConvertToShuf = getShuffleExtract(Ext0, Ext1);
  if (!ConvertToShuf)
    return false;

  // The original scalar pattern is:
  // binop i1 (cmp Pred (ext X, Index0), C0), (cmp Pred (ext X, Index1), C1)
  CmpInst::Predicate Pred = P0;
  unsigned CmpOpcode = CmpInst::isFPPredicate(Pred) ? Instruction::FCmp
                                                    : Instruction::ICmp;
  auto *VecTy = dyn_cast<FixedVectorType>(X->getType());
  if (!VecTy)
    return false;

  InstructionCost OldCost =
      TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
  OldCost += TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);
  OldCost += TTI.getCmpSelInstrCost(CmpOpcode, I0->getType()) * 2;
  OldCost += TTI.getArithmeticInstrCost(I.getOpcode(), I.getType());

  // The proposed vector pattern is:
  // vcmp = cmp Pred X, VecC
  // ext (binop vNi1 vcmp, (shuffle vcmp, Index1)), Index0
  int CheapIndex = ConvertToShuf == Ext0 ? Index1 : Index0;
  int ExpensiveIndex = ConvertToShuf == Ext0 ? Index0 : Index1;
  auto *CmpTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(X->getType()));
  InstructionCost NewCost = TTI.getCmpSelInstrCost(CmpOpcode, X->getType());
  SmallVector<int, 32> ShufMask(VecTy->getNumElements(), UndefMaskElem);
  ShufMask[CheapIndex] = ExpensiveIndex;
  NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, CmpTy,
                                ShufMask);
  NewCost += TTI.getArithmeticInstrCost(I.getOpcode(), CmpTy);
  NewCost += TTI.getVectorInstrCost(Ext0->getOpcode(), CmpTy, CheapIndex);

  // Aggressively form vector ops if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // Create a vector constant from the 2 scalar constants.
  SmallVector<Constant *, 32> CmpC(VecTy->getNumElements(),
                                   UndefValue::get(VecTy->getElementType()));
  CmpC[Index0] = C0;
  CmpC[Index1] = C1;
  Value *VCmp = Builder.CreateCmp(Pred, X, ConstantVector::get(CmpC));

  Value *Shuf = createShiftShuffle(VCmp, ExpensiveIndex, CheapIndex, Builder);
  Value *VecLogic = Builder.CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                        VCmp, Shuf);
  Value *NewExt = Builder.CreateExtractElement(VecLogic, CheapIndex);
  replaceValue(I, *NewExt);
  ++NumVecCmpBO;
  return true;
}

// Check if the memory location is modified between two instructions in the
// same basic block, conservatively reporting a modification once more than
// MaxInstrsToScan instructions have been scanned.
static bool isMemModifiedBetween(BasicBlock::iterator Begin,
                                 BasicBlock::iterator End,
                                 const MemoryLocation &Loc, AAResults &AA) {
  unsigned NumScanned = 0;
  return std::any_of(Begin, End, [&](const Instruction &Instr) {
    return isModSet(AA.getModRefInfo(&Instr, Loc)) ||
           ++NumScanned > MaxInstrsToScan;
  });
}

// Combine patterns like:
//   %0 = load <4 x i32>, <4 x i32>* %a
//   %1 = insertelement <4 x i32> %0, i32 %b, i32 1
//   store <4 x i32> %1, <4 x i32>* %a
// to:
//   %0 = bitcast <4 x i32>* %a to i32*
//   %1 = getelementptr inbounds i32, i32* %0, i64 0, i64 1
//   store i32 %b, i32* %1
bool VectorCombine::foldSingleElementStore(Instruction &I) {
  StoreInst *SI = dyn_cast<StoreInst>(&I);
  if (!SI || !SI->isSimple() ||
      !isa<FixedVectorType>(SI->getValueOperand()->getType()))
    return false;

  // TODO: Combine more complicated patterns (multiple insert) by referencing
  // TargetTransformInfo.
  Instruction *Source;
  Value *NewElement;
  ConstantInt *Idx;
  if (!match(SI->getValueOperand(),
             m_InsertElt(m_Instruction(Source), m_Value(NewElement),
                         m_ConstantInt(Idx))))
    return false;

  if (auto *Load = dyn_cast<LoadInst>(Source)) {
    auto VecTy = cast<FixedVectorType>(SI->getValueOperand()->getType());
    const DataLayout &DL = I.getModule()->getDataLayout();
    Value *SrcAddr = Load->getPointerOperand()->stripPointerCasts();
    // Don't optimize for atomic/volatile load or store. Ensure memory is not
    // modified in between, the vector type matches the store size, and the
    // index is inbounds.
    if (!Load->isSimple() || Load->getParent() != SI->getParent() ||
        !DL.typeSizeEqualsStoreSize(Load->getType()) ||
        Idx->uge(VecTy->getNumElements()) ||
        SrcAddr != SI->getPointerOperand()->stripPointerCasts() ||
        isMemModifiedBetween(Load->getIterator(), SI->getIterator(),
                             MemoryLocation::get(SI), AA))
      return false;

    Value *GEP = GetElementPtrInst::CreateInBounds(
        SI->getPointerOperand(), {ConstantInt::get(Idx->getType(), 0), Idx});
    Builder.Insert(GEP);
    StoreInst *NSI = Builder.CreateStore(NewElement, GEP);
    NSI->copyMetadata(*SI);
    if (SI->getAlign() < NSI->getAlign())
      NSI->setAlignment(SI->getAlign());
    replaceValue(I, *NSI);
    // We need to erase the store manually.
    I.eraseFromParent();
    return true;
  }

  return false;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
bool VectorCombine::run() {
  if (DisableVectorCombine)
    return false;

  // Don't attempt vectorization if the target does not support vectors.
  if (!TTI.getNumberOfRegisters(TTI.getRegisterClassForType(/*Vector*/ true)))
    return false;

  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    // Use an early-increment range so that we can erase instructions in the
    // loop.
    for (Instruction &I : make_early_inc_range(BB)) {
      if (isa<DbgInfoIntrinsic>(I))
        continue;
      Builder.SetInsertPoint(&I);
      MadeChange |= vectorizeLoadInsert(I);
      MadeChange |= foldExtractExtract(I);
      MadeChange |= foldBitcastShuf(I);
      MadeChange |= scalarizeBinopOrCmp(I);
      MadeChange |= foldExtractedCmps(I);
      MadeChange |= foldSingleElementStore(I);
    }
  }

  // We're done with transforms, so remove dead instructions.
  if (MadeChange)
    for (BasicBlock &BB : F)
      SimplifyInstructionsInBlock(&BB);

  return MadeChange;
}

// Pass manager boilerplate below here.

namespace {
class VectorCombineLegacyPass : public FunctionPass {
public:
  static char ID;
  VectorCombineLegacyPass() : FunctionPass(ID) {
    initializeVectorCombineLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.setPreservesCFG();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    FunctionPass::getAnalysisUsage(AU);
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    VectorCombine Combiner(F, TTI, DT, AA);
    return Combiner.run();
  }
};
} // namespace

char VectorCombineLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(VectorCombineLegacyPass, "vector-combine",
                      "Optimize scalar/vector ops", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(VectorCombineLegacyPass, "vector-combine",
                    "Optimize scalar/vector ops", false, false)
Pass *llvm::createVectorCombinePass() {
  return new VectorCombineLegacyPass();
}

PreservedAnalyses VectorCombinePass::run(Function &F,
                                         FunctionAnalysisManager &FAM) {
  TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
  DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  AAResults &AA = FAM.getResult<AAManager>(F);
  VectorCombine Combiner(F, TTI, DT, AA);
  if (!Combiner.run())
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}