//===----- CodeGen/ExpandVectorPredication.cpp - Expand VP intrinsics -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements IR expansion for vector predication intrinsics, allowing
// targets to enable vector predication until just before codegen.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ExpandVectorPredication.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <optional>

using namespace llvm;

using VPLegalization = TargetTransformInfo::VPLegalization;
using VPTransform = TargetTransformInfo::VPLegalization::VPTransform;

// Keep this in sync with TargetTransformInfo::VPLegalization.
#define VPINTERNAL_VPLEGAL_CASES                                               \
  VPINTERNAL_CASE(Legal)                                                       \
  VPINTERNAL_CASE(Discard)                                                     \
  VPINTERNAL_CASE(Convert)

#define VPINTERNAL_CASE(X) "|" #X

// Override options.
static cl::opt<std::string> EVLTransformOverride(
    "expandvp-override-evl-transform", cl::init(""), cl::Hidden,
    cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
             ". If non-empty, ignore "
             "TargetTransformInfo and "
             "always use this transformation for the %evl parameter (used in "
             "testing)."));

static cl::opt<std::string> MaskTransformOverride(
    "expandvp-override-mask-transform", cl::init(""), cl::Hidden,
    cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
             ". If non-empty, ignore "
             "TargetTransformInfo and "
             "always use this transformation for the %mask parameter (used in "
             "testing)."));

#undef VPINTERNAL_CASE
#define VPINTERNAL_CASE(X) .Case(#X, VPLegalization::X)

static VPTransform parseOverrideOption(const std::string &TextOpt) {
  return StringSwitch<VPTransform>(TextOpt) VPINTERNAL_VPLEGAL_CASES;
}

#undef VPINTERNAL_VPLEGAL_CASES

// Whether any override options are set.
static bool anyExpandVPOverridesSet() {
  return !EVLTransformOverride.empty() || !MaskTransformOverride.empty();
}

#define DEBUG_TYPE "expandvp"

STATISTIC(NumFoldedVL, "Number of folded vector length params");
STATISTIC(NumLoweredVPOps, "Number of lowered vector predication operations");

///// Helpers {

/// \returns Whether the vector mask \p MaskVal has all lane bits set.
static bool isAllTrueMask(Value *MaskVal) {
  if (Value *SplattedVal = getSplatValue(MaskVal))
    if (auto *ConstValue = dyn_cast<Constant>(SplattedVal))
      return ConstValue->isAllOnesValue();

  return false;
}

/// \returns A non-excepting divisor constant for this type.
static Constant *getSafeDivisor(Type *DivTy) {
  assert(DivTy->isIntOrIntVectorTy() && "Unsupported divisor type");
  return ConstantInt::get(DivTy, 1u, false);
}

/// Transfer operation properties from \p VPI to \p NewVal.
static void transferDecorations(Value &NewVal, VPIntrinsic &VPI) {
  auto *NewInst = dyn_cast<Instruction>(&NewVal);
  if (!NewInst || !isa<FPMathOperator>(NewVal))
    return;

  auto *OldFMOp = dyn_cast<FPMathOperator>(&VPI);
  if (!OldFMOp)
    return;

  NewInst->setFastMathFlags(OldFMOp->getFastMathFlags());
}

/// Transfer all properties from \p OldOp to \p NewOp and replace all uses.
/// \p OldOp gets erased.
static void replaceOperation(Value &NewOp, VPIntrinsic &OldOp) {
  transferDecorations(NewOp, OldOp);
  OldOp.replaceAllUsesWith(&NewOp);
  OldOp.eraseFromParent();
}

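/// \returns Whether all lanes of \p VPI may be executed speculatively, i.e.
/// whether it is safe to ignore %mask and %evl when executing the operation.
/// For example (illustrative), llvm.vp.add may be speculated because integer
/// add has no side effects, while llvm.vp.udiv may not, since a masked-off
/// lane could divide by zero; VP reductions are never speculated because
/// their result depends on %mask and %evl.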
static bool maySpeculateLanes(VPIntrinsic &VPI) {
  // The result of VP reductions depends on the mask and evl.
  if (isa<VPReductionIntrinsic>(VPI))
    return false;
  // Fall back to whether the intrinsic is speculatable.
  if (auto IntrID = VPI.getFunctionalIntrinsicID())
    return Intrinsic::getAttributes(VPI.getContext(), *IntrID)
        .hasFnAttr(Attribute::AttrKind::Speculatable);
  if (auto Opc = VPI.getFunctionalOpcode())
    return isSafeToSpeculativelyExecuteWithOpcode(*Opc, &VPI);
  return false;
}

//// } Helpers

namespace {

// Expansion pass state at function scope.
struct CachingVPExpander {
  const TargetTransformInfo &TTI;

  /// \returns A (fixed length) vector with ascending integer indices
  /// (<0, 1, ..., NumElems-1>).
  /// \p Builder
  ///    Used for instruction creation.
  /// \p LaneTy
  ///    Integer element type of the result vector.
  /// \p NumElems
  ///    Number of vector elements.
  Value *createStepVector(IRBuilder<> &Builder, Type *LaneTy,
                          unsigned NumElems);

  /// \returns A bitmask that is true where the lane position is less than
  /// \p EVLParam.
  ///
  /// \p Builder
  ///    Used for instruction creation.
  /// \p EVLParam
  ///    The explicit vector length parameter to test against the lane
  ///    positions.
  /// \p ElemCount
  ///    Static (potentially scalable) number of vector elements.
  Value *convertEVLToMask(IRBuilder<> &Builder, Value *EVLParam,
                          ElementCount ElemCount);

  /// If needed, folds the EVL into the mask operand and discards the EVL
  /// parameter. Returns a pair of the value of the intrinsic after the change
  /// (if any) and whether the mask was actually folded.
  std::pair<Value *, bool> foldEVLIntoMask(VPIntrinsic &VPI);

  /// "Remove" the %evl parameter of \p PI by setting it to the static vector
  /// length of the operation. Returns true if the %evl parameter (if any) was
  /// effectively changed.
  bool discardEVLParameter(VPIntrinsic &PI);

  /// Lower this VP binary operator to an unpredicated binary operator.
  Value *expandPredicationInBinaryOperator(IRBuilder<> &Builder,
                                           VPIntrinsic &PI);

  /// Lower this VP integer call to an unpredicated integer call.
  Value *expandPredicationToIntCall(IRBuilder<> &Builder, VPIntrinsic &PI);

  /// Lower this VP FP call to an unpredicated FP call.
  Value *expandPredicationToFPCall(IRBuilder<> &Builder, VPIntrinsic &PI,
                                   unsigned UnpredicatedIntrinsicID);

  /// Lower this VP reduction to a call to an unpredicated reduction intrinsic.
  Value *expandPredicationInReduction(IRBuilder<> &Builder,
                                      VPReductionIntrinsic &PI);

  /// Lower this VP cast operation to an unpredicated cast instruction.
  Value *expandPredicationToCastIntrinsic(IRBuilder<> &Builder,
                                          VPIntrinsic &VPI);

  /// Lower this VP memory operation to a non-VP intrinsic.
  Value *expandPredicationInMemoryIntrinsic(IRBuilder<> &Builder,
                                            VPIntrinsic &VPI);

  /// Lower this VP comparison to an unpredicated comparison instruction.
  Value *expandPredicationInComparison(IRBuilder<> &Builder,
                                       VPCmpIntrinsic &PI);

  /// Query TTI and expand the vector predication in \p PI accordingly.
  Value *expandPredication(VPIntrinsic &PI);

  /// Determine how and whether the VPIntrinsic \p VPI shall be expanded. This
  /// overrides TTI with the cl::opts listed at the top of this file.
  VPLegalization getVPLegalizationStrategy(const VPIntrinsic &VPI) const;
  bool UsingTTIOverrides;

public:
  CachingVPExpander(const TargetTransformInfo &TTI)
      : TTI(TTI), UsingTTIOverrides(anyExpandVPOverridesSet()) {}

  /// Expand llvm.vp.* intrinsics as requested by \p TTI.
  /// Returns the details of the expansion.
  VPExpansionDetails expandVectorPredication(VPIntrinsic &VPI);
};

//// CachingVPExpander {

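// For example (illustrative), createStepVector with LaneTy = i32 and
// NumElems = 4 produces the constant vector <i32 0, i32 1, i32 2, i32 3>.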
Value *CachingVPExpander::createStepVector(IRBuilder<> &Builder, Type *LaneTy,
                                           unsigned NumElems) {
  // TODO add caching
  SmallVector<Constant *, 16> ConstElems;

  for (unsigned Idx = 0; Idx < NumElems; ++Idx)
    ConstElems.push_back(ConstantInt::get(LaneTy, Idx, false));

  return ConstantVector::get(ConstElems);
}

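// For example (illustrative), converting %evl = 2 for a fixed <4 x i32>
// operation builds roughly:
//   %vlsplat = <i32 2, i32 2, i32 2, i32 2>
//   %step    = <i32 0, i32 1, i32 2, i32 3>
//   %mask    = icmp ult %step, %vlsplat    ; <i1 1, i1 1, i1 0, i1 0>
// For scalable vectors, a llvm.get_active_lane_mask(0, %evl) call is emitted
// instead.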
Value *CachingVPExpander::convertEVLToMask(IRBuilder<> &Builder,
                                           Value *EVLParam,
                                           ElementCount ElemCount) {
  // TODO add caching
  // Scalable vector %evl conversion.
  if (ElemCount.isScalable()) {
    Type *BoolVecTy = VectorType::get(Builder.getInt1Ty(), ElemCount);
    // `get_active_lane_mask` performs an implicit less-than comparison.
    Value *ConstZero = Builder.getInt32(0);
    return Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
                                   {BoolVecTy, EVLParam->getType()},
                                   {ConstZero, EVLParam});
  }

  // Fixed vector %evl conversion.
  Type *LaneTy = EVLParam->getType();
  unsigned NumElems = ElemCount.getFixedValue();
  Value *VLSplat = Builder.CreateVectorSplat(NumElems, EVLParam);
  Value *IdxVec = createStepVector(Builder, LaneTy, NumElems);
  return Builder.CreateICmp(CmpInst::ICMP_ULT, IdxVec, VLSplat);
}

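// Illustrative expansion of a masked division (a sketch; value names are
// chosen by the builder):
//   %r = call <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32> %x, <4 x i32> %y,
//                                           <4 x i1> %m, i32 4)
// becomes roughly:
//   %safe = select <4 x i1> %m, <4 x i32> %y, <4 x i32> splat (i32 1)
//   %r    = sdiv <4 x i32> %x, %safe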
Value *
CachingVPExpander::expandPredicationInBinaryOperator(IRBuilder<> &Builder,
                                                     VPIntrinsic &VPI) {
  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  auto OC = static_cast<Instruction::BinaryOps>(*VPI.getFunctionalOpcode());
  assert(Instruction::isBinaryOp(OC));

  Value *Op0 = VPI.getOperand(0);
  Value *Op1 = VPI.getOperand(1);
  Value *Mask = VPI.getMaskParam();

  // Blend in safe operands.
  if (Mask && !isAllTrueMask(Mask)) {
    switch (OC) {
    default:
      // Can safely ignore the predicate.
      break;

    // Division operators need a safe divisor (here: 1) on masked-off lanes.
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
      // The second operand must not be zero.
      Value *SafeDivisor = getSafeDivisor(VPI.getType());
      Op1 = Builder.CreateSelect(Mask, Op1, SafeDivisor);
    }
  }

  Value *NewBinOp = Builder.CreateBinOp(OC, Op0, Op1, VPI.getName());

  replaceOperation(*NewBinOp, VPI);
  return NewBinOp;
}

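// Illustrative: once %mask and %evl are known to be ineffective,
//   %r = call <4 x i32> @llvm.vp.smax.v4i32(<4 x i32> %a, <4 x i32> %b,
//                                           <4 x i1> %m, i32 4)
// lowers to the unpredicated
//   %r = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %a, <4 x i32> %b)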
Value *CachingVPExpander::expandPredicationToIntCall(IRBuilder<> &Builder,
                                                     VPIntrinsic &VPI) {
  std::optional<unsigned> FID = VPI.getFunctionalIntrinsicID();
  if (!FID)
    return nullptr;
  // Collect all arguments except the trailing %mask and %evl (the callee is
  // the call's final operand, hence the "- 3").
  SmallVector<Value *, 2> Argument;
  for (unsigned i = 0; i < VPI.getNumOperands() - 3; i++) {
    Argument.push_back(VPI.getOperand(i));
  }
  Value *NewOp = Builder.CreateIntrinsic(FID.value(), {VPI.getType()}, Argument,
                                         /*FMFSource=*/nullptr, VPI.getName());
  replaceOperation(*NewOp, VPI);
  return NewOp;
}

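// Illustrative: a speculatable llvm.vp.fma.v4f32(%a, %b, %c, %m, %evl)
// lowers to llvm.fma.v4f32(%a, %b, %c); fast-math flags are carried over by
// replaceOperation().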
Value *CachingVPExpander::expandPredicationToFPCall(
    IRBuilder<> &Builder, VPIntrinsic &VPI, unsigned UnpredicatedIntrinsicID) {
  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  switch (UnpredicatedIntrinsicID) {
  case Intrinsic::fabs:
  case Intrinsic::sqrt:
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
  case Intrinsic::maximum:
  case Intrinsic::minimum: {
    // Collect all arguments except the trailing %mask and %evl (the callee is
    // the call's final operand, hence the "- 3").
    SmallVector<Value *, 2> Argument;
    for (unsigned i = 0; i < VPI.getNumOperands() - 3; i++) {
      Argument.push_back(VPI.getOperand(i));
    }
    Value *NewOp = Builder.CreateIntrinsic(
        UnpredicatedIntrinsicID, {VPI.getType()}, Argument,
        /*FMFSource=*/nullptr, VPI.getName());
    replaceOperation(*NewOp, VPI);
    return NewOp;
  }
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::experimental_constrained_fma:
  case Intrinsic::experimental_constrained_fmuladd: {
    Value *Op0 = VPI.getOperand(0);
    Value *Op1 = VPI.getOperand(1);
    Value *Op2 = VPI.getOperand(2);
    Function *Fn = Intrinsic::getOrInsertDeclaration(
        VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
    Value *NewOp;
    if (Intrinsic::isConstrainedFPIntrinsic(UnpredicatedIntrinsicID))
      NewOp =
          Builder.CreateConstrainedFPCall(Fn, {Op0, Op1, Op2}, VPI.getName());
    else
      NewOp = Builder.CreateCall(Fn, {Op0, Op1, Op2}, VPI.getName());
    replaceOperation(*NewOp, VPI);
    return NewOp;
  }
  }

  return nullptr;
}

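// Illustrative neutral elements: 0 for vp.reduce.add/or/xor, 1 for
// vp.reduce.mul, and all-ones for vp.reduce.and. Replacing masked-off lanes
// with the neutral element leaves the reduction result unchanged.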
static Value *getNeutralReductionElement(const VPReductionIntrinsic &VPI,
                                         Type *EltTy) {
  Intrinsic::ID RdxID = *VPI.getFunctionalIntrinsicID();
  FastMathFlags FMF;
  if (isa<FPMathOperator>(VPI))
    FMF = VPI.getFastMathFlags();
  return getReductionIdentity(RdxID, EltTy, FMF);
}

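// Illustrative expansion (a sketch):
//   %r = call i32 @llvm.vp.reduce.add.v4i32(i32 %start, <4 x i32> %v,
//                                           <4 x i1> %m, i32 4)
// becomes roughly:
//   %masked = select <4 x i1> %m, <4 x i32> %v, <4 x i32> zeroinitializer
//   %rdx    = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %masked)
//   %r      = add i32 %rdx, %start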
Value *
CachingVPExpander::expandPredicationInReduction(IRBuilder<> &Builder,
                                                VPReductionIntrinsic &VPI) {
  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  Value *Mask = VPI.getMaskParam();
  Value *RedOp = VPI.getOperand(VPI.getVectorParamPos());

  // Insert neutral element in masked-out positions
  if (Mask && !isAllTrueMask(Mask)) {
    auto *NeutralElt = getNeutralReductionElement(VPI, VPI.getType());
    auto *NeutralVector = Builder.CreateVectorSplat(
        cast<VectorType>(RedOp->getType())->getElementCount(), NeutralElt);
    RedOp = Builder.CreateSelect(Mask, RedOp, NeutralVector);
  }

  Value *Reduction;
  Value *Start = VPI.getOperand(VPI.getStartParamPos());

  switch (VPI.getIntrinsicID()) {
  default:
    llvm_unreachable("Impossible reduction kind");
  case Intrinsic::vp_reduce_add:
  case Intrinsic::vp_reduce_mul:
  case Intrinsic::vp_reduce_and:
  case Intrinsic::vp_reduce_or:
  case Intrinsic::vp_reduce_xor: {
    Intrinsic::ID RedID = *VPI.getFunctionalIntrinsicID();
    unsigned Opc = getArithmeticReductionInstruction(RedID);
    assert(Instruction::isBinaryOp(Opc));
    Reduction = Builder.CreateUnaryIntrinsic(RedID, RedOp);
    Reduction =
        Builder.CreateBinOp((Instruction::BinaryOps)Opc, Reduction, Start);
    break;
  }
  case Intrinsic::vp_reduce_smax:
  case Intrinsic::vp_reduce_smin:
  case Intrinsic::vp_reduce_umax:
  case Intrinsic::vp_reduce_umin:
  case Intrinsic::vp_reduce_fmax:
  case Intrinsic::vp_reduce_fmin:
  case Intrinsic::vp_reduce_fmaximum:
  case Intrinsic::vp_reduce_fminimum: {
    Intrinsic::ID RedID = *VPI.getFunctionalIntrinsicID();
    Intrinsic::ID ScalarID = getMinMaxReductionIntrinsicOp(RedID);
    Reduction = Builder.CreateUnaryIntrinsic(RedID, RedOp);
    transferDecorations(*Reduction, VPI);
    Reduction = Builder.CreateBinaryIntrinsic(ScalarID, Reduction, Start);
    break;
  }
  case Intrinsic::vp_reduce_fadd:
    Reduction = Builder.CreateFAddReduce(Start, RedOp);
    break;
  case Intrinsic::vp_reduce_fmul:
    Reduction = Builder.CreateFMulReduce(Start, RedOp);
    break;
  }

  replaceOperation(*Reduction, VPI);
  return Reduction;
}

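// Illustrative: a llvm.vp.sext from <4 x i32> to <4 x i64> becomes a plain
// sext instruction. Masked-off lanes of a VP intrinsic carry no defined
// value, so computing them unconditionally is harmless.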
Value *CachingVPExpander::expandPredicationToCastIntrinsic(IRBuilder<> &Builder,
                                                           VPIntrinsic &VPI) {
  Intrinsic::ID VPID = VPI.getIntrinsicID();
  unsigned CastOpcode = VPIntrinsic::getFunctionalOpcodeForVP(VPID).value();
  assert(Instruction::isCast(CastOpcode));
  Value *CastOp =
      Builder.CreateCast(Instruction::CastOps(CastOpcode), VPI.getOperand(0),
                         VPI.getType(), VPI.getName());

  replaceOperation(*CastOp, VPI);
  return CastOp;
}

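// Illustrative memory expansion (a sketch):
//   call void @llvm.vp.store.v4i32.p0(<4 x i32> %v, ptr %p, <4 x i1> %m,
//                                     i32 4)
// becomes, once %evl is ineffective:
//   call void @llvm.masked.store.v4i32.p0(<4 x i32> %v, ptr %p, i32 4,
//                                         <4 x i1> %m)
// and degenerates to a plain store when %m is all-true.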
Value *
CachingVPExpander::expandPredicationInMemoryIntrinsic(IRBuilder<> &Builder,
                                                      VPIntrinsic &VPI) {
  assert(VPI.canIgnoreVectorLengthParam());

  const auto &DL = VPI.getDataLayout();

  Value *MaskParam = VPI.getMaskParam();
  Value *PtrParam = VPI.getMemoryPointerParam();
  Value *DataParam = VPI.getMemoryDataParam();
  bool IsUnmasked = isAllTrueMask(MaskParam);

  MaybeAlign AlignOpt = VPI.getPointerAlignment();

  Value *NewMemoryInst = nullptr;
  switch (VPI.getIntrinsicID()) {
  default:
    llvm_unreachable("Not a VP memory intrinsic");
  case Intrinsic::vp_store:
    if (IsUnmasked) {
      StoreInst *NewStore =
          Builder.CreateStore(DataParam, PtrParam, /*IsVolatile*/ false);
      if (AlignOpt.has_value())
        NewStore->setAlignment(*AlignOpt);
      NewMemoryInst = NewStore;
    } else
      NewMemoryInst = Builder.CreateMaskedStore(
          DataParam, PtrParam, AlignOpt.valueOrOne(), MaskParam);

    break;
  case Intrinsic::vp_load:
    if (IsUnmasked) {
      LoadInst *NewLoad =
          Builder.CreateLoad(VPI.getType(), PtrParam, /*IsVolatile*/ false);
      if (AlignOpt.has_value())
        NewLoad->setAlignment(*AlignOpt);
      NewMemoryInst = NewLoad;
    } else
      NewMemoryInst = Builder.CreateMaskedLoad(
          VPI.getType(), PtrParam, AlignOpt.valueOrOne(), MaskParam);

    break;
  case Intrinsic::vp_scatter: {
    auto *ElementType =
        cast<VectorType>(DataParam->getType())->getElementType();
    NewMemoryInst = Builder.CreateMaskedScatter(
        DataParam, PtrParam,
        AlignOpt.value_or(DL.getPrefTypeAlign(ElementType)), MaskParam);
    break;
  }
  case Intrinsic::vp_gather: {
    auto *ElementType = cast<VectorType>(VPI.getType())->getElementType();
    NewMemoryInst = Builder.CreateMaskedGather(
        VPI.getType(), PtrParam,
        AlignOpt.value_or(DL.getPrefTypeAlign(ElementType)), MaskParam, nullptr,
        VPI.getName());
    break;
  }
  }

  assert(NewMemoryInst);
  replaceOperation(*NewMemoryInst, VPI);
  return NewMemoryInst;
}

Value *CachingVPExpander::expandPredicationInComparison(IRBuilder<> &Builder,
                                                        VPCmpIntrinsic &VPI) {
  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  assert(*VPI.getFunctionalOpcode() == Instruction::ICmp ||
         *VPI.getFunctionalOpcode() == Instruction::FCmp);

  Value *Op0 = VPI.getOperand(0);
  Value *Op1 = VPI.getOperand(1);
  auto Pred = VPI.getPredicate();

  auto *NewCmp = Builder.CreateCmp(Pred, Op0, Op1);

  replaceOperation(*NewCmp, VPI);
  return NewCmp;
}

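// Illustrative: for a fixed <4 x i32> operation, %evl is pinned to the
// constant i32 4; for a <vscale x 4 x i32> operation it becomes a `mul nuw`
// of a llvm.vscale() call with 4.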
bool CachingVPExpander::discardEVLParameter(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Discard EVL parameter in " << VPI << "\n");

  if (VPI.canIgnoreVectorLengthParam())
    return false;

  Value *EVLParam = VPI.getVectorLengthParam();
  if (!EVLParam)
    return false;

  ElementCount StaticElemCount = VPI.getStaticVectorLength();
  Value *MaxEVL = nullptr;
  Type *Int32Ty = Type::getInt32Ty(VPI.getContext());
  if (StaticElemCount.isScalable()) {
    // TODO add caching
    IRBuilder<> Builder(VPI.getParent(), VPI.getIterator());
    Value *FactorConst = Builder.getInt32(StaticElemCount.getKnownMinValue());
    Value *VScale = Builder.CreateIntrinsic(Intrinsic::vscale, Int32Ty, {},
                                            /*FMFSource=*/nullptr, "vscale");
    MaxEVL = Builder.CreateMul(VScale, FactorConst, "scalable_size",
                               /*NUW*/ true, /*NSW*/ false);
  } else {
    MaxEVL = ConstantInt::get(Int32Ty, StaticElemCount.getFixedValue(), false);
  }
  VPI.setVectorLengthParam(MaxEVL);
  return true;
}

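// Illustrative: for a fixed <4 x i32> vp.add, folding builds roughly:
//   %vlmask  = icmp ult <i32 0, i32 1, i32 2, i32 3>, (splat of %evl)
//   %newmask = and <4 x i1> %vlmask, %oldmask
// after which %evl is reset to the static vector length (here: i32 4).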
std::pair<Value *, bool> CachingVPExpander::foldEVLIntoMask(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Folding vlen for " << VPI << '\n');

  IRBuilder<> Builder(&VPI);

  // The %evl parameter is ineffective, so there is nothing to do here.
  if (VPI.canIgnoreVectorLengthParam())
    return {&VPI, false};

  // Only VP intrinsics can have an %evl parameter.
  Value *OldMaskParam = VPI.getMaskParam();
  Value *OldEVLParam = VPI.getVectorLengthParam();
  assert(OldMaskParam && "no mask param to fold the vl param into");
  assert(OldEVLParam && "no EVL param to fold away");

  LLVM_DEBUG(dbgs() << "OLD evl: " << *OldEVLParam << '\n');
  LLVM_DEBUG(dbgs() << "OLD mask: " << *OldMaskParam << '\n');

  // Convert the %evl predication into vector mask predication.
  ElementCount ElemCount = VPI.getStaticVectorLength();
  Value *VLMask = convertEVLToMask(Builder, OldEVLParam, ElemCount);
  Value *NewMaskParam = Builder.CreateAnd(VLMask, OldMaskParam);
  VPI.setMaskParam(NewMaskParam);

  // Drop the %evl parameter.
  discardEVLParameter(VPI);
  assert(VPI.canIgnoreVectorLengthParam() &&
         "transformation did not render the evl param ineffective!");

  // Reassess the modified instruction.
  return {&VPI, true};
}

Value *CachingVPExpander::expandPredication(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Lowering to unpredicated op: " << VPI << '\n');

  IRBuilder<> Builder(&VPI);

  // Try lowering to an LLVM instruction first.
  auto OC = VPI.getFunctionalOpcode();

  if (OC && Instruction::isBinaryOp(*OC))
    return expandPredicationInBinaryOperator(Builder, VPI);

  if (auto *VPRI = dyn_cast<VPReductionIntrinsic>(&VPI))
    return expandPredicationInReduction(Builder, *VPRI);

  if (auto *VPCmp = dyn_cast<VPCmpIntrinsic>(&VPI))
    return expandPredicationInComparison(Builder, *VPCmp);

  if (VPCastIntrinsic::isVPCast(VPI.getIntrinsicID()))
    return expandPredicationToCastIntrinsic(Builder, VPI);

  switch (VPI.getIntrinsicID()) {
  default:
    break;
  case Intrinsic::vp_fneg: {
    Value *NewNegOp = Builder.CreateFNeg(VPI.getOperand(0), VPI.getName());
    replaceOperation(*NewNegOp, VPI);
    return NewNegOp;
  }
  case Intrinsic::vp_abs:
  case Intrinsic::vp_smax:
  case Intrinsic::vp_smin:
  case Intrinsic::vp_umax:
  case Intrinsic::vp_umin:
  case Intrinsic::vp_bswap:
  case Intrinsic::vp_bitreverse:
  case Intrinsic::vp_ctpop:
  case Intrinsic::vp_ctlz:
  case Intrinsic::vp_cttz:
  case Intrinsic::vp_sadd_sat:
  case Intrinsic::vp_uadd_sat:
  case Intrinsic::vp_ssub_sat:
  case Intrinsic::vp_usub_sat:
  case Intrinsic::vp_fshl:
  case Intrinsic::vp_fshr:
    return expandPredicationToIntCall(Builder, VPI);
  case Intrinsic::vp_fabs:
  case Intrinsic::vp_sqrt:
  case Intrinsic::vp_maxnum:
  case Intrinsic::vp_minnum:
  case Intrinsic::vp_maximum:
  case Intrinsic::vp_minimum:
  case Intrinsic::vp_fma:
  case Intrinsic::vp_fmuladd:
    return expandPredicationToFPCall(Builder, VPI,
                                     VPI.getFunctionalIntrinsicID().value());
  case Intrinsic::vp_load:
  case Intrinsic::vp_store:
  case Intrinsic::vp_gather:
  case Intrinsic::vp_scatter:
    return expandPredicationInMemoryIntrinsic(Builder, VPI);
  }

  if (auto CID = VPI.getConstrainedIntrinsicID())
    if (Value *Call = expandPredicationToFPCall(Builder, VPI, *CID))
      return Call;

  return &VPI;
}

//// } CachingVPExpander

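// Illustrative: a non-speculatable operation such as vp.udiv whose
// OpStrategy is Convert must also have EVLParamStrategy raised to Convert,
// so %evl is folded into %mask before the expansion drops %evl.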
void sanitizeStrategy(VPIntrinsic &VPI, VPLegalization &LegalizeStrat) {
  // Operations with speculatable lanes do not strictly need predication.
  if (maySpeculateLanes(VPI)) {
    // Converting a speculatable VP intrinsic means dropping %mask and %evl.
    // There is no need to fold %evl into %mask when the expansion ignores
    // both anyway.
    if (LegalizeStrat.OpStrategy == VPLegalization::Convert)
      LegalizeStrat.EVLParamStrategy = VPLegalization::Discard;
    return;
  }

  // We have to preserve the predicating effect of %evl for this
  // non-speculatable VP intrinsic.
  // 1) Never discard %evl.
  // 2) If this VP intrinsic will be expanded to non-VP code, make sure that
  //    %evl gets folded into %mask.
  if ((LegalizeStrat.EVLParamStrategy == VPLegalization::Discard) ||
      (LegalizeStrat.OpStrategy == VPLegalization::Convert)) {
    LegalizeStrat.EVLParamStrategy = VPLegalization::Convert;
  }
}

VPLegalization
CachingVPExpander::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
  auto VPStrat = TTI.getVPLegalizationStrategy(VPI);
  if (LLVM_LIKELY(!UsingTTIOverrides)) {
    // No overrides - we are in production.
    return VPStrat;
  }

  // Overrides set - we are in testing; the following does not need to be
  // efficient.
  VPStrat.EVLParamStrategy = parseOverrideOption(EVLTransformOverride);
  VPStrat.OpStrategy = parseOverrideOption(MaskTransformOverride);
  return VPStrat;
}

VPExpansionDetails
CachingVPExpander::expandVectorPredication(VPIntrinsic &VPI) {
  auto Strategy = getVPLegalizationStrategy(VPI);
  sanitizeStrategy(VPI, Strategy);

  VPExpansionDetails Changed = VPExpansionDetails::IntrinsicUnchanged;

  // Transform the EVL parameter.
  switch (Strategy.EVLParamStrategy) {
  case VPLegalization::Legal:
    break;
  case VPLegalization::Discard:
    if (discardEVLParameter(VPI))
      Changed = VPExpansionDetails::IntrinsicUpdated;
    break;
  case VPLegalization::Convert:
    if (auto [NewVPI, Folded] = foldEVLIntoMask(VPI); Folded) {
      (void)NewVPI;
      Changed = VPExpansionDetails::IntrinsicUpdated;
      ++NumFoldedVL;
    }
    break;
  }

  // Replace with a non-predicated operation.
  switch (Strategy.OpStrategy) {
  case VPLegalization::Legal:
    break;
  case VPLegalization::Discard:
    llvm_unreachable("Invalid strategy for operators.");
  case VPLegalization::Convert:
    if (Value *V = expandPredication(VPI); V != &VPI) {
      ++NumLoweredVPOps;
      Changed = VPExpansionDetails::IntrinsicReplaced;
    }
    break;
  }

  return Changed;
}
} // namespace

VPExpansionDetails
llvm::expandVectorPredicationIntrinsic(VPIntrinsic &VPI,
                                       const TargetTransformInfo &TTI) {
  return CachingVPExpander(TTI).expandVectorPredication(VPI);
}
731