//===----- CodeGen/ExpandVectorPredication.cpp - Expand VP intrinsics -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements IR expansion for vector predication intrinsics, allowing
// targets to enable vector predication until just before codegen.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ExpandVectorPredication.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <optional>

using namespace llvm;

using VPLegalization = TargetTransformInfo::VPLegalization;
using VPTransform = TargetTransformInfo::VPLegalization::VPTransform;

// Keep this in sync with TargetTransformInfo::VPLegalization.
#define VPINTERNAL_VPLEGAL_CASES                                               \
  VPINTERNAL_CASE(Legal)                                                       \
  VPINTERNAL_CASE(Discard)                                                     \
  VPINTERNAL_CASE(Convert)

#define VPINTERNAL_CASE(X) "|" #X
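// Note: with VPINTERNAL_CASE defined as above, VPINTERNAL_VPLEGAL_CASES
// expands to the string "|Legal|Discard|Convert" in the option descriptions
// below.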

// Override options.
static cl::opt<std::string> EVLTransformOverride(
    "expandvp-override-evl-transform", cl::init(""), cl::Hidden,
    cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
             ". If non-empty, ignore "
             "TargetTransformInfo and "
             "always use this transformation for the %evl parameter (Used in "
             "testing)."));

static cl::opt<std::string> MaskTransformOverride(
    "expandvp-override-mask-transform", cl::init(""), cl::Hidden,
    cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
             ". If non-empty, ignore "
             "TargetTransformInfo and "
             "always use this transformation for the %mask parameter (Used in "
             "testing)."));

#undef VPINTERNAL_CASE
#define VPINTERNAL_CASE(X) .Case(#X, VPLegalization::X)

static VPTransform parseOverrideOption(const std::string &TextOpt) {
  return StringSwitch<VPTransform>(TextOpt) VPINTERNAL_VPLEGAL_CASES;
}

#undef VPINTERNAL_VPLEGAL_CASES

// Whether any override options are set.
static bool anyExpandVPOverridesSet() {
  return !EVLTransformOverride.empty() || !MaskTransformOverride.empty();
}

#define DEBUG_TYPE "expandvp"

STATISTIC(NumFoldedVL, "Number of folded vector length params");
STATISTIC(NumLoweredVPOps, "Number of vector predication operations lowered");

///// Helpers {

/// \returns Whether the vector mask \p MaskVal has all lane bits set.
static bool isAllTrueMask(Value *MaskVal) {
  if (Value *SplattedVal = getSplatValue(MaskVal))
    if (auto *ConstValue = dyn_cast<Constant>(SplattedVal))
      return ConstValue->isAllOnesValue();

  return false;
}

/// \returns A non-excepting divisor constant for this type.
static Constant *getSafeDivisor(Type *DivTy) {
  assert(DivTy->isIntOrIntVectorTy() && "Unsupported divisor type");
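  // A divisor of 1 can never trap: it avoids both division by zero and the
  // INT_MIN / -1 overflow case of signed division.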
  return ConstantInt::get(DivTy, 1u, false);
}

/// Transfer operation properties from \p VPI to \p NewVal.
static void transferDecorations(Value &NewVal, VPIntrinsic &VPI) {
  auto *NewInst = dyn_cast<Instruction>(&NewVal);
  if (!NewInst || !isa<FPMathOperator>(NewVal))
    return;

  auto *OldFMOp = dyn_cast<FPMathOperator>(&VPI);
  if (!OldFMOp)
    return;

  NewInst->setFastMathFlags(OldFMOp->getFastMathFlags());
}

/// Transfer all properties from \p OldOp to \p NewOp and replace all uses.
/// \p OldOp gets erased.
static void replaceOperation(Value &NewOp, VPIntrinsic &OldOp) {
  transferDecorations(NewOp, OldOp);
  OldOp.replaceAllUsesWith(&NewOp);
  OldOp.eraseFromParent();
}

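/// \returns Whether all lanes of \p VPI may be speculatively executed, i.e.
/// whether evaluating masked-off lanes is free of side effects. For example, a
/// vp.add can be computed on every lane, whereas a vp.udiv cannot (a
/// masked-off lane might divide by zero).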
static bool maySpeculateLanes(VPIntrinsic &VPI) {
  // The result of VP reductions depends on the mask and evl.
  if (isa<VPReductionIntrinsic>(VPI))
    return false;
  // Fall back to whether the intrinsic is speculatable.
  if (auto IntrID = VPI.getFunctionalIntrinsicID())
    return Intrinsic::getAttributes(VPI.getContext(), *IntrID)
        .hasFnAttr(Attribute::AttrKind::Speculatable);
  if (auto Opc = VPI.getFunctionalOpcode())
    return isSafeToSpeculativelyExecuteWithOpcode(*Opc, &VPI);
  return false;
}

//// } Helpers

namespace {

// Expansion pass state at function scope.
struct CachingVPExpander {
  const TargetTransformInfo &TTI;

  /// \returns A bitmask that is true where the lane position is less than \p
  /// EVLParam.
  ///
  /// \p Builder
  ///    Used for instruction creation.
  /// \p EVLParam
  ///    The explicit vector length parameter to test against the lane
  ///    positions.
  /// \p ElemCount
  ///    Static (potentially scalable) number of vector elements.
  Value *convertEVLToMask(IRBuilder<> &Builder, Value *EVLParam,
                          ElementCount ElemCount);

  /// If needed, folds the EVL into the mask operand and discards the EVL
  /// parameter. Returns a pair of the value of the intrinsic after the change
  /// (if any) and whether the mask was actually folded.
  std::pair<Value *, bool> foldEVLIntoMask(VPIntrinsic &VPI);

  /// "Remove" the %evl parameter of \p PI by setting it to the static vector
  /// length of the operation. Returns true if the %evl (if any) was effectively
  /// changed.
  bool discardEVLParameter(VPIntrinsic &PI);

  /// Lower this VP binary operator to an unpredicated binary operator.
  Value *expandPredicationInBinaryOperator(IRBuilder<> &Builder,
                                           VPIntrinsic &PI);

  /// Lower this VP int call to an unpredicated int call.
  Value *expandPredicationToIntCall(IRBuilder<> &Builder, VPIntrinsic &PI);

  /// Lower this VP fp call to an unpredicated fp call.
  Value *expandPredicationToFPCall(IRBuilder<> &Builder, VPIntrinsic &PI,
                                   unsigned UnpredicatedIntrinsicID);

  /// Lower this VP reduction to a call to an unpredicated reduction intrinsic.
  Value *expandPredicationInReduction(IRBuilder<> &Builder,
                                      VPReductionIntrinsic &PI);

  /// Lower this VP cast operation to a non-VP intrinsic.
  Value *expandPredicationToCastIntrinsic(IRBuilder<> &Builder,
                                          VPIntrinsic &VPI);

  /// Lower this VP memory operation to a non-VP intrinsic.
  Value *expandPredicationInMemoryIntrinsic(IRBuilder<> &Builder,
                                            VPIntrinsic &VPI);

  /// Lower this VP comparison to an unpredicated comparison.
  Value *expandPredicationInComparison(IRBuilder<> &Builder,
                                       VPCmpIntrinsic &PI);

  /// Query TTI and expand the vector predication in \p PI accordingly.
  Value *expandPredication(VPIntrinsic &PI);

  /// Determine how and whether the VPIntrinsic \p VPI shall be expanded. This
  /// overrides TTI with the cl::opts listed at the top of this file.
  VPLegalization getVPLegalizationStrategy(const VPIntrinsic &VPI) const;
  bool UsingTTIOverrides;

public:
  CachingVPExpander(const TargetTransformInfo &TTI)
      : TTI(TTI), UsingTTIOverrides(anyExpandVPOverridesSet()) {}

  /// Expand llvm.vp.* intrinsics as requested by \p TTI.
  /// Returns the details of the expansion.
  VPExpansionDetails expandVectorPredication(VPIntrinsic &VPI);
};

//// CachingVPExpander {

Value *CachingVPExpander::convertEVLToMask(IRBuilder<> &Builder,
                                           Value *EVLParam,
                                           ElementCount ElemCount) {
  // TODO add caching
  // Scalable vector %evl conversion.
  if (ElemCount.isScalable()) {
    Type *BoolVecTy = VectorType::get(Builder.getInt1Ty(), ElemCount);
    // `get_active_lane_mask` performs an implicit less-than comparison.
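    // That is, lane i of the result is (i < %evl); e.g. an %evl of 2 yields
    // the predicate <1, 1, 0, 0, ...>.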
    Value *ConstZero = Builder.getInt32(0);
    return Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
                                   {BoolVecTy, EVLParam->getType()},
                                   {ConstZero, EVLParam});
  }

  // Fixed vector %evl conversion.
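  // Emit the comparison explicitly, e.g. for a 4-element vector:
  //   %mask = icmp ult <i32 0, i32 1, i32 2, i32 3>, (splat %evl)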
  Type *LaneTy = EVLParam->getType();
  unsigned NumElems = ElemCount.getFixedValue();
  Value *VLSplat = Builder.CreateVectorSplat(NumElems, EVLParam);
  Value *IdxVec = Builder.CreateStepVector(VectorType::get(LaneTy, ElemCount));
  return Builder.CreateICmp(CmpInst::ICMP_ULT, IdxVec, VLSplat);
}

Value *
CachingVPExpander::expandPredicationInBinaryOperator(IRBuilder<> &Builder,
                                                     VPIntrinsic &VPI) {
  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  auto OC = static_cast<Instruction::BinaryOps>(*VPI.getFunctionalOpcode());
  assert(Instruction::isBinaryOp(OC));

  Value *Op0 = VPI.getOperand(0);
  Value *Op1 = VPI.getOperand(1);
  Value *Mask = VPI.getMaskParam();

  // Blend in safe operands.
  if (Mask && !isAllTrueMask(Mask)) {
    switch (OC) {
    default:
      // Can safely ignore the predicate.
      break;

    // Division operators need a safe divisor (here: 1) on masked-off lanes.
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
      // The second operand must not be zero.
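      // E.g. a vp.udiv of %a by %b under mask %m becomes:
      //   udiv %a, (select %m, %b, splat(1))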
      Value *SafeDivisor = getSafeDivisor(VPI.getType());
      Op1 = Builder.CreateSelect(Mask, Op1, SafeDivisor);
    }
  }

  Value *NewBinOp = Builder.CreateBinOp(OC, Op0, Op1, VPI.getName());

  replaceOperation(*NewBinOp, VPI);
  return NewBinOp;
}

Value *CachingVPExpander::expandPredicationToIntCall(IRBuilder<> &Builder,
                                                     VPIntrinsic &VPI) {
  std::optional<unsigned> FID = VPI.getFunctionalIntrinsicID();
  if (!FID)
    return nullptr;
  SmallVector<Value *, 2> Argument;
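  // Forward only the data operands; the trailing %mask and %evl parameters
  // (and the callee operand of the call) are dropped.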
  for (unsigned i = 0; i < VPI.getNumOperands() - 3; i++) {
    Argument.push_back(VPI.getOperand(i));
  }
  Value *NewOp = Builder.CreateIntrinsic(FID.value(), {VPI.getType()}, Argument,
                                         /*FMFSource=*/nullptr, VPI.getName());
  replaceOperation(*NewOp, VPI);
  return NewOp;
}

Value *CachingVPExpander::expandPredicationToFPCall(
    IRBuilder<> &Builder, VPIntrinsic &VPI, unsigned UnpredicatedIntrinsicID) {
  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  switch (UnpredicatedIntrinsicID) {
  case Intrinsic::fabs:
  case Intrinsic::sqrt:
  case Intrinsic::maxnum:
  case Intrinsic::minnum: {
    SmallVector<Value *, 2> Argument;
    for (unsigned i = 0; i < VPI.getNumOperands() - 3; i++) {
      Argument.push_back(VPI.getOperand(i));
    }
    Value *NewOp = Builder.CreateIntrinsic(
        UnpredicatedIntrinsicID, {VPI.getType()}, Argument,
        /*FMFSource=*/nullptr, VPI.getName());
    replaceOperation(*NewOp, VPI);
    return NewOp;
  }
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::experimental_constrained_fma:
  case Intrinsic::experimental_constrained_fmuladd: {
    Value *Op0 = VPI.getOperand(0);
    Value *Op1 = VPI.getOperand(1);
    Value *Op2 = VPI.getOperand(2);
    Function *Fn = Intrinsic::getOrInsertDeclaration(
        VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
    Value *NewOp;
    if (Intrinsic::isConstrainedFPIntrinsic(UnpredicatedIntrinsicID))
      NewOp =
          Builder.CreateConstrainedFPCall(Fn, {Op0, Op1, Op2}, VPI.getName());
    else
      NewOp = Builder.CreateCall(Fn, {Op0, Op1, Op2}, VPI.getName());
    replaceOperation(*NewOp, VPI);
    return NewOp;
  }
  }

  return nullptr;
}

static Value *getNeutralReductionElement(const VPReductionIntrinsic &VPI,
                                         Type *EltTy) {
  Intrinsic::ID RdxID = *VPI.getFunctionalIntrinsicID();
  FastMathFlags FMF;
  if (isa<FPMathOperator>(VPI))
    FMF = VPI.getFastMathFlags();
  return getReductionIdentity(RdxID, EltTy, FMF);
}

Value *
CachingVPExpander::expandPredicationInReduction(IRBuilder<> &Builder,
                                                VPReductionIntrinsic &VPI) {
  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  Value *Mask = VPI.getMaskParam();
  Value *RedOp = VPI.getOperand(VPI.getVectorParamPos());

  // Insert the neutral element in masked-off lanes.
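  // E.g. the identity for vp.reduce.add is 0 and for vp.reduce.mul it is 1,
  // so selecting it into the masked-off lanes leaves the result unchanged.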
  if (Mask && !isAllTrueMask(Mask)) {
    auto *NeutralElt = getNeutralReductionElement(VPI, VPI.getType());
    auto *NeutralVector = Builder.CreateVectorSplat(
        cast<VectorType>(RedOp->getType())->getElementCount(), NeutralElt);
    RedOp = Builder.CreateSelect(Mask, RedOp, NeutralVector);
  }

  Value *Reduction;
  Value *Start = VPI.getOperand(VPI.getStartParamPos());

  switch (VPI.getIntrinsicID()) {
  default:
    llvm_unreachable("Impossible reduction kind");
  case Intrinsic::vp_reduce_add:
  case Intrinsic::vp_reduce_mul:
  case Intrinsic::vp_reduce_and:
  case Intrinsic::vp_reduce_or:
  case Intrinsic::vp_reduce_xor: {
    Intrinsic::ID RedID = *VPI.getFunctionalIntrinsicID();
    unsigned Opc = getArithmeticReductionInstruction(RedID);
    assert(Instruction::isBinaryOp(Opc));
    Reduction = Builder.CreateUnaryIntrinsic(RedID, RedOp);
    Reduction =
        Builder.CreateBinOp((Instruction::BinaryOps)Opc, Reduction, Start);
    break;
  }
  case Intrinsic::vp_reduce_smax:
  case Intrinsic::vp_reduce_smin:
  case Intrinsic::vp_reduce_umax:
  case Intrinsic::vp_reduce_umin:
  case Intrinsic::vp_reduce_fmax:
  case Intrinsic::vp_reduce_fmin:
  case Intrinsic::vp_reduce_fmaximum:
  case Intrinsic::vp_reduce_fminimum: {
    Intrinsic::ID RedID = *VPI.getFunctionalIntrinsicID();
    Intrinsic::ID ScalarID = getMinMaxReductionIntrinsicOp(RedID);
    Reduction = Builder.CreateUnaryIntrinsic(RedID, RedOp);
    transferDecorations(*Reduction, VPI);
    Reduction = Builder.CreateBinaryIntrinsic(ScalarID, Reduction, Start);
    break;
  }
  case Intrinsic::vp_reduce_fadd:
    Reduction = Builder.CreateFAddReduce(Start, RedOp);
    break;
  case Intrinsic::vp_reduce_fmul:
    Reduction = Builder.CreateFMulReduce(Start, RedOp);
    break;
  }

  replaceOperation(*Reduction, VPI);
  return Reduction;
}

Value *CachingVPExpander::expandPredicationToCastIntrinsic(IRBuilder<> &Builder,
                                                           VPIntrinsic &VPI) {
  Intrinsic::ID VPID = VPI.getIntrinsicID();
  unsigned CastOpcode = VPIntrinsic::getFunctionalOpcodeForVP(VPID).value();
  assert(Instruction::isCast(CastOpcode));
  Value *CastOp =
      Builder.CreateCast(Instruction::CastOps(CastOpcode), VPI.getOperand(0),
                         VPI.getType(), VPI.getName());

  replaceOperation(*CastOp, VPI);
  return CastOp;
}

Value *
CachingVPExpander::expandPredicationInMemoryIntrinsic(IRBuilder<> &Builder,
                                                      VPIntrinsic &VPI) {
  assert(VPI.canIgnoreVectorLengthParam());

  const auto &DL = VPI.getDataLayout();

  Value *MaskParam = VPI.getMaskParam();
  Value *PtrParam = VPI.getMemoryPointerParam();
  Value *DataParam = VPI.getMemoryDataParam();
  bool IsUnmasked = isAllTrueMask(MaskParam);

  MaybeAlign AlignOpt = VPI.getPointerAlignment();

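  // With an all-true mask the operation degenerates to a regular load/store;
  // otherwise fall back to the corresponding llvm.masked.* intrinsic, which
  // requires an explicit alignment.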
  Value *NewMemoryInst = nullptr;
  switch (VPI.getIntrinsicID()) {
  default:
    llvm_unreachable("Not a VP memory intrinsic");
  case Intrinsic::vp_store:
    if (IsUnmasked) {
      StoreInst *NewStore =
          Builder.CreateStore(DataParam, PtrParam, /*IsVolatile*/ false);
      if (AlignOpt.has_value())
        NewStore->setAlignment(*AlignOpt);
      NewMemoryInst = NewStore;
    } else
      NewMemoryInst = Builder.CreateMaskedStore(
          DataParam, PtrParam, AlignOpt.valueOrOne(), MaskParam);

    break;
  case Intrinsic::vp_load:
    if (IsUnmasked) {
      LoadInst *NewLoad =
          Builder.CreateLoad(VPI.getType(), PtrParam, /*IsVolatile*/ false);
      if (AlignOpt.has_value())
        NewLoad->setAlignment(*AlignOpt);
      NewMemoryInst = NewLoad;
    } else
      NewMemoryInst = Builder.CreateMaskedLoad(
          VPI.getType(), PtrParam, AlignOpt.valueOrOne(), MaskParam);

    break;
  case Intrinsic::vp_scatter: {
    auto *ElementType =
        cast<VectorType>(DataParam->getType())->getElementType();
    NewMemoryInst = Builder.CreateMaskedScatter(
        DataParam, PtrParam,
        AlignOpt.value_or(DL.getPrefTypeAlign(ElementType)), MaskParam);
    break;
  }
  case Intrinsic::vp_gather: {
    auto *ElementType = cast<VectorType>(VPI.getType())->getElementType();
    NewMemoryInst = Builder.CreateMaskedGather(
        VPI.getType(), PtrParam,
        AlignOpt.value_or(DL.getPrefTypeAlign(ElementType)), MaskParam, nullptr,
        VPI.getName());
    break;
  }
  }

  assert(NewMemoryInst);
  replaceOperation(*NewMemoryInst, VPI);
  return NewMemoryInst;
}

Value *CachingVPExpander::expandPredicationInComparison(IRBuilder<> &Builder,
                                                        VPCmpIntrinsic &VPI) {
  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  assert(*VPI.getFunctionalOpcode() == Instruction::ICmp ||
         *VPI.getFunctionalOpcode() == Instruction::FCmp);

  Value *Op0 = VPI.getOperand(0);
  Value *Op1 = VPI.getOperand(1);
  auto Pred = VPI.getPredicate();

  auto *NewCmp = Builder.CreateCmp(Pred, Op0, Op1);

  replaceOperation(*NewCmp, VPI);
  return NewCmp;
}

bool CachingVPExpander::discardEVLParameter(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Discard EVL parameter in " << VPI << "\n");

  if (VPI.canIgnoreVectorLengthParam())
    return false;

  Value *EVLParam = VPI.getVectorLengthParam();
  if (!EVLParam)
    return false;

  ElementCount StaticElemCount = VPI.getStaticVectorLength();
  Value *MaxEVL = nullptr;
  Type *Int32Ty = Type::getInt32Ty(VPI.getContext());
  if (StaticElemCount.isScalable()) {
    // TODO add caching
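    // For scalable vectors the full length is only known as a multiple of
    // vscale, e.g. <vscale x 4 x i32> has vscale * 4 lanes.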
    IRBuilder<> Builder(VPI.getParent(), VPI.getIterator());
    Value *FactorConst = Builder.getInt32(StaticElemCount.getKnownMinValue());
    Value *VScale = Builder.CreateIntrinsic(Intrinsic::vscale, Int32Ty, {},
                                            /*FMFSource=*/nullptr, "vscale");
    MaxEVL = Builder.CreateMul(VScale, FactorConst, "scalable_size",
                               /*NUW*/ true, /*NSW*/ false);
  } else {
    MaxEVL = ConstantInt::get(Int32Ty, StaticElemCount.getFixedValue(), false);
  }
  VPI.setVectorLengthParam(MaxEVL);
  return true;
}

std::pair<Value *, bool> CachingVPExpander::foldEVLIntoMask(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Folding vlen for " << VPI << '\n');

  IRBuilder<> Builder(&VPI);

  // The %evl parameter is ineffective, so there is nothing to do here.
  if (VPI.canIgnoreVectorLengthParam())
    return {&VPI, false};

  // Only VP intrinsics can have an %evl parameter.
  Value *OldMaskParam = VPI.getMaskParam();
  Value *OldEVLParam = VPI.getVectorLengthParam();
  assert(OldMaskParam && "no mask param to fold the vl param into");
  assert(OldEVLParam && "no EVL param to fold away");

  LLVM_DEBUG(dbgs() << "OLD evl: " << *OldEVLParam << '\n');
  LLVM_DEBUG(dbgs() << "OLD mask: " << *OldMaskParam << '\n');

  // Convert the %evl predication into vector mask predication.
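  // The new mask is the conjunction of the lane mask implied by %evl and the
  // original mask, e.g. (get_active_lane_mask(0, %evl) & %mask) in the
  // scalable case.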
  ElementCount ElemCount = VPI.getStaticVectorLength();
  Value *VLMask = convertEVLToMask(Builder, OldEVLParam, ElemCount);
  Value *NewMaskParam = Builder.CreateAnd(VLMask, OldMaskParam);
  VPI.setMaskParam(NewMaskParam);

  // Drop the %evl parameter.
  discardEVLParameter(VPI);
  assert(VPI.canIgnoreVectorLengthParam() &&
         "transformation did not render the evl param ineffective!");

  // Reassess the modified instruction.
  return {&VPI, true};
}

Value *CachingVPExpander::expandPredication(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Lowering to unpredicated op: " << VPI << '\n');

  IRBuilder<> Builder(&VPI);

  // Try lowering to an LLVM instruction first.
  auto OC = VPI.getFunctionalOpcode();

  if (OC && Instruction::isBinaryOp(*OC))
    return expandPredicationInBinaryOperator(Builder, VPI);

  if (auto *VPRI = dyn_cast<VPReductionIntrinsic>(&VPI))
    return expandPredicationInReduction(Builder, *VPRI);

  if (auto *VPCmp = dyn_cast<VPCmpIntrinsic>(&VPI))
    return expandPredicationInComparison(Builder, *VPCmp);

  if (VPCastIntrinsic::isVPCast(VPI.getIntrinsicID()))
    return expandPredicationToCastIntrinsic(Builder, VPI);

  switch (VPI.getIntrinsicID()) {
  default:
    break;
  case Intrinsic::vp_fneg: {
    Value *NewNegOp = Builder.CreateFNeg(VPI.getOperand(0), VPI.getName());
    replaceOperation(*NewNegOp, VPI);
    return NewNegOp;
  }
  case Intrinsic::vp_abs:
  case Intrinsic::vp_smax:
  case Intrinsic::vp_smin:
  case Intrinsic::vp_umax:
  case Intrinsic::vp_umin:
  case Intrinsic::vp_bswap:
  case Intrinsic::vp_bitreverse:
  case Intrinsic::vp_ctpop:
  case Intrinsic::vp_ctlz:
  case Intrinsic::vp_cttz:
  case Intrinsic::vp_sadd_sat:
  case Intrinsic::vp_uadd_sat:
  case Intrinsic::vp_ssub_sat:
  case Intrinsic::vp_usub_sat:
  case Intrinsic::vp_fshl:
  case Intrinsic::vp_fshr:
    return expandPredicationToIntCall(Builder, VPI);
  case Intrinsic::vp_fabs:
  case Intrinsic::vp_sqrt:
  case Intrinsic::vp_maxnum:
  case Intrinsic::vp_minnum:
  case Intrinsic::vp_maximum:
  case Intrinsic::vp_minimum:
  case Intrinsic::vp_fma:
  case Intrinsic::vp_fmuladd:
    return expandPredicationToFPCall(Builder, VPI,
                                     VPI.getFunctionalIntrinsicID().value());
  case Intrinsic::vp_load:
  case Intrinsic::vp_store:
  case Intrinsic::vp_gather:
  case Intrinsic::vp_scatter:
    return expandPredicationInMemoryIntrinsic(Builder, VPI);
  }

  if (auto CID = VPI.getConstrainedIntrinsicID())
    if (Value *Call = expandPredicationToFPCall(Builder, VPI, *CID))
      return Call;

  return &VPI;
}

//// } CachingVPExpander

void sanitizeStrategy(VPIntrinsic &VPI, VPLegalization &LegalizeStrat) {
  // Operations with speculatable lanes do not strictly need predication.
  if (maySpeculateLanes(VPI)) {
    // Converting a speculatable VP intrinsic means dropping %mask and %evl.
    // No need to expand %evl into the %mask only to ignore that code.
    if (LegalizeStrat.OpStrategy == VPLegalization::Convert)
      LegalizeStrat.EVLParamStrategy = VPLegalization::Discard;
    return;
  }

  // We have to preserve the predicating effect of %evl for this
  // non-speculatable VP intrinsic.
  // 1) Never discard %evl.
  // 2) If this VP intrinsic will be expanded to non-VP code, make sure that
  //    %evl gets folded into %mask.
  if ((LegalizeStrat.EVLParamStrategy == VPLegalization::Discard) ||
      (LegalizeStrat.OpStrategy == VPLegalization::Convert)) {
    LegalizeStrat.EVLParamStrategy = VPLegalization::Convert;
  }
}

VPLegalization
CachingVPExpander::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
  auto VPStrat = TTI.getVPLegalizationStrategy(VPI);
  if (LLVM_LIKELY(!UsingTTIOverrides)) {
    // No overrides - we are in production.
    return VPStrat;
  }

  // Overrides set - we are in testing, the following does not need to be
  // efficient.
  VPStrat.EVLParamStrategy = parseOverrideOption(EVLTransformOverride);
  VPStrat.OpStrategy = parseOverrideOption(MaskTransformOverride);
  return VPStrat;
}

VPExpansionDetails
CachingVPExpander::expandVectorPredication(VPIntrinsic &VPI) {
  auto Strategy = getVPLegalizationStrategy(VPI);
  sanitizeStrategy(VPI, Strategy);

  VPExpansionDetails Changed = VPExpansionDetails::IntrinsicUnchanged;

  // Transform the EVL parameter.
  switch (Strategy.EVLParamStrategy) {
  case VPLegalization::Legal:
    break;
  case VPLegalization::Discard:
    if (discardEVLParameter(VPI))
      Changed = VPExpansionDetails::IntrinsicUpdated;
    break;
  case VPLegalization::Convert:
    if (auto [NewVPI, Folded] = foldEVLIntoMask(VPI); Folded) {
      (void)NewVPI;
      Changed = VPExpansionDetails::IntrinsicUpdated;
      ++NumFoldedVL;
    }
    break;
  }

  // Replace with a non-predicated operation.
  switch (Strategy.OpStrategy) {
  case VPLegalization::Legal:
    break;
  case VPLegalization::Discard:
    llvm_unreachable("Invalid strategy for operators.");
  case VPLegalization::Convert:
    if (Value *V = expandPredication(VPI); V != &VPI) {
      ++NumLoweredVPOps;
      Changed = VPExpansionDetails::IntrinsicReplaced;
    }
    break;
  }

  return Changed;
}
} // namespace

VPExpansionDetails
llvm::expandVectorPredicationIntrinsic(VPIntrinsic &VPI,
                                       const TargetTransformInfo &TTI) {
  return CachingVPExpander(TTI).expandVectorPredication(VPI);
}