Lines Matching defs:VPI
98 static void transferDecorations(Value &NewVal, VPIntrinsic &VPI) {
103 auto *OldFMOp = dyn_cast<FPMathOperator>(&VPI);
118 static bool maySpeculateLanes(VPIntrinsic &VPI) {
120 if (isa<VPReductionIntrinsic>(VPI))
123 if (auto IntrID = VPI.getFunctionalIntrinsicID())
124 return Intrinsic::getAttributes(VPI.getContext(), *IntrID)
126 if (auto Opc = VPI.getFunctionalOpcode())
127 return isSafeToSpeculativelyExecuteWithOpcode(*Opc, &VPI);
155 std::pair<Value *, bool> foldEVLIntoMask(VPIntrinsic &VPI);
179 VPIntrinsic &VPI);
183 VPIntrinsic &VPI);
192 /// Determine how and whether the VPIntrinsic \p VPI shall be expanded. This
194 VPLegalization getVPLegalizationStrategy(const VPIntrinsic &VPI) const;
203 VPExpansionDetails expandVectorPredication(VPIntrinsic &VPI);
232 VPIntrinsic &VPI) {
233 assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
236 auto OC = static_cast<Instruction::BinaryOps>(*VPI.getFunctionalOpcode());
239 Value *Op0 = VPI.getOperand(0);
240 Value *Op1 = VPI.getOperand(1);
241 Value *Mask = VPI.getMaskParam();
256 Value *SafeDivisor = getSafeDivisor(VPI.getType());
261 Value *NewBinOp = Builder.CreateBinOp(OC, Op0, Op1, VPI.getName());
263 replaceOperation(*NewBinOp, VPI);
268 VPIntrinsic &VPI) {
269 std::optional<unsigned> FID = VPI.getFunctionalIntrinsicID();
273 for (unsigned i = 0; i < VPI.getNumOperands() - 3; i++) {
274 Argument.push_back(VPI.getOperand(i));
276 Value *NewOp = Builder.CreateIntrinsic(FID.value(), {VPI.getType()}, Argument,
277 /*FMFSource=*/nullptr, VPI.getName());
278 replaceOperation(*NewOp, VPI);
283 IRBuilder<> &Builder, VPIntrinsic &VPI, unsigned UnpredicatedIntrinsicID) {
284 assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
293 for (unsigned i = 0; i < VPI.getNumOperands() - 3; i++) {
294 Argument.push_back(VPI.getOperand(i));
297 UnpredicatedIntrinsicID, {VPI.getType()}, Argument,
298 /*FMFSource=*/nullptr, VPI.getName());
299 replaceOperation(*NewOp, VPI);
306 Value *Op0 = VPI.getOperand(0);
307 Value *Op1 = VPI.getOperand(1);
308 Value *Op2 = VPI.getOperand(2);
310 VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
314 Builder.CreateConstrainedFPCall(Fn, {Op0, Op1, Op2}, VPI.getName());
316 NewOp = Builder.CreateCall(Fn, {Op0, Op1, Op2}, VPI.getName());
317 replaceOperation(*NewOp, VPI);
325 static Value *getNeutralReductionElement(const VPReductionIntrinsic &VPI,
327 Intrinsic::ID RdxID = *VPI.getFunctionalIntrinsicID();
329 if (isa<FPMathOperator>(VPI))
330 FMF = VPI.getFastMathFlags();
336 VPReductionIntrinsic &VPI) {
337 assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
340 Value *Mask = VPI.getMaskParam();
341 Value *RedOp = VPI.getOperand(VPI.getVectorParamPos());
345 auto *NeutralElt = getNeutralReductionElement(VPI, VPI.getType());
352 Value *Start = VPI.getOperand(VPI.getStartParamPos());
354 switch (VPI.getIntrinsicID()) {
362 Intrinsic::ID RedID = *VPI.getFunctionalIntrinsicID();
378 Intrinsic::ID RedID = *VPI.getFunctionalIntrinsicID();
381 transferDecorations(*Reduction, VPI);
393 replaceOperation(*Reduction, VPI);
398 VPIntrinsic &VPI) {
399 Intrinsic::ID VPID = VPI.getIntrinsicID();
403 Builder.CreateCast(Instruction::CastOps(CastOpcode), VPI.getOperand(0),
404 VPI.getType(), VPI.getName());
406 replaceOperation(*CastOp, VPI);
412 VPIntrinsic &VPI) {
413 assert(VPI.canIgnoreVectorLengthParam());
415 const auto &DL = VPI.getDataLayout();
417 Value *MaskParam = VPI.getMaskParam();
418 Value *PtrParam = VPI.getMemoryPointerParam();
419 Value *DataParam = VPI.getMemoryDataParam();
422 MaybeAlign AlignOpt = VPI.getPointerAlignment();
425 switch (VPI.getIntrinsicID()) {
443 Builder.CreateLoad(VPI.getType(), PtrParam, /*IsVolatile*/ false);
449 VPI.getType(), PtrParam, AlignOpt.valueOrOne(), MaskParam);
461 auto *ElementType = cast<VectorType>(VPI.getType())->getElementType();
463 VPI.getType(), PtrParam,
465 VPI.getName());
471 replaceOperation(*NewMemoryInst, VPI);
476 VPCmpIntrinsic &VPI) {
477 assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
480 assert(*VPI.getFunctionalOpcode() == Instruction::ICmp ||
481 *VPI.getFunctionalOpcode() == Instruction::FCmp);
483 Value *Op0 = VPI.getOperand(0);
484 Value *Op1 = VPI.getOperand(1);
485 auto Pred = VPI.getPredicate();
489 replaceOperation(*NewCmp, VPI);
493 bool CachingVPExpander::discardEVLParameter(VPIntrinsic &VPI) {
494 LLVM_DEBUG(dbgs() << "Discard EVL parameter in " << VPI << "\n");
496 if (VPI.canIgnoreVectorLengthParam())
499 Value *EVLParam = VPI.getVectorLengthParam();
503 ElementCount StaticElemCount = VPI.getStaticVectorLength();
505 Type *Int32Ty = Type::getInt32Ty(VPI.getContext());
508 IRBuilder<> Builder(VPI.getParent(), VPI.getIterator());
517 VPI.setVectorLengthParam(MaxEVL);
521 std::pair<Value *, bool> CachingVPExpander::foldEVLIntoMask(VPIntrinsic &VPI) {
522 LLVM_DEBUG(dbgs() << "Folding vlen for " << VPI << '\n');
524 IRBuilder<> Builder(&VPI);
527 if (VPI.canIgnoreVectorLengthParam())
528 return {&VPI, false};
531 Value *OldMaskParam = VPI.getMaskParam();
532 Value *OldEVLParam = VPI.getVectorLengthParam();
540 ElementCount ElemCount = VPI.getStaticVectorLength();
543 VPI.setMaskParam(NewMaskParam);
546 discardEVLParameter(VPI);
547 assert(VPI.canIgnoreVectorLengthParam() &&
551 return {&VPI, true};
554 Value *CachingVPExpander::expandPredication(VPIntrinsic &VPI) {
555 LLVM_DEBUG(dbgs() << "Lowering to unpredicated op: " << VPI << '\n');
557 IRBuilder<> Builder(&VPI);
560 auto OC = VPI.getFunctionalOpcode();
563 return expandPredicationInBinaryOperator(Builder, VPI);
565 if (auto *VPRI = dyn_cast<VPReductionIntrinsic>(&VPI))
568 if (auto *VPCmp = dyn_cast<VPCmpIntrinsic>(&VPI))
571 if (VPCastIntrinsic::isVPCast(VPI.getIntrinsicID())) {
572 return expandPredicationToCastIntrinsic(Builder, VPI);
575 switch (VPI.getIntrinsicID()) {
579 Value *NewNegOp = Builder.CreateFNeg(VPI.getOperand(0), VPI.getName());
580 replaceOperation(*NewNegOp, VPI);
599 return expandPredicationToIntCall(Builder, VPI);
608 return expandPredicationToFPCall(Builder, VPI,
609 VPI.getFunctionalIntrinsicID().value());
614 return expandPredicationInMemoryIntrinsic(Builder, VPI);
617 if (auto CID = VPI.getConstrainedIntrinsicID())
618 if (Value *Call = expandPredicationToFPCall(Builder, VPI, *CID))
621 return &VPI;
626 void sanitizeStrategy(VPIntrinsic &VPI, VPLegalization &LegalizeStrat) {
628 if (maySpeculateLanes(VPI)) {
648 CachingVPExpander::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
649 auto VPStrat = TTI.getVPLegalizationStrategy(VPI);
663 CachingVPExpander::expandVectorPredication(VPIntrinsic &VPI) {
664 auto Strategy = getVPLegalizationStrategy(VPI);
665 sanitizeStrategy(VPI, Strategy);
674 if (discardEVLParameter(VPI))
678 if (auto [NewVPI, Folded] = foldEVLIntoMask(VPI); Folded) {
693 if (Value *V = expandPredication(VPI); V != &VPI) {
705 llvm::expandVectorPredicationIntrinsic(VPIntrinsic &VPI,
707 return CachingVPExpander(TTI).expandVectorPredication(VPI);
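
The last matches above show the public entry point, llvm::expandVectorPredicationIntrinsic, which wraps CachingVPExpander::expandVectorPredication. Below is a minimal sketch of how a caller might drive that entry point over a function, assuming the declarations in llvm/CodeGen/ExpandVectorPredication.h; the helper name expandAllVPIntrinsics is hypothetical, and the enumerator IntrinsicUnchanged is assumed from that header rather than taken from the listing above.

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/Analysis/TargetTransformInfo.h"
    #include "llvm/CodeGen/ExpandVectorPredication.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/InstIterator.h"
    #include "llvm/IR/IntrinsicInst.h"
    using namespace llvm;

    // Hypothetical driver: expand every vp.* intrinsic in F using the
    // target's legalization strategy (obtained through TTI).
    static bool expandAllVPIntrinsics(Function &F,
                                      const TargetTransformInfo &TTI) {
      bool Changed = false;
      // Early-increment iteration, because expansion may replace or erase
      // the visited vp.* call.
      for (Instruction &I : make_early_inc_range(instructions(F))) {
        auto *VPI = dyn_cast<VPIntrinsic>(&I);
        if (!VPI)
          continue;
        // The return value reports whether the intrinsic was left alone,
        // updated in place, or replaced by unpredicated IR.
        if (expandVectorPredicationIntrinsic(*VPI, TTI) !=
            VPExpansionDetails::IntrinsicUnchanged)
          Changed = true;
      }
      return Changed;
    }

This mirrors the flow visible in the listing: the expander first decides a legalization strategy per intrinsic (getVPLegalizationStrategy, sanitizeStrategy), then either discards the EVL parameter, folds it into the mask, or expands the predication entirely.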