Lines matching defs:VPI

101 static void transferDecorations(Value &NewVal, VPIntrinsic &VPI) {
106 auto *OldFMOp = dyn_cast<FPMathOperator>(&VPI);
121 static bool maySpeculateLanes(VPIntrinsic &VPI) {
123 if (isa<VPReductionIntrinsic>(VPI))
126 if (auto IntrID = VPI.getFunctionalIntrinsicID())
127 return Intrinsic::getAttributes(VPI.getContext(), *IntrID)
129 if (auto Opc = VPI.getFunctionalOpcode())
130 return isSafeToSpeculativelyExecuteWithOpcode(*Opc, &VPI);
167 Value *foldEVLIntoMask(VPIntrinsic &VPI);
191 VPIntrinsic &VPI);
195 VPIntrinsic &VPI);
204 /// Determine how and whether the VPIntrinsic \p VPI shall be expanded. This
206 VPLegalization getVPLegalizationStrategy(const VPIntrinsic &VPI) const;
254 VPIntrinsic &VPI) {
255 assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
258 auto OC = static_cast<Instruction::BinaryOps>(*VPI.getFunctionalOpcode());
261 Value *Op0 = VPI.getOperand(0);
262 Value *Op1 = VPI.getOperand(1);
263 Value *Mask = VPI.getMaskParam();
278 Value *SafeDivisor = getSafeDivisor(VPI.getType());
283 Value *NewBinOp = Builder.CreateBinOp(OC, Op0, Op1, VPI.getName());
285 replaceOperation(*NewBinOp, VPI);
290 IRBuilder<> &Builder, VPIntrinsic &VPI, unsigned UnpredicatedIntrinsicID) {
297 Value *Op0 = VPI.getOperand(0);
298 Value *Op1 = VPI.getOperand(1);
300 VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
301 Value *NewOp = Builder.CreateCall(Fn, {Op0, Op1}, VPI.getName());
302 replaceOperation(*NewOp, VPI);
307 Value *Op = VPI.getOperand(0);
309 VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
310 Value *NewOp = Builder.CreateCall(Fn, {Op}, VPI.getName());
311 replaceOperation(*NewOp, VPI);
319 IRBuilder<> &Builder, VPIntrinsic &VPI, unsigned UnpredicatedIntrinsicID) {
320 assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
326 Value *Op0 = VPI.getOperand(0);
328 VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
329 Value *NewOp = Builder.CreateCall(Fn, {Op0}, VPI.getName());
330 replaceOperation(*NewOp, VPI);
335 Value *Op0 = VPI.getOperand(0);
336 Value *Op1 = VPI.getOperand(1);
338 VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
339 Value *NewOp = Builder.CreateCall(Fn, {Op0, Op1}, VPI.getName());
340 replaceOperation(*NewOp, VPI);
347 Value *Op0 = VPI.getOperand(0);
348 Value *Op1 = VPI.getOperand(1);
349 Value *Op2 = VPI.getOperand(2);
351 VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
355 Builder.CreateConstrainedFPCall(Fn, {Op0, Op1, Op2}, VPI.getName());
357 NewOp = Builder.CreateCall(Fn, {Op0, Op1, Op2}, VPI.getName());
358 replaceOperation(*NewOp, VPI);
366 static Value *getNeutralReductionElement(const VPReductionIntrinsic &VPI,
370 Intrinsic::ID VID = VPI.getIntrinsicID();
398 FastMathFlags Flags = VPI.getFastMathFlags();
416 VPReductionIntrinsic &VPI) {
417 assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
420 Value *Mask = VPI.getMaskParam();
421 Value *RedOp = VPI.getOperand(VPI.getVectorParamPos());
425 auto *NeutralElt = getNeutralReductionElement(VPI, VPI.getType());
432 Value *Start = VPI.getOperand(VPI.getStartParamPos());
434 switch (VPI.getIntrinsicID()) {
479 transferDecorations(*Reduction, VPI);
485 transferDecorations(*Reduction, VPI);
491 transferDecorations(*Reduction, VPI);
497 transferDecorations(*Reduction, VPI);
509 replaceOperation(*Reduction, VPI);
514 VPIntrinsic &VPI) {
516 switch (VPI.getIntrinsicID()) {
521 Builder.CreateSExt(VPI.getOperand(0), VPI.getType(), VPI.getName());
525 Builder.CreateZExt(VPI.getOperand(0), VPI.getType(), VPI.getName());
529 Builder.CreateTrunc(VPI.getOperand(0), VPI.getType(), VPI.getName());
533 Builder.CreateIntToPtr(VPI.getOperand(0), VPI.getType(), VPI.getName());
537 Builder.CreatePtrToInt(VPI.getOperand(0), VPI.getType(), VPI.getName());
541 Builder.CreateFPToSI(VPI.getOperand(0), VPI.getType(), VPI.getName());
546 Builder.CreateFPToUI(VPI.getOperand(0), VPI.getType(), VPI.getName());
550 Builder.CreateSIToFP(VPI.getOperand(0), VPI.getType(), VPI.getName());
554 Builder.CreateUIToFP(VPI.getOperand(0), VPI.getType(), VPI.getName());
558 Builder.CreateFPTrunc(VPI.getOperand(0), VPI.getType(), VPI.getName());
562 Builder.CreateFPExt(VPI.getOperand(0), VPI.getType(), VPI.getName());
565 replaceOperation(*CastOp, VPI);
571 VPIntrinsic &VPI) {
572 assert(VPI.canIgnoreVectorLengthParam());
576 Value *MaskParam = VPI.getMaskParam();
577 Value *PtrParam = VPI.getMemoryPointerParam();
578 Value *DataParam = VPI.getMemoryDataParam();
581 MaybeAlign AlignOpt = VPI.getPointerAlignment();
584 switch (VPI.getIntrinsicID()) {
602 Builder.CreateLoad(VPI.getType(), PtrParam, /*IsVolatile*/ false);
608 VPI.getType(), PtrParam, AlignOpt.valueOrOne(), MaskParam);
620 auto *ElementType = cast<VectorType>(VPI.getType())->getElementType();
622 VPI.getType(), PtrParam,
624 VPI.getName());
630 replaceOperation(*NewMemoryInst, VPI);
635 VPCmpIntrinsic &VPI) {
636 assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
639 assert(*VPI.getFunctionalOpcode() == Instruction::ICmp ||
640 *VPI.getFunctionalOpcode() == Instruction::FCmp);
642 Value *Op0 = VPI.getOperand(0);
643 Value *Op1 = VPI.getOperand(1);
644 auto Pred = VPI.getPredicate();
648 replaceOperation(*NewCmp, VPI);
652 void CachingVPExpander::discardEVLParameter(VPIntrinsic &VPI) {
653 LLVM_DEBUG(dbgs() << "Discard EVL parameter in " << VPI << "\n");
655 if (VPI.canIgnoreVectorLengthParam())
658 Value *EVLParam = VPI.getVectorLengthParam();
662 ElementCount StaticElemCount = VPI.getStaticVectorLength();
664 Type *Int32Ty = Type::getInt32Ty(VPI.getContext());
667 auto *M = VPI.getModule();
670 IRBuilder<> Builder(VPI.getParent(), VPI.getIterator());
678 VPI.setVectorLengthParam(MaxEVL);
681 Value *CachingVPExpander::foldEVLIntoMask(VPIntrinsic &VPI) {
682 LLVM_DEBUG(dbgs() << "Folding vlen for " << VPI << '\n');
684 IRBuilder<> Builder(&VPI);
687 if (VPI.canIgnoreVectorLengthParam())
688 return &VPI;
691 Value *OldMaskParam = VPI.getMaskParam();
692 Value *OldEVLParam = VPI.getVectorLengthParam();
700 ElementCount ElemCount = VPI.getStaticVectorLength();
703 VPI.setMaskParam(NewMaskParam);
706 discardEVLParameter(VPI);
707 assert(VPI.canIgnoreVectorLengthParam() &&
711 return &VPI;
714 Value *CachingVPExpander::expandPredication(VPIntrinsic &VPI) {
715 LLVM_DEBUG(dbgs() << "Lowering to unpredicated op: " << VPI << '\n');
717 IRBuilder<> Builder(&VPI);
720 auto OC = VPI.getFunctionalOpcode();
723 return expandPredicationInBinaryOperator(Builder, VPI);
725 if (auto *VPRI = dyn_cast<VPReductionIntrinsic>(&VPI))
728 if (auto *VPCmp = dyn_cast<VPCmpIntrinsic>(&VPI))
731 if (VPCastIntrinsic::isVPCast(VPI.getIntrinsicID())) {
732 return expandPredicationToCastIntrinsic(Builder, VPI);
735 switch (VPI.getIntrinsicID()) {
739 Value *NewNegOp = Builder.CreateFNeg(VPI.getOperand(0), VPI.getName());
740 replaceOperation(*NewNegOp, VPI);
750 return expandPredicationToIntCall(Builder, VPI,
751 VPI.getFunctionalIntrinsicID().value());
760 return expandPredicationToFPCall(Builder, VPI,
761 VPI.getFunctionalIntrinsicID().value());
766 return expandPredicationInMemoryIntrinsic(Builder, VPI);
769 if (auto CID = VPI.getConstrainedIntrinsicID())
770 if (Value *Call = expandPredicationToFPCall(Builder, VPI, *CID))
773 return &VPI;
787 void sanitizeStrategy(VPIntrinsic &VPI, VPLegalization &LegalizeStrat) {
789 if (maySpeculateLanes(VPI)) {
809 CachingVPExpander::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
810 auto VPStrat = TTI.getVPLegalizationStrategy(VPI);
830 auto *VPI = dyn_cast<VPIntrinsic>(&I);
831 if (!VPI)
833 auto VPStrat = getVPLegalizationStrategy(*VPI);
834 sanitizeStrategy(*VPI, VPStrat);
836 Worklist.emplace_back(VPI, VPStrat);
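
The hits around lines 254-285 show the binary-operator path of the expander: a division or remainder cannot simply drop its mask, because a disabled lane may carry a zero divisor, so the divisor is first blended with a safe value under the mask and only then is the unpredicated IR operator emitted in place of the VP call. Below is a minimal sketch of that idea against the public LLVM C++ API; the function name lowerVPBinOpSketch is illustrative, and the pass's own plumbing (replaceOperation, getSafeDivisor, the speculation checks) is intentionally left out.

// Sketch: lower a VP integer binary op (e.g. vp.sdiv) to an unpredicated
// binop by first replacing the divisor in disabled lanes with a safe value.
// Illustrative only; assumes VPI is a VP intrinsic with a functional binop.
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

static Value *lowerVPBinOpSketch(IRBuilder<> &Builder, VPIntrinsic &VPI) {
  auto OC = static_cast<Instruction::BinaryOps>(*VPI.getFunctionalOpcode());
  Value *Op0 = VPI.getOperand(0);
  Value *Op1 = VPI.getOperand(1);
  Value *Mask = VPI.getMaskParam();

  switch (OC) {
  default:
    break;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem: {
    // A zero divisor in a disabled lane would trap, so blend in a divisor
    // of 1 wherever the mask is false before dropping the predicate.
    Value *SafeDivisor = ConstantInt::get(VPI.getType(), 1);
    Op1 = Builder.CreateSelect(Mask, Op1, SafeDivisor);
    break;
  }
  }
  // With trapping lanes neutralized, the plain IR binary operator is legal
  // for all lanes and can replace the VP call.
  return Builder.CreateBinOp(OC, Op0, Op1, VPI.getName());
}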
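
Similarly, the discardEVLParameter/foldEVLIntoMask hits around lines 652-711 cover the %evl legalization step: the explicit vector length is converted into a lane mask, ANDed into the existing mask parameter, and %evl is then reset so that it can be ignored. The following sketch covers only the fixed-width case; foldEVLIntoMaskSketch is an illustrative name, and the scalable-vector path (which would go through llvm.get.active.lane.mask) is omitted.

// Sketch: fold the %evl operand of a VP intrinsic into its mask for a
// fixed-width vector, then neutralize %evl to the full vector length.
// Illustrative only; not the pass's verbatim code.
#include <cassert>

#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

static void foldEVLIntoMaskSketch(IRBuilder<> &Builder, VPIntrinsic &VPI) {
  if (VPI.canIgnoreVectorLengthParam())
    return; // %evl already covers every lane; nothing to fold.

  Value *OldMask = VPI.getMaskParam();
  Value *EVL = VPI.getVectorLengthParam();
  ElementCount EC = VPI.getStaticVectorLength();
  assert(!EC.isScalable() && "sketch handles the fixed-width path only");

  // Build the lane mask <0, 1, ..., VF-1> ult splat(%evl).
  unsigned VF = EC.getFixedValue();
  Value *VLSplat = Builder.CreateVectorSplat(VF, EVL);
  Value *StepVec =
      Builder.CreateStepVector(VectorType::get(EVL->getType(), EC));
  Value *EVLMask = Builder.CreateICmpULT(StepVec, VLSplat);

  // AND it into the existing mask, then reset %evl to the static VF so the
  // vector-length parameter no longer constrains the operation.
  VPI.setMaskParam(Builder.CreateAnd(EVLMask, OldMask));
  VPI.setVectorLengthParam(ConstantInt::get(EVL->getType(), VF));
}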