Lines matching refs:Ops
(a reference search over clang's builtin lowering: EmitNeonCall, the AArch64 NEON/SVE/SME emitters, and the X86 masked-op helpers, apparently from clang/lib/CodeGen/CGBuiltin.cpp)
6402 Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
6412 Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
6414 Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
6418 return Builder.CreateConstrainedFPCall(F, Ops, name);
6420 return Builder.CreateCall(F, Ops, name);
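The lines above are the core of EmitNeonCall: each operand is coerced to the intrinsic's declared parameter type (shift immediates become splatted shift vectors, everything else is bitcast), then a single call is emitted, via the constrained-FP builder when FP exception semantics are active. A minimal sketch of that normalize-then-call shape, not the clang implementation itself; the sketches added throughout this listing assume #include "llvm/IR/IRBuilder.h" (plus llvm/ADT/STLExtras.h and llvm/Support/MathExtras.h where noted) and using namespace llvm:

    // Coerce every operand to the intrinsic's parameter type, then call it.
    static Value *emitCoercedCall(IRBuilder<> &B, Function *F,
                                  MutableArrayRef<Value *> Ops,
                                  const Twine &Name) {
      FunctionType *FTy = F->getFunctionType();
      for (unsigned J = 0, E = FTy->getNumParams(); J != E; ++J)
        if (Ops[J]->getType() != FTy->getParamType(J))
          Ops[J] = B.CreateBitCast(Ops[J], FTy->getParamType(J));
      return B.CreateCall(F, Ops, Name);
    }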
7599 SmallVectorImpl<Value *> &Ops, const CallExpr *E) {
7621 std::swap(Ops[0], Ops[1]);
7637 if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
7641 assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
7644 Ops[j] = CGF.Builder.CreateTruncOrBitCast(
7645 Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
7646 Ops[j] =
7647 CGF.Builder.CreateInsertElement(PoisonValue::get(ArgTy), Ops[j], C0);
7650 Value *Result = CGF.EmitNeonCall(F, Ops, s);
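The TruncOrBitCast/InsertElement lines above handle the scalar-in-disguise case for SISD builtins: when the intrinsic wants a vector but the frontend has a scalar, the scalar is truncated or bitcast to the element type and planted in lane 0 of a poison vector. Roughly, under the same assumptions as the sketch above:

    // Scalar operand -> lane 0 of a poison vector of the expected type.
    static Value *wrapScalarInLane0(IRBuilder<> &B, Value *Scalar,
                                    VectorType *ArgTy) {
      Value *Elt = B.CreateTruncOrBitCast(Scalar, ArgTy->getElementType());
      return B.CreateInsertElement(PoisonValue::get(ArgTy), Elt,
                                   B.getInt64(0));
    }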
7662 SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
7705 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
7706 return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
7718 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
7719 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
7723 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
7724 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
7725 Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
7726 return Builder.CreateBitCast(Ops[0], Ty);
7733 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
7734 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
7735 Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
7740 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
7743 return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
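The vaddhn sequence above is a self-contained lowering: add in the wide type, logical-shift the sum right by half the element width, and truncate so only the high halves survive. As a standalone sketch:

    // vaddhn: add wide, lshr by half the element width, truncate.
    static Value *emitAddHighNarrow(IRBuilder<> &B, Value *A, Value *Bv,
                                    VectorType *WideTy, VectorType *NarrowTy) {
      Value *Sum = B.CreateAdd(B.CreateBitCast(A, WideTy),
                               B.CreateBitCast(Bv, WideTy), "vaddhn");
      Constant *ShiftAmt =
          ConstantInt::get(WideTy, WideTy->getScalarSizeInBits() / 2);
      Sum = B.CreateLShr(Sum, ShiftAmt, "vaddhn");   // keep the high half
      return B.CreateTrunc(Sum, NarrowTy, "vaddhn");
    }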
7749 std::swap(Ops[0], Ops[1]);
7771 return EmitNeonCall(F, Ops, NameHint);
7775 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
7779 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
7783 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
7787 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
7791 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
7797 Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
7801 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7804 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
7805 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
7810 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7813 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
7814 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
7821 return EmitNeonCall(F, Ops, "vcvt_n");
7830 return EmitNeonCall(F, Ops, "vcvt_n");
7846 return EmitNeonCall(F, Ops, "vcvt_n");
7860 Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
7861 return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
7862 : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
7913 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
7917 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
7922 int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
7927 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7928 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7929 return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
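vext above is pure shuffle bookkeeping: the two inputs are treated as one concatenated vector and N consecutive lanes are taken starting at the immediate. A sketch of the mask construction:

    // vext: lanes CV..CV+N-1 of the concatenation A ++ B.
    static Value *emitVext(IRBuilder<> &B, Value *A, Value *Bv, unsigned CV) {
      unsigned N = cast<FixedVectorType>(A->getType())->getNumElements();
      SmallVector<int, 16> Indices;
      for (unsigned I = 0; I != N; ++I)
        Indices.push_back(CV + I);
      return B.CreateShuffleVector(A, Bv, Indices, "vext");
    }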
7933 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7934 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7935 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
7940 {Ops[1], Ops[2], Ops[0]});
7945 Ops.push_back(getAlignmentValue32(PtrOp0));
7946 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
7956 Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
7957 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
7974 Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
7975 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
7983 Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
7984 return EmitNeonSplat(Ops[0], CI);
7994 for (unsigned I = 2; I < Ops.size() - 1; ++I)
7995 Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
7996 Ops.push_back(getAlignmentValue32(PtrOp1));
7997 Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), NameHint);
7998 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
8003 Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
8005 return Builder.CreateZExt(Ops[0], Ty, "vmovl");
8006 return Builder.CreateSExt(Ops[0], Ty, "vmovl");
8011 Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
8012 return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
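vmovl and vmovn above need no intrinsic at all: widening is a bitcast to the narrow vector type followed by sext/zext, and narrowing is a bitcast to the wide type followed by trunc. Sketch:

    // vmovl widens; flip to CreateTrunc for vmovn.
    static Value *emitVmovl(IRBuilder<> &B, Value *V, VectorType *NarrowTy,
                            VectorType *WideTy, bool Unsigned) {
      V = B.CreateBitCast(V, NarrowTy);
      return Unsigned ? B.CreateZExt(V, WideTy, "vmovl")
                      : B.CreateSExt(V, WideTy, "vmovl");
    }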
8022 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
8032 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
8042 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
8046 SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
8047 Ops[1] =
8049 Ops.resize(2);
8050 return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
8064 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
8073 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
8077 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
8081 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
8088 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
8094 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
8097 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
8104 return EmitNeonCall(F, Ops, "");
8108 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
8109 return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
8114 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
8116 Ops[0] = Builder.CreateZExt(Ops[0], VTy);
8118 Ops[0] = Builder.CreateSExt(Ops[0], VTy);
8119 Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
8120 return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
8125 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
8126 Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
8128 Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
8130 Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
8131 return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
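The vshll_n/vshrn_n lines follow the same widen-or-narrow recipe with a shift in the middle; vshrn_n, for instance, shifts in the wide type and then truncates. A sketch, where Amt stands for the splatted immediate that EmitNeonShiftVector builds:

    // vshrn_n: shift right in the wide type, then narrow.
    static Value *emitVshrn(IRBuilder<> &B, Value *V, Value *Amt,
                            VectorType *WideTy, VectorType *NarrowTy,
                            bool Unsigned) {
      V = B.CreateBitCast(V, WideTy);
      V = Unsigned ? B.CreateLShr(V, Amt) : B.CreateAShr(V, Amt);
      return B.CreateTrunc(V, NarrowTy, "vshrn_n");
    }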
8135 return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
8151 Ops.push_back(getAlignmentValue32(PtrOp0));
8152 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
8160 return EmitNeonCall(F, Ops, "");
8167 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
8168 return EmitNeonCall(F, Ops, "");
8181 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
8182 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
8185 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
8192 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
8193 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
8194 Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
8199 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
8202 return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
8206 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8207 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
8216 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
8217 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
8224 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
8225 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8226 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
8227 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
8229 return Builder.CreateSExt(Ops[0], Ty, "vtst");
8233 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8234 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
8242 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
8243 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
8250 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
8251 return EmitNeonCall(F, Ops, "");
8255 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8256 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
8265 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
8266 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
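vtrn, vuzp and vzip all share the two-shuffles-through-an-sret-pointer shape visible above; only the index pattern differs. For vtrn the loop looks roughly like this sketch:

    // vtrn: both result halves are shuffles stored through the sret pointer.
    static void emitVtrn(IRBuilder<> &B, Type *VTy, Value *RetPtr,
                         Value *Op1, Value *Op2, unsigned NumElts) {
      for (unsigned Vi = 0; Vi != 2; ++Vi) {
        SmallVector<int, 16> Indices;
        for (unsigned I = 0; I != NumElts; I += 2) {
          Indices.push_back(I + Vi);            // lane from the first input
          Indices.push_back(I + NumElts + Vi);  // same lane of the second
        }
        Value *Addr = B.CreateConstInBoundsGEP1_32(VTy, RetPtr, Vi);
        Value *SV = B.CreateShuffleVector(Op1, Op2, Indices, "vtrn");
        B.CreateStore(SV, Addr);                // alignment handling elided
      }
    }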
8278 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
8285 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
8292 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
8299 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
8306 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
8313 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vmmla");
8319 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla");
8326 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot");
8333 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
8338 return EmitNeonCall(F, Ops, "vcvtfp2bf");
8348 Value *Result = EmitNeonCall(F, Ops, NameHint);
8379 static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
8389 auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType());
8395 int PairPos = 0, End = Ops.size() - 1;
8397 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
8398 Ops[PairPos+1], Indices,
8407 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
8501 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
8502 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
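The MDString/MDNode pairs that recur through the rest of the listing are how register reads and writes name their target: the register name travels as metadata into llvm.read_register / llvm.write_register. A sketch, with "tpidr_el0" as an illustrative register only:

    // Read a named system register via metadata.
    static Value *readSysReg(IRBuilder<> &B, Module &M, StringRef SysReg,
                             Type *RegTy) {
      LLVMContext &Ctx = M.getContext();
      llvm::Metadata *Ops[] = {llvm::MDString::get(Ctx, SysReg)};
      llvm::MDNode *RegName = llvm::MDNode::get(Ctx, Ops);
      Value *Md = MetadataAsValue::get(Ctx, RegName);
      Function *F =
          Intrinsic::getDeclaration(&M, Intrinsic::read_register, {RegTy});
      return B.CreateCall(F, {Md});
    }
    // e.g. readSysReg(B, M, "tpidr_el0", B.getInt64Ty());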
8665 Value *Ops[2];
8667 Ops[i] = EmitScalarExpr(E->getArg(i));
8671 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
8973 SmallVector<Value*, 4> Ops;
9004 Ops.push_back(PtrOp0.emitRawPointer(*this));
9031 Ops.push_back(PtrOp1.emitRawPointer(*this));
9036 Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
9056 return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
9076 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
9079 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
9082 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
9085 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
9088 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
9092 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops,
9103 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
9104 Ops[3], Ops[4], Ops[5]});
9131 return Builder.CreateCall(F, Ops, "vcvtr");
9154 Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
9164 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9165 int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
9167 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
9173 Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
9176 return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
9180 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9183 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
9188 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
9192 Ops, "vqrshrun_n", 1, true);
9195 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
9199 Ops, "vqshrun_n", 1, true);
9203 Ops, "vrecpe");
9206 Ops, "vrshrn_n", 1, true);
9209 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
9210 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9211 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
9213 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
9214 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
9221 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
9223 Ops, "vsli_n");
9226 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
9227 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
9228 return Builder.CreateAdd(Ops[0], Ops[1]);
9233 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9234 Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
9235 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
9236 Ops[2] = getAlignmentValue32(PtrOp0);
9237 llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
9239 Tys), Ops);
9243 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9244 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
9245 return Builder.CreateStore(Ops[1],
9246 PtrOp0.withElementType(Ops[1]->getType()));
9250 Ops, "vtbl1");
9253 Ops, "vtbl2");
9256 Ops, "vtbl3");
9259 Ops, "vtbl4");
9262 Ops, "vtbx1");
9265 Ops, "vtbx2");
9268 Ops, "vtbx3");
9271 Ops, "vtbx4");
9419 llvm::SmallVector<Value *, 4> Ops;
9438 Ops.push_back(EmitScalarExpr(Addr));
9442 Value *LoadResult = Builder.CreateCall(F, Ops);
9456 llvm::SmallVector<Value *, 4> Ops;
9460 Ops.push_back(EmitScalarExpr(Addr));
9481 Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i}));
9486 Ops.push_back(llvm::ConstantInt::get(Int32Ty, i));
9487 ToReturn = Builder.CreateCall(F, Ops);
9488 Ops.pop_back();
9509 SmallVectorImpl<Value *> &Ops,
9566 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 1), nullptr, Ops[1],
9570 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 2), nullptr, Ops[2],
9574 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 3), nullptr, Ops[3],
9578 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 4), nullptr, Ops[4],
9583 packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 1), nullptr, Ops[2], Ty,
9587 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
9590 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
9595 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 2), Ops[0], Ops[3],
9600 packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 3), nullptr, Ops[4], Ty,
9604 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
9608 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
9613 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 4), Ops[0], Ops[5],
9647 return CGF.EmitNeonCall(F, Ops, s);
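The vtbl/vtbx entries above all funnel into packTBLDVectorList, whose recurring move (the shuffle lines earlier in this block) is concatenating two 64-bit table halves into one 128-bit table register. In isolation:

    // Concatenate two <8 x i8> halves into <16 x i8>; indices 0..15 pick
    // all of Lo and then all of Hi.
    static Value *concatPair(IRBuilder<> &B, Value *Lo, Value *Hi) {
      auto *HalfTy = cast<FixedVectorType>(Lo->getType());
      SmallVector<int, 16> Indices;
      for (unsigned I = 0, E = 2 * HalfTy->getNumElements(); I != E; ++I)
        Indices.push_back(I);
      return B.CreateShuffleVector(Lo, Hi, Indices);
    }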
9836 SmallVectorImpl<Value *> &Ops,
9843 if (Ops[1]->getType()->isVectorTy())
9847 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()});
9862 Ops[0] = EmitSVEPredicateCast(
9863 Ops[0], cast<llvm::ScalableVectorType>(F->getArg(0)->getType()));
9868 if (Ops.size() == 2) {
9869 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
9870 Ops.push_back(ConstantInt::get(Int64Ty, 0));
9875 if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) {
9878 Ops[2] = Builder.CreateShl(Ops[2], Log2_32(BytesPerElt));
9881 Value *Call = Builder.CreateCall(F, Ops);
9890 SmallVectorImpl<Value *> &Ops,
9898 Ops.insert(Ops.begin(), Ops.pop_back_val());
9901 if (Ops[2]->getType()->isVectorTy())
9905 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()});
9916 if (Ops.size() == 3) {
9917 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
9918 Ops.push_back(ConstantInt::get(Int64Ty, 0));
9923 Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy);
9932 Ops[1] = EmitSVEPredicateCast(
9933 Ops[1], cast<llvm::ScalableVectorType>(F->getArg(1)->getType()));
9937 if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) {
9940 Ops[3] = Builder.CreateShl(Ops[3], Log2_32(BytesPerElt));
9943 return Builder.CreateCall(F, Ops);
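Both the SVE gather and scatter paths above end with the same fix-up: when the builtin is element-indexed but the addressing mode wants byte offsets, the index is scaled by shifting left by log2 of the element size. That step in isolation (Log2_32 is from llvm/Support/MathExtras.h):

    // Element index -> byte offset for SVE gather/scatter addressing.
    static Value *scaleIndexToBytes(IRBuilder<> &B, Value *Index,
                                    Type *EltTy) {
      unsigned BytesPerElt = EltTy->getScalarSizeInBits() / 8;
      return B.CreateShl(Index, Log2_32(BytesPerElt)); // idx * sizeof(elt)
    }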
9947 SmallVectorImpl<Value *> &Ops,
9951 auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType());
9953 OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType());
9956 Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
9959 if (Ops[1]->getType()->isVectorTy()) {
9960 if (Ops.size() == 3) {
9962 Ops.push_back(ConstantInt::get(Int64Ty, 0));
9965 std::swap(Ops[2], Ops[3]);
9971 Ops[2] = Builder.CreateShl(Ops[2], Log2_32(BytesPerElt));
9976 return Builder.CreateCall(F, Ops);
9980 SmallVectorImpl<Value*> &Ops,
10008 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
10009 Value *BasePtr = Ops[1];
10012 if (Ops.size() > 2)
10013 BasePtr = Builder.CreateGEP(VTy, BasePtr, Ops[2]);
10028 SmallVectorImpl<Value*> &Ops,
10054 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
10055 Value *BasePtr = Ops[1];
10058 if (Ops.size() > (2 + N))
10059 BasePtr = Builder.CreateGEP(VTy, BasePtr, Ops[2]);
10064 for (unsigned I = Ops.size() - N; I < Ops.size(); ++I)
10065 Operands.push_back(Ops[I]);
10076 SmallVectorImpl<Value *> &Ops,
10081 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
10085 Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType());
10086 Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]});
10094 ArrayRef<Value *> Ops, unsigned BuiltinID) {
10097 return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)});
10101 SmallVectorImpl<Value *> &Ops,
10107 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
10108 Value *BasePtr = Ops[1];
10111 if (Ops.size() > 3)
10112 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
10114 Value *PrfOp = Ops.back();
10122 SmallVectorImpl<Value *> &Ops,
10149 Value *Predicate = EmitSVEPredicateCast(Ops[0], PredTy);
10150 Value *BasePtr = Ops[1];
10153 if (Ops.size() > 2)
10154 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
10170 SmallVectorImpl<Value *> &Ops,
10178 auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType());
10196 Value *Predicate = EmitSVEPredicateCast(Ops[0], PredTy);
10197 Value *BasePtr = Ops[1];
10200 if (Ops.size() == 4)
10201 BasePtr = Builder.CreateGEP(AddrMemoryTy, BasePtr, Ops[2]);
10205 IsQuadStore ? Ops.back() : Builder.CreateTrunc(Ops.back(), MemoryTy);
10217 SmallVectorImpl<Value *> &Ops,
10219 Ops[2] = EmitSVEPredicateCast(
10220 Ops[2], getSVEVectorForElementType(SVEBuiltinMemEltTy(TypeFlags)));
10223 NewOps.push_back(Ops[2]);
10225 llvm::Value *BasePtr = Ops[3];
10229 if (Ops.size() == 5) {
10235 Builder.CreateMul(StreamingVectorLengthCall, Ops[4], "mulvl");
10237 BasePtr = Builder.CreateGEP(Int8Ty, Ops[3], Mulvl);
10240 NewOps.push_back(Ops[0]);
10241 NewOps.push_back(Ops[1]);
10247 SmallVectorImpl<Value *> &Ops,
10252 Ops[1] = EmitSVEPredicateCast(Ops[1], VecTy);
10254 Ops[2] = EmitSVEPredicateCast(Ops[2], VecTy);
10255 return Builder.CreateCall(F, Ops);
10259 SmallVectorImpl<Value *> &Ops,
10262 if (Ops.size() == 0)
10263 Ops.push_back(llvm::ConstantInt::get(Int32Ty, 255));
10265 return Builder.CreateCall(F, Ops);
10269 SmallVectorImpl<Value *> &Ops,
10271 if (Ops.size() == 2)
10272 Ops.push_back(Builder.getInt32(0));
10274 Ops[2] = Builder.CreateIntCast(Ops[2], Int32Ty, true);
10276 return Builder.CreateCall(F, Ops);
10301 SmallVectorImpl<Value *> &Ops) {
10303 Ops.insert(Ops.begin(), SplatZero);
10307 SmallVectorImpl<Value *> &Ops) {
10309 Ops.insert(Ops.begin(), SplatUndef);
10315 ArrayRef<Value *> Ops) {
10322 return {DefaultType, Ops[1]->getType()};
10325 return {getSVEPredType(TypeFlags), Ops[0]->getType()};
10328 return {Ops[0]->getType(), Ops.back()->getType()};
10332 return {ResultType, Ops[1]->getType()};
10340 ArrayRef<Value *> Ops) {
10344 unsigned I = cast<ConstantInt>(Ops[1])->getSExtValue();
10346 TypeFlags.isTupleSet() ? Ops[2]->getType() : Ty);
10355 return Builder.CreateInsertVector(Ty, Ops[0], Ops[2], Idx);
10356 return Builder.CreateExtractVector(Ty, Ops[0], Idx);
10361 ArrayRef<Value *> Ops) {
10364 auto *SrcTy = dyn_cast<llvm::ScalableVectorType>(Ops[0]->getType());
10371 for (unsigned I = 0; I < Ops.size(); I++) {
10373 Call = Builder.CreateInsertVector(Ty, Call, Ops[I], Idx);
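SVE tuple values are built the same way they are read back: insertvector/extractvector at an index scaled by the per-part minimum element count. A sketch of the create side (the original seeds from poison/undef; poison is used here):

    // Build an SVE tuple by inserting each part at I * MinElts.
    static Value *createTuple(IRBuilder<> &B, ScalableVectorType *TupleTy,
                              ArrayRef<Value *> Parts) {
      auto *PartTy = cast<ScalableVectorType>(Parts[0]->getType());
      unsigned MinElts = PartTy->getMinNumElements();
      Value *Tuple = PoisonValue::get(TupleTy);
      for (unsigned I = 0, E = Parts.size(); I != E; ++I)
        Tuple = B.CreateInsertVector(TupleTy, Tuple, Parts[I],
                                     B.getInt64(I * MinElts));
      return Tuple;
    }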
10415 unsigned BuiltinID, const CallExpr *E, SmallVectorImpl<Value *> &Ops,
10442 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result));
10447 Ops.push_back(Arg);
10457 Ops.push_back(Arg);
10465 Ops.push_back(Builder.CreateExtractVector(NewVTy, Arg, Idx));
10482 llvm::SmallVector<Value *, 4> Ops;
10484 GetAArch64SVEProcessedOperands(BuiltinID, E, Ops, TypeFlags);
10487 return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
10490 return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic);
10492 return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10494 return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10496 return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10498 return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10500 return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10502 return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10504 return EmitSVETupleSetOrGet(TypeFlags, Ty, Ops);
10506 return EmitSVETupleCreate(TypeFlags, Ty, Ops);
10511 InsertExplicitZeroOperand(Builder, Ty, Ops);
10514 InsertExplicitUndefOperand(Builder, Ty, Ops);
10519 Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31));
10521 Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31));
10524 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
10525 if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
10527 Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
10532 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
10536 std::swap(Ops[1], Ops[2]);
10538 std::swap(Ops[1], Ops[2]);
10541 std::swap(Ops[1], Ops[2]);
10544 std::swap(Ops[1], Ops[3]);
10548 llvm::Type *OpndTy = Ops[1]->getType();
10550 Ops[1] = Builder.CreateSelect(Ops[0], Ops[1], SplatZero);
10554 getSVEOverloadTypes(TypeFlags, Ty, Ops));
10555 Value *Call = Builder.CreateCall(F, Ops);
10574 return Builder.CreateCall(CastFromSVCountF, Ops[0]);
10581 return Builder.CreateCall(CastToSVCountF, Ops[0]);
10592 bool IsSVCount = isa<TargetExtType>(Ops[0]->getType());
10593 assert(((!IsSVCount || cast<TargetExtType>(Ops[0]->getType())->getName() ==
10606 IsSVCount ? Builder.CreateCall(CastFromSVCountF, Ops[0]) : Ops[0];
10607 llvm::Value *Ops1 = EmitSVEPredicateCast(Ops[1], OverloadedTy);
10608 llvm::Value *PSel = Builder.CreateCall(F, {Ops0, Ops1, Ops[2]});
10616 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]});
10624 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]});
10630 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb);
10635 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb);
10640 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt);
10645 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt);
10651 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair);
10657 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair);
10664 Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType()));
10688 unsigned NumOpnds = Ops.size();
10696 llvm::Type *EltTy = Ops[0]->getType();
10702 VecOps.push_back(Builder.CreateZExt(Ops[I], EltTy));
10775 return Builder.CreateCall(F, Ops);
10790 return Builder.CreateInsertVector(Ty, Ops[0], Ops[1], Builder.getInt64(0));
10805 return Builder.CreateExtractVector(Ty, Ops[0], Builder.getInt64(0));
10820 Value *Insert = Builder.CreateInsertVector(Ty, PoisonValue::get(Ty), Ops[0],
10832 SmallVectorImpl<Value *> &Ops) {
10852 std::swap(Ops[I + 1], Ops[I + 1 + MultiVec]);
10860 llvm::SmallVector<Value *, 4> Ops;
10862 GetAArch64SVEProcessedOperands(BuiltinID, E, Ops, TypeFlags);
10865 return EmitSMELd1St1(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10867 return EmitSMEReadWrite(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10870 return EmitSMEZero(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10875 return EmitSMELdrStr(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10878 swapCommutativeSMEOperands(BuiltinID, Ops);
10885 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
10886 if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
10888 Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
10894 Value *Call = Builder.CreateCall(F, Ops);
11110 Value *Ops[2];
11112 Ops[i] = EmitScalarExpr(E->getArg(i));
11116 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
11227 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
11228 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
11435 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
11436 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
11495 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, "x18")};
11496 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
11519 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, "x18")};
11520 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
11627 llvm::SmallVector<Value*, 4> Ops;
11649 Ops.push_back(PtrOp0.emitRawPointer(*this));
11653 Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
11661 Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
11662 Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
11681 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11682 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
11685 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11686 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11687 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11688 Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
11690 return Builder.CreateBitCast(Ops[0], Int128Ty);
11699 Value *Ptr = Ops[0];
11708 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11709 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
11712 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
11714 return Builder.CreateUIToFP(Ops[0], FTy);
11715 return Builder.CreateSIToFP(Ops[0], FTy);
11725 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11728 if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
11730 else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
11734 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
11736 return Builder.CreateUIToFP(Ops[0], FTy);
11737 return Builder.CreateSIToFP(Ops[0], FTy);
11753 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11777 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
11778 return Builder.CreateTrunc(Ops[0], Int16Ty);
11788 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11796 Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
11798 Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
11800 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
11801 return Builder.CreateTrunc(Ops[0], Int16Ty);
11809 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11817 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
11818 return Builder.CreateTrunc(Ops[0], Int16Ty);
11826 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11831 Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
11835 Ops[0] = Builder.CreateZExt(Ops[0], InTy);
11838 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
11880 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11882 Ops[0], ConvertType(E->getCallReturnType(getContext())),
11888 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11890 Ops[0], ConvertType(E->getCallReturnType(getContext())),
11896 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11898 Ops[0], ConvertType(E->getCallReturnType(getContext())),
11904 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11906 Ops[0], ConvertType(E->getCallReturnType(getContext())),
11912 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11914 Ops[0], ConvertType(E->getCallReturnType(getContext())),
11918 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11919 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
11920 Ops[0] =
11921 Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
11922 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
11938 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11939 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
11940 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
11942 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
11944 Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
11945 return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
11961 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11962 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
11963 Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
11965 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
11967 Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
11968 return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
11984 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11985 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
11986 Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
11988 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
11990 Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
11991 return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
12017 Ops.push_back(EmitScalarExpr(E->getArg(1)));
12018 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
12019 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
12020 Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
12021 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
12025 Ops.push_back(EmitScalarExpr(E->getArg(1)));
12026 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
12027 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
12028 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
12029 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
12031 return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
12045 Ops.push_back(EmitScalarExpr(E->getArg(2)));
12046 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
12049 Ops[1] =
12050 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1));
12051 Ops.push_back(EmitScalarExpr(E->getArg(2)));
12052 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
12055 Ops[1] =
12056 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2));
12057 Ops.push_back(EmitScalarExpr(E->getArg(2)));
12058 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
12062 Ops[0] =
12063 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8));
12064 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
12068 Ops[0] =
12069 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16));
12070 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
12074 Ops[0] =
12075 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4));
12076 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
12080 Ops[0] =
12081 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8));
12082 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
12086 Ops[0] =
12087 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2));
12088 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
12091 Ops[0] =
12092 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
12093 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
12097 Ops[0] =
12098 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
12099 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
12103 Ops[0] =
12104 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1));
12105 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
12108 Ops[0] =
12109 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
12110 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
12114 Ops[0] =
12115 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
12116 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
12119 Ops[0] =
12120 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
12121 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
12124 Ops[0] =
12125 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
12126 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
12130 Ops[0] =
12131 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4));
12132 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
12136 Ops[0] =
12137 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2));
12138 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
12141 Ops.push_back(EmitScalarExpr(E->getArg(1)));
12142 return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
12144 Ops.push_back(EmitScalarExpr(E->getArg(1)));
12145 return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
12147 Ops.push_back(EmitScalarExpr(E->getArg(1)));
12148 return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
12150 Ops.push_back(EmitScalarExpr(E->getArg(1)));
12151 return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
12156 {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
12163 {Neg, EmitScalarExpr(E->getArg(2)), Ops[0]});
12167 return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
12170 return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
12174 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
12177 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
12180 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
12185 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
12188 Ops.push_back(EmitScalarExpr(E->getArg(1)));
12189 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
12191 Ops, "vqshlu_n");
12198 Ops.push_back(EmitScalarExpr(E->getArg(1)));
12199 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
12200 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
12207 Ops.push_back(EmitScalarExpr(E->getArg(1)));
12208 int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
12209 Ops[1] = ConstantInt::get(Int64Ty, -SV);
12210 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
12217 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
12218 Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
12219 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
12220 {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
12221 return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
12227 Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
12232 Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
12242 return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
12247 Ops[1] = Builder.CreateAShr(
12248 Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
12251 return Builder.CreateAdd(Ops[0], Ops[1]);
12257 // As Op + 0 = Op, return Ops[0] directly.
12259 return Ops[0];
12260 Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
12262 return Builder.CreateAdd(Ops[0], Ops[1]);
12268 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
12271 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
12272 ProductOps.push_back(vectorWrapScalar16(Ops[2]));
12274 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
12277 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
12278 Ops.pop_back();
12284 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
12289 ProductOps.push_back(Ops[1]);
12291 Ops[1] =
12298 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
12304 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
12307 ProductOps.push_back(Ops[1]);
12308 ProductOps.push_back(Ops[2]);
12309 Ops[1] =
12312 Ops.pop_back();
12318 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
12323 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
12329 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
12357 Builtin->NameHint, Builtin->TypeModifier, E, Ops,
12360 if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
12369 Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
12370 Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
12371 Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
12373 Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
12374 Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
12375 Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
12376 return Builder.CreateBitCast(Ops[0], Ty);
12382 Value *Addend = Ops[0];
12383 Value *Multiplicand = Ops[1];
12384 Value *LaneSource = Ops[2];
12385 Ops[0] = Multiplicand;
12386 Ops[1] = LaneSource;
12387 Ops[2] = Addend;
12394 llvm::Constant *cst = cast<Constant>(Ops[3]);
12396 Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
12397 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
12399 Ops.pop_back();
12402 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
12408 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
12409 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
12412 Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
12413 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
12417 DoubleTy, {Ops[1], Ops[2], Ops[0]});
12420 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
12421 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12425 Ops[2] = Builder.CreateBitCast(Ops[2], STy);
12427 cast<ConstantInt>(Ops[3]));
12428 Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
12432 {Ops[2], Ops[1], Ops[0]});
12435 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
12436 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12438 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
12439 Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
12442 {Ops[2], Ops[1], Ops[0]});
12450 Ops.push_back(EmitScalarExpr(E->getArg(3)));
12452 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
12455 {Ops[1], Ops[2], Ops[0]});
12461 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
12467 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
12469 Ops.push_back(EmitScalarExpr(E->getArg(1)));
12471 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
12478 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
12480 Ops.push_back(EmitScalarExpr(E->getArg(1)));
12482 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
12489 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
12500 TmpOps.push_back(Ops[1]);
12503 llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
12511 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
12517 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
12521 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
12523 Ops.push_back(EmitScalarExpr(E->getArg(1)));
12525 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
12529 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
12531 Ops.push_back(EmitScalarExpr(E->getArg(1)));
12533 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
12535 Ops.push_back(EmitScalarExpr(E->getArg(1)));
12537 Ops, "vrecps");
12540 Ops.push_back(EmitScalarExpr(E->getArg(1)));
12542 Ops, "vrecps");
12544 Ops.push_back(EmitScalarExpr(E->getArg(1)));
12546 Ops, "vrecps");
12549 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
12552 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
12555 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
12558 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
12561 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
12563 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12567 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
12574 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
12577 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12581 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
12584 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12588 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
12595 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
12598 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12602 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
12609 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
12612 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12616 return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
12619 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12623 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
12630 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
12633 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12637 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
12644 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
12647 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12651 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
12657 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12659 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32x");
12665 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12667 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32z");
12673 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12675 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64x");
12681 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12683 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z");
12690 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
12694 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
12696 return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
12697 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
12702 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
12704 return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
12710 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
12712 return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
12729 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz");
12745 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
12761 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
12777 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
12793 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
12798 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
12804 Ops.push_back(EmitScalarExpr(E->getArg(2)));
12805 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
12806 Ops.pop_back();
12808 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
12816 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
12819 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
12820 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
12821 Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
12831 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
12836 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
12839 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12843 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
12850 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
12851 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
12856 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
12867 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12868 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
12869 return Builder.CreateTrunc(Ops[0], Int8Ty);
12879 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12880 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
12881 return Builder.CreateTrunc(Ops[0], Int16Ty);
12891 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12892 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
12893 return Builder.CreateTrunc(Ops[0], Int8Ty);
12903 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12904 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
12905 return Builder.CreateTrunc(Ops[0], Int16Ty);
12912 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12913 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12914 return Builder.CreateTrunc(Ops[0], Int8Ty);
12921 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12922 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12923 return Builder.CreateTrunc(Ops[0], Int16Ty);
12930 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12931 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12932 return Builder.CreateTrunc(Ops[0], Int8Ty);
12939 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12940 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12941 return Builder.CreateTrunc(Ops[0], Int16Ty);
12948 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12949 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12950 return Builder.CreateTrunc(Ops[0], Int8Ty);
12957 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12958 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12959 return Builder.CreateTrunc(Ops[0], Int16Ty);
12966 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12967 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12968 return Builder.CreateTrunc(Ops[0], Int8Ty);
12975 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12976 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12977 return Builder.CreateTrunc(Ops[0], Int16Ty);
12984 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12985 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12986 return Builder.CreateTrunc(Ops[0], HalfTy);
12993 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12994 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12995 return Builder.CreateTrunc(Ops[0], HalfTy);
13002 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13003 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
13004 return Builder.CreateTrunc(Ops[0], Int8Ty);
13011 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13012 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
13013 return Builder.CreateTrunc(Ops[0], Int16Ty);
13020 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13021 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
13022 return Builder.CreateTrunc(Ops[0], Int8Ty);
13029 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13030 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
13031 return Builder.CreateTrunc(Ops[0], Int16Ty);
13038 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13039 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
13040 return Builder.CreateTrunc(Ops[0], Int8Ty);
13047 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13048 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
13049 return Builder.CreateTrunc(Ops[0], Int16Ty);
13056 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13057 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
13058 return Builder.CreateTrunc(Ops[0], Int8Ty);
13065 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13066 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
13067 return Builder.CreateTrunc(Ops[0], Int16Ty);
13074 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13075 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
13076 return Builder.CreateTrunc(Ops[0], HalfTy);
13083 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13084 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
13085 return Builder.CreateTrunc(Ops[0], HalfTy);
13092 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13093 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
13094 return Builder.CreateTrunc(Ops[0], HalfTy);
13101 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13102 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
13103 return Builder.CreateTrunc(Ops[0], HalfTy);
13110 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13111 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
13112 return Builder.CreateTrunc(Ops[0], HalfTy);
13119 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13120 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
13121 return Builder.CreateTrunc(Ops[0], HalfTy);
13124 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
13126 return Builder.CreateFMul(Ops[0], RHS);
13133 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13134 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
13135 return Builder.CreateTrunc(Ops[0], Int16Ty);
13142 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13143 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
13150 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13151 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
13152 return Builder.CreateTrunc(Ops[0], Int16Ty);
13159 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13160 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
13167 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13168 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
13169 return Builder.CreateTrunc(Ops[0], Int16Ty);
13176 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13177 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
13184 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13185 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
13186 return Builder.CreateTrunc(Ops[0], Int16Ty);
13193 Ops.push_back(EmitScalarExpr(E->getArg(0)));
13194 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
13200 return EmitNeonCall(Intrin, Ops, "vsri_n");
13206 return EmitNeonCall(Intrin, Ops, "vsli_n");
13210 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
13211 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
13212 return Builder.CreateAdd(Ops[0], Ops[1]);
13217 TmpOps.push_back(Ops[1]);
13218 TmpOps.push_back(Ops[2]);
13221 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
13222 return Builder.CreateAdd(Ops[0], tmp);
13226 return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
13230 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
13231 return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
13234 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
13235 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
13237 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
13241 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
13243 VTy->getElementType(), Ops[0], PtrOp0.getAlignment());
13245 Ops[0] = LI;
13246 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vldap1_lane");
13251 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
13254 Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
13255 return EmitNeonSplat(Ops[0], CI);
13259 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
13260 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
13261 return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
13264 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
13265 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
13267 Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
13275 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
13276 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
13282 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
13283 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
13289 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
13290 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
13296 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
13297 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
13303 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
13304 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
13310 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
13311 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
13315 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
13317 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
13318 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
13319 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
13320 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
13321 Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld2_lane");
13322 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
13326 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
13328 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
13329 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
13330 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
13331 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
13332 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
13333 Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld3_lane");
13334 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
13338 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
13340 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
13341 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
13342 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
13343 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
13344 Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
13345 Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
13346 Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld4_lane");
13347 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
13351 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
13352 llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
13354 Ops, "");
13358 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
13359 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
13360 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
13362 Ops, "");
13366 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
13367 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
13369 Ops, "");
13373 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
13374 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
13375 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
13377 Ops, "");
13381 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
13382 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
13384 Ops, "");
13388 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
13389 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
13390 llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
13392 Ops, "");
13396 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
13397 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
13406 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
13407 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
13414 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
13415 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
13423 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
13424 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
13431 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
13432 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
13441 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
13442 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
13449 Ops, "vtbl1");
13453 Ops, "vtbl2");
13457 Ops, "vtbl3");
13461 Ops, "vtbl4");
13465 Ops, "vtbx1");
13469 Ops, "vtbx2");
13473 Ops, "vtbx3");
13477 Ops, "vtbx4");
13482 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
13487 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
13601 BuildVector(ArrayRef<llvm::Value*> Ops) {
13602 assert((Ops.size() & (Ops.size() - 1)) == 0 &&
13605 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
13606 AllConstants &= isa<Constant>(Ops[i]);
13611 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
13612 CstOps.push_back(cast<Constant>(Ops[i]));
13618 llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size()));
13620 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
13621 Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt64(i));
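BuildVector above has exactly two paths, worth seeing side by side: if every operand is a Constant the whole thing folds to a ConstantVector, otherwise an insertelement chain is grown element by element. Sketch (llvm::all_of is from llvm/ADT/STLExtras.h):

    static Value *buildVector(IRBuilder<> &B, ArrayRef<Value *> Ops) {
      bool AllConstants =
          llvm::all_of(Ops, [](Value *V) { return isa<Constant>(V); });
      if (AllConstants) {                       // fold to a constant vector
        SmallVector<Constant *, 16> CstOps;
        for (Value *V : Ops)
          CstOps.push_back(cast<Constant>(V));
        return ConstantVector::get(CstOps);
      }
      auto *VTy = FixedVectorType::get(Ops[0]->getType(), Ops.size());
      Value *Result = PoisonValue::get(VTy);
      for (unsigned I = 0, E = Ops.size(); I != E; ++I)
        Result = B.CreateInsertElement(Result, Ops[I], B.getInt64(I));
      return Result;
    }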
13647 static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
13649 Value *Ptr = Ops[0];
13652 CGF, Ops[2],
13653 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
13655 return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
13658 static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
13660 llvm::Type *Ty = Ops[1]->getType();
13661 Value *Ptr = Ops[0];
13664 CGF, Ops[2], cast<llvm::FixedVectorType>(Ty)->getNumElements());
13666 return CGF.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, MaskVec, Ops[1]);
13670 ArrayRef<Value *> Ops) {
13671 auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
13672 Value *Ptr = Ops[0];
13675 CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements());
13679 return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
13683 ArrayRef<Value *> Ops,
13685 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
13687 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
13692 return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
13696 ArrayRef<Value *> Ops) {
13697 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
13698 Value *Ptr = Ops[0];
13700 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
13704 return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
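Every EmitX86Masked* helper above starts the same way: the scalar k-mask is bitcast to <BitWidth x i1> and, if the vector has fewer lanes than the mask has bits, a shuffle extracts the low lanes. That widening step in isolation:

    // i8/i16/i32/i64 k-mask -> the <N x i1> that masked.load/store expect.
    static Value *maskToVec(IRBuilder<> &B, Value *Mask, unsigned NumElts) {
      auto *MaskTy = FixedVectorType::get(
          B.getInt1Ty(), cast<IntegerType>(Mask->getType())->getBitWidth());
      Value *Vec = B.CreateBitCast(Mask, MaskTy);
      if (NumElts < MaskTy->getNumElements()) { // e.g. 2 doubles under an i8
        SmallVector<int, 8> Indices;
        for (unsigned I = 0; I != NumElts; ++I)
          Indices.push_back(I);
        Vec = B.CreateShuffleVector(Vec, Vec, Indices, "extract");
      }
      return Vec;
    }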
13708 ArrayRef<Value *> Ops,
13710 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13711 Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
13712 Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
13718 Ops[0]->getType());
13739 static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
13741 Value *Op0 = Ops[0];
13742 Value *Op1 = Ops[1];
13744 uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13831 bool Signed, ArrayRef<Value *> Ops) {
13832 assert((Ops.size() == 2 || Ops.size() == 4) &&
13835 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13855 Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
13859 if (Ops.size() == 4)
13860 MaskIn = Ops[3];
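The masked-compare path composes the same pieces: a vector icmp produces <N x i1>, an optional incoming mask is ANDed in, and the result is bitcast back to an integer k-mask. A sketch using maskToVec from above, assuming N >= 8 (the real code pads narrower results before the bitcast):

    static Value *emitMaskedICmp(IRBuilder<> &B, CmpInst::Predicate Pred,
                                 Value *LHS, Value *RHS, Value *MaskIn) {
      unsigned NumElts =
          cast<FixedVectorType>(LHS->getType())->getNumElements();
      Value *Cmp = B.CreateICmp(Pred, LHS, RHS);          // <N x i1>
      if (MaskIn)                                         // zero masked lanes
        Cmp = B.CreateAnd(Cmp, maskToVec(B, MaskIn, NumElts));
      return B.CreateBitCast(Cmp, B.getIntNTy(NumElts));  // back to iN
    }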
13871 ArrayRef<Value *> Ops, bool IsSigned) {
13872 unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
13873 llvm::Type *Ty = Ops[1]->getType();
13879 Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
13880 Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
13883 Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
13884 : CGF.Builder.CreateUIToFP(Ops[0], Ty);
13887 return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
13892 ArrayRef<Value *> Ops, unsigned BuiltinID,
13947 Value *A = Ops[0];
13948 Value *B = Ops[1];
13949 Value *C = Ops[2];
13958 (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 ||
13961 Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back()});
13984 MaskFalseVal = Ops[0];
13992 MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
14006 MaskFalseVal = Ops[2];
14011 return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);
14017 MutableArrayRef<Value *> Ops, Value *Upper,
14021 if (Ops.size() > 4)
14022 Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
14025 Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);
14027 Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
14028 Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
14029 Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
14034 switch (Ops[0]->getType()->getPrimitiveSizeInBits()) {
14048 {Ops[0], Ops[1], Ops[2], Ops[4]});
14052 Intrinsic::experimental_constrained_fma, Ops[0]->getType());
14053 Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
14055 Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
14056 Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
14059 if (Ops.size() > 3) {
14061 : Ops[PTIdx];
14069 Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
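
Both FMA emitters above bottom out in the generic llvm.fma intrinsic for the default rounding mode (immediate == 4) and then blend the result with a pass-through value; non-default rounding immediates route to target-specific rounding intrinsics instead, and strict-FP mode uses experimental_constrained_fma. Sketch of the common tail (mask assumed already <N x i1>, helper name illustrative):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    // Sketch: fused multiply-add followed by mask selection.
    static llvm::Value *emitFMASelectSketch(llvm::IRBuilder<> &Builder,
                                            llvm::Module &M, llvm::Value *A,
                                            llvm::Value *B, llvm::Value *C,
                                            llvm::Value *Mask,
                                            llvm::Value *PassThru) {
      llvm::Function *FMA = llvm::Intrinsic::getDeclaration(
          &M, llvm::Intrinsic::fma, A->getType());
      llvm::Value *Res = Builder.CreateCall(FMA, {A, B, C});
      return Builder.CreateSelect(Mask, Res, PassThru);
    }
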
14075 ArrayRef<Value *> Ops) {
14076 llvm::Type *Ty = Ops[0]->getType();
14080 Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
14081 Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
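
EmitX86Muldq (matches 14075-14081) models pmuldq/pmuludq with no target intrinsic at all: after bitcasting both sources to <N x i64>, it sign- or zero-extends the low 32 bits of each lane in place and multiplies. Sketch of that arithmetic, assuming the operands are already the <N x i64> form:

    #include "llvm/IR/IRBuilder.h"

    // Sketch: pmuldq-style widening multiply of the low halves of
    // each 64-bit lane.
    static llvm::Value *emitMuldqSketch(llvm::IRBuilder<> &Builder,
                                        bool IsSigned, llvm::Value *LHS,
                                        llvm::Value *RHS) {
      llvm::Type *Ty = LHS->getType();
      llvm::Constant *ShiftAmt = llvm::ConstantInt::get(Ty, 32);
      if (IsSigned) {
        // Sign-extend the low 32 bits of each lane in place.
        LHS = Builder.CreateAShr(Builder.CreateShl(LHS, ShiftAmt), ShiftAmt);
        RHS = Builder.CreateAShr(Builder.CreateShl(RHS, ShiftAmt), ShiftAmt);
      } else {
        // Zero-extend by masking off the high halves.
        llvm::Constant *LowMask = llvm::ConstantInt::get(Ty, 0xffffffff);
        LHS = Builder.CreateAnd(LHS, LowMask);
        RHS = Builder.CreateAnd(RHS, LowMask);
      }
      return Builder.CreateMul(LHS, RHS);
    }
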
14104 ArrayRef<Value *> Ops) {
14105 llvm::Type *Ty = Ops[0]->getType();
14126 Ops.drop_back());
14127 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
14128 return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
14147 ArrayRef<Value *> Ops,
14149 assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
14153 if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) {
14156 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
14160 Value *Src = Ops[0];
14177 if (Ops.size() >= 3)
14178 Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]);
14372 SmallVector<Value*, 4> Ops;
14383 Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
14392 auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
14393 Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
14395 return Builder.CreateCall(F, Ops);
14403 auto getVectorFCmpIR = [this, &Ops, E](CmpInst::Predicate Pred,
14408 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
14410 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
14411 llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
14420 Value *Address = Ops[0];
14421 ConstantInt *C = cast<ConstantInt>(Ops[1]);
14430 Ops[0]);
14450 Ops[0]);
14456 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
14457 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
14462 Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
14463 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
14477 return Builder.CreateBitCast(BuildVector(Ops),
14490 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14491 uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
14495 return Builder.CreateExtractElement(Ops[0], Index);
14506 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14507 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
14511 return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
14516 Builder.CreateStore(Ops[0], Tmp);
14567 Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
14568 Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
14569 Ops[1] = Mhi;
14570 Ops.push_back(Mlo);
14571 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
14575 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops);
14594 return EmitX86MaskedStore(*this, Ops, Align(1));
14599 return EmitX86MaskedStore(*this, Ops, Align(1));
14615 return Builder.CreateCall(F, Ops);
14629 return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
14643 return EmitX86ConvertToMask(*this, Ops[0]);
14651 return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ true);
14658 return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ false);
14665 return EmitScalarFMAExpr(*this, E, Ops, Ops[0]);
14668 return EmitScalarFMAExpr(*this, E, Ops,
14669 Constant::getNullValue(Ops[0]->getType()));
14673 return EmitScalarFMAExpr(*this, E, Ops, Ops[0], /*ZeroMask*/ true);
14677 return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2);
14681 return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2,
14701 return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ false);
14714 return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ true);
14729 *this, Ops,
14750 return EmitX86MaskedLoad(*this, Ops, Align(1));
14755 return EmitX86MaskedLoad(*this, Ops, Align(1));
14770 *this, Ops,
14791 return EmitX86ExpandLoad(*this, Ops);
14811 return EmitX86CompressStore(*this, Ops);
14831 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);
14851 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);
14955 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(),
14956 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements());
14957 Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
14959 return Builder.CreateCall(Intr, Ops);
15064 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(),
15065 cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements());
15066 Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
15068 return Builder.CreateCall(Intr, Ops);
15090 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
15092 unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
15101 Value *Res = Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
15104 if (Ops.size() == 4)
15105 Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);
15126 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
15128 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements();
15130 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
15140 Ops[1], ArrayRef(Indices, DstNumElts), "widen");
15149 return Builder.CreateShuffleVector(Ops[0], Op1,
15154 Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
15155 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
15160 if (const auto *C = dyn_cast<Constant>(Ops[2]))
15162 return Builder.CreateTrunc(Ops[0], Ops[1]->getType());
15179 return Builder.CreateCall(Intr, Ops);
15190 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
15191 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
15199 return Builder.CreateShuffleVector(Ops[0], Ops[1],
15205 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
15206 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
15222 return Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
15228 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
15229 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
15245 return Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
15257 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
15258 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
15274 return Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
15283 uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
15284 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
15303 return Builder.CreateShuffleVector(Ops[0], Ops[1],
15310 unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
15311 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
15320 return Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
15326 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
15329 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
15341 Ops[1] = Ops[0];
15342 Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
15356 return Builder.CreateShuffleVector(Ops[1], Ops[0],
15366 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
15367 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
15376 return Builder.CreateShuffleVector(Ops[1], Ops[0],
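
The palignr and valignd/valignq matches above (15326-15376) are pure shufflevector constructions: the result is a window into the concatenation of the two sources, offset by the immediate, with palignr additionally substituting a zero vector once the shift crosses a 16-byte lane. A single-lane sketch of the index pattern (the real code repeats it per 128-bit lane):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/IRBuilder.h"

    // Sketch: take a Shift-offset window from concat(Lo, Hi).
    // Shuffle indices >= NumElts select from the second vector, so
    // the window slides from Lo into Hi as i + Shift grows.
    static llvm::Value *emitAlignrSketch(llvm::IRBuilder<> &Builder,
                                         llvm::Value *Lo, llvm::Value *Hi,
                                         unsigned Shift) {
      unsigned NumElts =
          llvm::cast<llvm::FixedVectorType>(Lo->getType())->getNumElements();
      llvm::SmallVector<int, 16> Indices(NumElts);
      for (unsigned i = 0; i != NumElts; ++i)
        Indices[i] = static_cast<int>(i + Shift);
      return Builder.CreateShuffleVector(Lo, Hi, Indices);
    }
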
15387 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
15388 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
15404 return Builder.CreateShuffleVector(Ops[0], Ops[1],
15412 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
15414 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
15426 OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType());
15428 OutOps[l] = Ops[1];
15430 OutOps[l] = Ops[0];
15450 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
15451 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
15470 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
15474 return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
15479 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
15480 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
15499 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
15509 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
15510 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
15513 return llvm::Constant::getNullValue(Ops[0]->getType());
15515 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
15524 return Builder.CreateBitCast(SV, Ops[0]->getType());
15530 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
15531 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
15534 return llvm::Constant::getNullValue(Ops[0]->getType());
15536 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
15545 return Builder.CreateBitCast(SV, Ops[0]->getType());
15554 Value *Ptr = Ops[0];
15555 Value *Src = Ops[1];
15589 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
15602 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
15627 return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
15632 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
15633 Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
15634 A = EmitX86ScalarSelect(*this, Ops[0], A, B);
15635 return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
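
The select builtins just above reduce to a plain IR select once the integer mask becomes <N x i1>; the scalar variant extracts element 0, selects, and reinserts. Sketch of the vector case (the real EmitX86Select also short-circuits an all-ones constant mask, and the mask-widening caveat from earlier applies):

    #include "llvm/IR/IRBuilder.h"

    // Sketch: AVX-512 select-by-mask over whole vectors.
    static llvm::Value *emitSelectSketch(llvm::IRBuilder<> &Builder,
                                         llvm::Value *Mask,
                                         llvm::Value *TrueVal,
                                         llvm::Value *FalseVal) {
      unsigned NumElts = llvm::cast<llvm::FixedVectorType>(TrueVal->getType())
                             ->getNumElements();
      auto *BoolVecTy =
          llvm::FixedVectorType::get(Builder.getInt1Ty(), NumElts);
      llvm::Value *MaskVec = Builder.CreateBitCast(Mask, BoolVecTy);
      return Builder.CreateSelect(MaskVec, TrueVal, FalseVal);
    }
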
15649 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
15650 return EmitX86MaskedCompare(*this, CC, true, Ops);
15664 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
15665 return EmitX86MaskedCompare(*this, CC, false, Ops);
15671 return EmitX86vpcom(*this, Ops, true);
15676 return EmitX86vpcom(*this, Ops, false);
15682 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
15683 Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType());
15691 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
15692 Value *C = llvm::Constant::getNullValue(Ops[0]->getType());
15734 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
15735 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
15736 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
15762 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
15763 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
15764 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
15767 return Builder.CreateBitCast(Res, Ops[0]->getType());
15773 return EmitX86MaskLogic(*this, Instruction::And, Ops);
15778 return EmitX86MaskLogic(*this, Instruction::And, Ops, true);
15783 return EmitX86MaskLogic(*this, Instruction::Or, Ops);
15788 return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true);
15793 return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
15798 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
15799 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
15801 Ops[0]->getType());
15810 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
15811 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
15812 return Builder.CreateBitCast(Res, Ops[0]->getType());
15818 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
15819 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
15820 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
15833 return Builder.CreateBitCast(Res, Ops[0]->getType());
15842 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
15843 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
15847 Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
15858 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
15863 unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
15882 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
15884 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
15895 Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
15896 A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
15897 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
15908 if (Ops.size() == 2) {
15909 unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
15928 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
15934 Ops[0]->getType());
15935 return Builder.CreateConstrainedFPCall(F, Ops[0]);
15937 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
15938 return Builder.CreateCall(F, Ops[0]);
15945 return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
15950 return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
15958 return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
15966 return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
15977 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
15988 // Ops 0 and 1 are swapped.
15989 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
16000 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
16011 // Ops 0 and 1 are swapped.
16012 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
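
All four groups above funnel into one helper built on llvm.fshl/llvm.fshr, and, as the two comments note, the right-shift forms swap the first two operands because fshr numbers the concatenated halves the other way around. Sketch (the real helper also splats a scalar shift amount across the vector, omitted here):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    // Sketch: funnel shift over concat(Op0, Op1). A rotate is the
    // special case where both halves are the same value.
    static llvm::Value *emitFunnelShiftSketch(llvm::IRBuilder<> &Builder,
                                              llvm::Module &M,
                                              llvm::Value *Op0,
                                              llvm::Value *Op1,
                                              llvm::Value *Amt, bool IsRight) {
      llvm::Intrinsic::ID IID =
          IsRight ? llvm::Intrinsic::fshr : llvm::Intrinsic::fshl;
      llvm::Function *F =
          llvm::Intrinsic::getDeclaration(&M, IID, Op0->getType());
      return Builder.CreateCall(F, {Op0, Op1, Amt});
    }
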
16021 CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Ops[1]->getType());
16024 return Builder.CreateCall(F, {Ops[0], Ops[1]});
16032 CGM.getIntrinsic(Intrinsic::vector_reduce_fmul, Ops[1]->getType());
16035 return Builder.CreateCall(F, {Ops[0], Ops[1]});
16043 CGM.getIntrinsic(Intrinsic::vector_reduce_fmax, Ops[0]->getType());
16046 return Builder.CreateCall(F, {Ops[0]});
16054 CGM.getIntrinsic(Intrinsic::vector_reduce_fmin, Ops[0]->getType());
16057 return Builder.CreateCall(F, {Ops[0]});
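
The reduce_* matches call the target-independent vector-reduction intrinsics. Note the asymmetry visible above: vector_reduce_fadd/fmul take an explicit scalar start value ahead of the vector, while fmax/fmin take only the vector. The real lowering also enables reassociation fast-math flags around these calls, which the matched lines do not show. Sketch of the fadd form:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    // Sketch: horizontal float add-reduction; Acc is the scalar start
    // value that llvm.vector.reduce.fadd folds the vector into.
    static llvm::Value *emitReduceFAddSketch(llvm::IRBuilder<> &Builder,
                                             llvm::Module &M, llvm::Value *Acc,
                                             llvm::Value *Vec) {
      llvm::Function *F = llvm::Intrinsic::getDeclaration(
          &M, llvm::Intrinsic::vector_reduce_fadd, Vec->getType());
      return Builder.CreateCall(F, {Acc, Vec});
    }
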
16091 Ops[0]);
16116 { Ops[0], Ops[1], Ops[2] });
16118 Ops[3]);
16132 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
16133 Value *MaskIn = Ops[2];
16134 Ops.erase(&Ops[2]);
16168 Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
16179 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
16204 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]});
16207 Builder.CreateDefaultAlignedStore(Result, Ops[2]);
16211 return Builder.CreateDefaultAlignedStore(Result, Ops[3]);
16231 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
16238 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
16239 Value *MaskIn = Ops[2];
16240 Ops.erase(&Ops[2]);
16256 Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
16307 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;
16396 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
16397 Ops[3] = getMaskVecValue(*this, Ops[3], NumElts);
16398 Value *Cmp = Builder.CreateCall(Intr, Ops);
16402 return Builder.CreateCall(Intr, Ops);
16413 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
16416 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
16418 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
16419 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
16466 return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
16471 Ops[2] = getMaskVecValue(
16472 *this, Ops[2],
16473 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements());
16475 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
16478 return Builder.CreateFPExt(Ops[0], Builder.getFloatTy());
16492 Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]);
16493 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
16540 Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
16541 Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
16552 Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
16553 Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
16585 std::swap(Ops[0], Ops[1]);
16586 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
16587 return Builder.CreateCall(F, Ops);
16604 return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align(1), true);
16627 Ops[0], llvm::PointerType::get(getLLVMContext(), 257));
16639 Ops[0], llvm::PointerType::get(getLLVMContext(), 256));
16648 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1]});
16652 Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[2], i * 16);
16662 Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2]});
16666 Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[3], i * 16);
16699 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[1], Ops[2]});
16712 Builder.CreateDefaultAlignedStore(Out, Ops[0]);
16717 Builder.CreateDefaultAlignedStore(Zero, Ops[0]);
16750 InOps[0] = Ops[2];
16752 Value *Ptr = Builder.CreateConstGEP1_32(Ty, Ops[1], i);
16770 Value *Ptr = Builder.CreateConstGEP1_32(Extract->getType(), Ops[0], i);
16779 Value *Ptr = Builder.CreateConstGEP1_32(Out->getType(), Ops[0], i);
16794 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
16795 return EmitX86Select(*this, Ops[3], Call, Ops[0]);
16803 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
16804 Value *And = Builder.CreateAnd(Ops[3], llvm::ConstantInt::get(Int8Ty, 1));
16805 return EmitX86Select(*this, And, Call, Ops[0]);
16813 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
16815 return Builder.CreateShuffleVector(Call, Ops[2], Mask);
16819 CGM.getIntrinsic(Intrinsic::prefetch, Ops[0]->getType()),
16820 {Ops[0], llvm::ConstantInt::get(Int32Ty, 0), Ops[1],
16999 SmallVector<Value *, 2> Ops;
17000 Ops.push_back(EmitScalarExpr(E->getArg(0)));
17001 Ops.push_back(EmitScalarExpr(E->getArg(1)));
17004 Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
17005 Ops.pop_back();
17051 return Builder.CreateCall(F, Ops, "");
17067 SmallVector<Value *, 3> Ops;
17068 Ops.push_back(EmitScalarExpr(E->getArg(0)));
17069 Ops.push_back(EmitScalarExpr(E->getArg(1)));
17070 Ops.push_back(EmitScalarExpr(E->getArg(2)));
17073 Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
17074 Ops.pop_back();
17114 return Builder.CreateCall(F, Ops, "");
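
The two argument-marshalling blocks above share one trick: a builtin that takes a separate offset and base pointer is lowered by folding the pair into a single byte-level GEP and popping the now-redundant trailing operand before the call. A generic sketch (the index parameters are assumptions for illustration; in the matches the base pointer is always the last operand):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/IRBuilder.h"

    // Sketch: fold (offset, base) operands into one i8 GEP so the
    // intrinsic sees a single pointer operand.
    static void foldOffsetIntoPointer(llvm::IRBuilder<> &Builder,
                                      llvm::SmallVectorImpl<llvm::Value *> &Ops,
                                      unsigned PtrIdx, unsigned OffIdx) {
      Ops[OffIdx] =
          Builder.CreateGEP(Builder.getInt8Ty(), Ops[PtrIdx], Ops[OffIdx]);
      Ops.pop_back(); // the base pointer was the trailing operand
    }
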
17371 SmallVector<Value *, 2> Ops;
17376 Ops.push_back(Builder.CreateBitCast(Op0, V1I128Ty));
17377 Ops.push_back(Builder.CreateBitCast(Op1, V1I128Ty));
17381 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops, "");
17387 SmallVector<Value *, 3> Ops;
17393 Ops.push_back(Builder.CreateBitCast(Op0, V1I128Ty));
17394 Ops.push_back(Builder.CreateBitCast(Op1, V1I128Ty));
17395 Ops.push_back(Builder.CreateBitCast(Op2, V1I128Ty));
17412 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops, "");
17846 SmallVector<Value *, 4> Ops;
17849 Ops.push_back(
17852 Ops.push_back(EmitScalarExpr(E->getArg(i)));
17871 Value *Ptr = Ops[0];
17889 std::reverse(Ops.begin() + 1, Ops.end());
17906 Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
17908 Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
17910 Ops.pop_back();
17912 return Builder.CreateCall(F, Ops, "");
17920 for (unsigned i = 1; i < Ops.size(); ++i)
17921 CallOps.push_back(Ops[i]);
17924 return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64));
21363 Value *Ops[18];
21365 Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
21366 Ops[OpIdx++] = EmitScalarExpr(E->getArg(1));
21371 Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), *LaneConst);
21374 return Builder.CreateCall(Callee, Ops);
21680 SmallVector<llvm::Value*,5> Ops = { Base };
21682 Ops.push_back(EmitScalarExpr(E->getArg(i)));
21684 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
21795 SmallVector<llvm::Value*,4> Ops;
21801 Ops.push_back(V2Q(EmitScalarExpr(PredOp)));
21804 Ops.push_back(EmitScalarExpr(E->getArg(i)));
21805 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
21852 SmallVector<Value *, 4> Ops;
21881 Ops.push_back(AggValue);
21884 Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
21929 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
21930 Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
21938 Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
21939 Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
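
These clz/ctz lowerings pass an explicit i1 false as the intrinsic's second operand: llvm.ctlz and llvm.cttz take an is-zero-poison flag, and because these builtins define the zero-input result, the flag must be false. Sketch:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    // Sketch: count leading zeros with a defined result for a zero
    // input (second operand i1 false), matching the lowerings above.
    static llvm::Value *emitCtlzSketch(llvm::IRBuilder<> &Builder,
                                       llvm::Module &M, llvm::Value *X) {
      llvm::Function *F = llvm::Intrinsic::getDeclaration(
          &M, llvm::Intrinsic::ctlz, X->getType());
      return Builder.CreateCall(F, {X, Builder.getInt1(false)});
    }
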
22023 if (Ops.size() == 2)
22024 DomainVal = cast<ConstantInt>(Ops[1])->getZExtValue();
22041 Address(Ops[0], ResTy, CharUnits::fromQuantity(Width / 8)));
22051 if (Ops.size() == 3)
22052 DomainVal = cast<ConstantInt>(Ops[2])->getZExtValue();
22060 StoreInst *Store = Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
22077 return Builder.CreateCall(F, Ops, "");