//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include <cstdarg>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    if (Signed)
      Result = LHSAP.sadd_ov(RHSAP, Overflow);
    else
      Result = LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    if (Signed)
      Result = LHSAP.ssub_ov(RHSAP, Overflow);
    else
      Result = LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    if (Signed)
      Result = LHSAP.smul_ov(RHSAP, Overflow);
    else
      Result = LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;                   // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E;                 // Entire expr, for error unsupported. May not be binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases, this
  /// operation did not follow usual arithmetic conversion and both operands
  /// might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations
    // return an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++0x nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                        const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return llvm::None;

  QualType BaseTy = Base->getType();
  if (!BaseTy->isPromotableIntegerType() ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return llvm::None;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).hasValue();
}

/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either one
  // of the unpromoted types are less than half the size of the promoted type.
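  // For example, assuming a 32-bit 'int': two promoted 'unsigned char'
  // operands (8 bits each) can never overflow the 32-bit multiplication, so
  // the check is elided, while two promoted 'unsigned short' operands
  // (16 bits each) can, e.g. 65535 * 65535 overflows 'int', so it is kept.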
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}

class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here,
        // unless the alignment-assumption sanitizer is enabled,
        // then we prefer the assumption over alignment attribute
        // on IR function param.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy =
              dyn_cast<TypedefType>(E->getType()))
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }

  /// EmitLoadOfLValue - Given an expression with complex type that represents a
  /// value l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and other fixed point or fixed point
  /// and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again. Optimize this common case.
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.Builder.CreateLoad(Address(
            Result, CGF.getContext().getTypeAlignInChars(E->getType())));
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }
  Value *VisitUnaryPlus(const UnaryOperator *E) {
    // This differs from gcc, though, most likely due to a bug in gcc.
    TestAndClearIgnoreResultAssign();
    return Visit(E->getSubExpr());
  }
  Value *VisitUnaryMinus    (const UnaryOperator *E);
  Value *VisitUnaryNot      (const UnaryOperator *E);
  Value *VisitUnaryLNot     (const UnaryOperator *E);
  Value *VisitUnaryReal     (const UnaryOperator *E);
  Value *VisitUnaryImag     (const UnaryOperator *E);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        LLVM_FALLTHROUGH;
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero,bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E);
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP)                                                    \
  Value *VisitBin ## OP(const BinaryOperator *E) {                         \
    return Emit ## OP(EmitBinOps(E));                                      \
  }                                                                        \
  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) {       \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP);          \
  }
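  // For example, HANDLEBINOP(Mul) expands to VisitBinMul, which forwards to
  // EmitMul(EmitBinOps(E)), and VisitBinMulAssign, which forwards to
  // EmitCompoundAssign(E, &ScalarExprEmitter::EmitMul).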
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
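  // Each VISITCOMP entry below supplies the unsigned-integer, signed-integer
  // and floating-point predicates for one comparison, plus whether the FP
  // compare is signaling: the relational operators use ordered predicates with
  // IsSignaling=true, while == and != use quiet OEQ/UNE with IsSignaling=false.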
822*e038c9c4Sjoerg #define VISITCOMP(CODE, UI, SI, FP, SIG) \
8237330f729Sjoerg Value *VisitBin##CODE(const BinaryOperator *E) { \
8247330f729Sjoerg return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
825*e038c9c4Sjoerg llvm::FCmpInst::FP, SIG); }
826*e038c9c4Sjoerg VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
827*e038c9c4Sjoerg VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
828*e038c9c4Sjoerg VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
829*e038c9c4Sjoerg VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
830*e038c9c4Sjoerg VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
831*e038c9c4Sjoerg VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
8327330f729Sjoerg #undef VISITCOMP
8337330f729Sjoerg
8347330f729Sjoerg Value *VisitBinAssign (const BinaryOperator *E);
8357330f729Sjoerg
8367330f729Sjoerg Value *VisitBinLAnd (const BinaryOperator *E);
8377330f729Sjoerg Value *VisitBinLOr (const BinaryOperator *E);
8387330f729Sjoerg Value *VisitBinComma (const BinaryOperator *E);
8397330f729Sjoerg
VisitBinPtrMemD(const Expr * E)8407330f729Sjoerg Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
VisitBinPtrMemI(const Expr * E)8417330f729Sjoerg Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
8427330f729Sjoerg
VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator * E)8437330f729Sjoerg Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
8447330f729Sjoerg return Visit(E->getSemanticForm());
8457330f729Sjoerg }
8467330f729Sjoerg
8477330f729Sjoerg // Other Operators.
8487330f729Sjoerg Value *VisitBlockExpr(const BlockExpr *BE);
8497330f729Sjoerg Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
8507330f729Sjoerg Value *VisitChooseExpr(ChooseExpr *CE);
8517330f729Sjoerg Value *VisitVAArgExpr(VAArgExpr *VE);
VisitObjCStringLiteral(const ObjCStringLiteral * E)8527330f729Sjoerg Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
8537330f729Sjoerg return CGF.EmitObjCStringLiteral(E);
8547330f729Sjoerg }
VisitObjCBoxedExpr(ObjCBoxedExpr * E)8557330f729Sjoerg Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
8567330f729Sjoerg return CGF.EmitObjCBoxedExpr(E);
8577330f729Sjoerg }
VisitObjCArrayLiteral(ObjCArrayLiteral * E)8587330f729Sjoerg Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
8597330f729Sjoerg return CGF.EmitObjCArrayLiteral(E);
8607330f729Sjoerg }
VisitObjCDictionaryLiteral(ObjCDictionaryLiteral * E)8617330f729Sjoerg Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
8627330f729Sjoerg return CGF.EmitObjCDictionaryLiteral(E);
8637330f729Sjoerg }
8647330f729Sjoerg Value *VisitAsTypeExpr(AsTypeExpr *CE);
8657330f729Sjoerg Value *VisitAtomicExpr(AtomicExpr *AE);
8667330f729Sjoerg };
8677330f729Sjoerg } // end anonymous namespace.
8687330f729Sjoerg
8697330f729Sjoerg //===----------------------------------------------------------------------===//
8707330f729Sjoerg // Utilities
8717330f729Sjoerg //===----------------------------------------------------------------------===//
8727330f729Sjoerg
8737330f729Sjoerg /// EmitConversionToBool - Convert the specified expression value to a
8747330f729Sjoerg /// boolean (i1) truth value. This is equivalent to "Val != 0".
EmitConversionToBool(Value * Src,QualType SrcType)8757330f729Sjoerg Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
8767330f729Sjoerg assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
8777330f729Sjoerg
8787330f729Sjoerg if (SrcType->isRealFloatingType())
8797330f729Sjoerg return EmitFloatToBoolConversion(Src);
8807330f729Sjoerg
8817330f729Sjoerg if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
8827330f729Sjoerg return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
8837330f729Sjoerg
8847330f729Sjoerg assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
8857330f729Sjoerg "Unknown scalar type to convert");
8867330f729Sjoerg
8877330f729Sjoerg if (isa<llvm::IntegerType>(Src->getType()))
8887330f729Sjoerg return EmitIntToBoolConversion(Src);
8897330f729Sjoerg
8907330f729Sjoerg assert(isa<llvm::PointerType>(Src->getType()));
8917330f729Sjoerg return EmitPointerToBoolConversion(Src, SrcType);
8927330f729Sjoerg }
8937330f729Sjoerg
EmitFloatConversionCheck(Value * OrigSrc,QualType OrigSrcType,Value * Src,QualType SrcType,QualType DstType,llvm::Type * DstTy,SourceLocation Loc)8947330f729Sjoerg void ScalarExprEmitter::EmitFloatConversionCheck(
8957330f729Sjoerg Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
8967330f729Sjoerg QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
8977330f729Sjoerg assert(SrcType->isFloatingType() && "not a conversion from floating point");
8987330f729Sjoerg if (!isa<llvm::IntegerType>(DstTy))
8997330f729Sjoerg return;
9007330f729Sjoerg
9017330f729Sjoerg CodeGenFunction::SanitizerScope SanScope(&CGF);
9027330f729Sjoerg using llvm::APFloat;
9037330f729Sjoerg using llvm::APSInt;
9047330f729Sjoerg
9057330f729Sjoerg llvm::Value *Check = nullptr;
9067330f729Sjoerg const llvm::fltSemantics &SrcSema =
9077330f729Sjoerg CGF.getContext().getFloatTypeSemantics(OrigSrcType);
9087330f729Sjoerg
9097330f729Sjoerg // Floating-point to integer. This has undefined behavior if the source is
9107330f729Sjoerg // +-Inf, NaN, or doesn't fit into the destination type (after truncation
9117330f729Sjoerg // to an integer).
9127330f729Sjoerg unsigned Width = CGF.getContext().getIntWidth(DstType);
9137330f729Sjoerg bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
9147330f729Sjoerg
9157330f729Sjoerg APSInt Min = APSInt::getMinValue(Width, Unsigned);
9167330f729Sjoerg APFloat MinSrc(SrcSema, APFloat::uninitialized);
9177330f729Sjoerg if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
9187330f729Sjoerg APFloat::opOverflow)
9197330f729Sjoerg // Don't need an overflow check for lower bound. Just check for
9207330f729Sjoerg // -Inf/NaN.
9217330f729Sjoerg MinSrc = APFloat::getInf(SrcSema, true);
9227330f729Sjoerg else
9237330f729Sjoerg // Find the largest value which is too small to represent (before
9247330f729Sjoerg // truncation toward zero).
9257330f729Sjoerg MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
9267330f729Sjoerg
9277330f729Sjoerg APSInt Max = APSInt::getMaxValue(Width, Unsigned);
9287330f729Sjoerg APFloat MaxSrc(SrcSema, APFloat::uninitialized);
9297330f729Sjoerg if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
9307330f729Sjoerg APFloat::opOverflow)
9317330f729Sjoerg // Don't need an overflow check for upper bound. Just check for
9327330f729Sjoerg // +Inf/NaN.
9337330f729Sjoerg MaxSrc = APFloat::getInf(SrcSema, false);
9347330f729Sjoerg else
9357330f729Sjoerg // Find the smallest value which is too large to represent (before
9367330f729Sjoerg // truncation toward zero).
9377330f729Sjoerg MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
9387330f729Sjoerg
9397330f729Sjoerg // If we're converting from __half, convert the range to float to match
9407330f729Sjoerg // the type of src.
9417330f729Sjoerg if (OrigSrcType->isHalfType()) {
9427330f729Sjoerg const llvm::fltSemantics &Sema =
9437330f729Sjoerg CGF.getContext().getFloatTypeSemantics(SrcType);
9447330f729Sjoerg bool IsInexact;
9457330f729Sjoerg MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
9467330f729Sjoerg MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
9477330f729Sjoerg }
9487330f729Sjoerg
9497330f729Sjoerg llvm::Value *GE =
9507330f729Sjoerg Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
9517330f729Sjoerg llvm::Value *LE =
9527330f729Sjoerg Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
9537330f729Sjoerg Check = Builder.CreateAnd(GE, LE);
9547330f729Sjoerg
9557330f729Sjoerg llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
9567330f729Sjoerg CGF.EmitCheckTypeDescriptor(OrigSrcType),
9577330f729Sjoerg CGF.EmitCheckTypeDescriptor(DstType)};
9587330f729Sjoerg CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
9597330f729Sjoerg SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
9607330f729Sjoerg }
9617330f729Sjoerg
9627330f729Sjoerg // Should be called within CodeGenFunction::SanitizerScope RAII scope.
9637330f729Sjoerg // Returns 'i1 false' when the truncation Src -> Dst was lossy.
9647330f729Sjoerg static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
9657330f729Sjoerg std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerTruncationCheckHelper(Value * Src,QualType SrcType,Value * Dst,QualType DstType,CGBuilderTy & Builder)9667330f729Sjoerg EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
9677330f729Sjoerg QualType DstType, CGBuilderTy &Builder) {
9687330f729Sjoerg llvm::Type *SrcTy = Src->getType();
9697330f729Sjoerg llvm::Type *DstTy = Dst->getType();
9707330f729Sjoerg (void)DstTy; // Only used in assert()
9717330f729Sjoerg
9727330f729Sjoerg // This should be truncation of integral types.
9737330f729Sjoerg assert(Src != Dst);
9747330f729Sjoerg assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
9757330f729Sjoerg assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
9767330f729Sjoerg "non-integer llvm type");
9777330f729Sjoerg
9787330f729Sjoerg bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
9797330f729Sjoerg bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
9807330f729Sjoerg
9817330f729Sjoerg // If both (src and dst) types are unsigned, then it's an unsigned truncation.
9827330f729Sjoerg // Else, it is a signed truncation.
9837330f729Sjoerg ScalarExprEmitter::ImplicitConversionCheckKind Kind;
9847330f729Sjoerg SanitizerMask Mask;
9857330f729Sjoerg if (!SrcSigned && !DstSigned) {
9867330f729Sjoerg Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
9877330f729Sjoerg Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
9887330f729Sjoerg } else {
9897330f729Sjoerg Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
9907330f729Sjoerg Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
9917330f729Sjoerg }
9927330f729Sjoerg
9937330f729Sjoerg llvm::Value *Check = nullptr;
9947330f729Sjoerg // 1. Extend the truncated value back to the same width as the Src.
9957330f729Sjoerg Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
9967330f729Sjoerg // 2. Equality-compare with the original source value
9977330f729Sjoerg Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
9987330f729Sjoerg // If the comparison result is 'i1 false', then the truncation was lossy.
9997330f729Sjoerg return std::make_pair(Kind, std::make_pair(Check, Mask));
10007330f729Sjoerg }
10017330f729Sjoerg
1002*e038c9c4Sjoerg static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
1003*e038c9c4Sjoerg QualType SrcType, QualType DstType) {
1004*e038c9c4Sjoerg return SrcType->isIntegerType() && DstType->isIntegerType();
1005*e038c9c4Sjoerg }
1006*e038c9c4Sjoerg
10077330f729Sjoerg void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
10087330f729Sjoerg Value *Dst, QualType DstType,
10097330f729Sjoerg SourceLocation Loc) {
10107330f729Sjoerg if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
10117330f729Sjoerg return;
10127330f729Sjoerg
10137330f729Sjoerg // We only care about int->int conversions here.
10147330f729Sjoerg // We ignore conversions to/from pointer and/or bool.
1015*e038c9c4Sjoerg if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1016*e038c9c4Sjoerg DstType))
10177330f729Sjoerg return;
10187330f729Sjoerg
10197330f729Sjoerg unsigned SrcBits = Src->getType()->getScalarSizeInBits();
10207330f729Sjoerg unsigned DstBits = Dst->getType()->getScalarSizeInBits();
10217330f729Sjoerg // This must be truncation. Else we do not care.
10227330f729Sjoerg if (SrcBits <= DstBits)
10237330f729Sjoerg return;
10247330f729Sjoerg
10257330f729Sjoerg assert(!DstType->isBooleanType() && "we should not get here with booleans.");
10267330f729Sjoerg
10277330f729Sjoerg // If the integer sign change sanitizer is enabled,
10287330f729Sjoerg // and we are truncating from larger unsigned type to smaller signed type,
10297330f729Sjoerg // let that next sanitizer deal with it.
10307330f729Sjoerg bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
10317330f729Sjoerg bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
10327330f729Sjoerg if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
10337330f729Sjoerg (!SrcSigned && DstSigned))
10347330f729Sjoerg return;
10357330f729Sjoerg
10367330f729Sjoerg CodeGenFunction::SanitizerScope SanScope(&CGF);
10377330f729Sjoerg
10387330f729Sjoerg std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
10397330f729Sjoerg std::pair<llvm::Value *, SanitizerMask>>
10407330f729Sjoerg Check =
10417330f729Sjoerg EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
10427330f729Sjoerg // If the comparison result is 'i1 false', then the truncation was lossy.
10437330f729Sjoerg
10447330f729Sjoerg // Do we care about this type of truncation?
10457330f729Sjoerg if (!CGF.SanOpts.has(Check.second.second))
10467330f729Sjoerg return;
10477330f729Sjoerg
10487330f729Sjoerg llvm::Constant *StaticArgs[] = {
10497330f729Sjoerg CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
10507330f729Sjoerg CGF.EmitCheckTypeDescriptor(DstType),
10517330f729Sjoerg llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
10527330f729Sjoerg CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
10537330f729Sjoerg {Src, Dst});
10547330f729Sjoerg }
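// For reference, a source-level pattern that reaches the check above (assuming
// a build with -fsanitize=implicit-integer-truncation) is:
//   int Wide = /* ... */;
//   short Narrow = Wide; // lossy whenever Wide does not fit in 'short'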
10557330f729Sjoerg
10567330f729Sjoerg // Should be called within CodeGenFunction::SanitizerScope RAII scope.
10577330f729Sjoerg // Returns 'i1 false' when the conversion Src -> Dst changed the sign.
10587330f729Sjoerg static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
10597330f729Sjoerg std::pair<llvm::Value *, SanitizerMask>>
10607330f729Sjoerg EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
10617330f729Sjoerg QualType DstType, CGBuilderTy &Builder) {
10627330f729Sjoerg llvm::Type *SrcTy = Src->getType();
10637330f729Sjoerg llvm::Type *DstTy = Dst->getType();
10647330f729Sjoerg
10657330f729Sjoerg assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
10667330f729Sjoerg "non-integer llvm type");
10677330f729Sjoerg
10687330f729Sjoerg bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
10697330f729Sjoerg bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
10707330f729Sjoerg (void)SrcSigned; // Only used in assert()
10717330f729Sjoerg (void)DstSigned; // Only used in assert()
10727330f729Sjoerg unsigned SrcBits = SrcTy->getScalarSizeInBits();
10737330f729Sjoerg unsigned DstBits = DstTy->getScalarSizeInBits();
10747330f729Sjoerg (void)SrcBits; // Only used in assert()
10757330f729Sjoerg (void)DstBits; // Only used in assert()
10767330f729Sjoerg
10777330f729Sjoerg assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
10787330f729Sjoerg "either the widths should be different, or the signednesses.");
10797330f729Sjoerg
10807330f729Sjoerg // NOTE: zero value is considered to be non-negative.
10817330f729Sjoerg auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
10827330f729Sjoerg const char *Name) -> Value * {
10837330f729Sjoerg // Is this value a signed type?
10847330f729Sjoerg bool VSigned = VType->isSignedIntegerOrEnumerationType();
10857330f729Sjoerg llvm::Type *VTy = V->getType();
10867330f729Sjoerg if (!VSigned) {
10877330f729Sjoerg // If the value is unsigned, then it is never negative.
10887330f729Sjoerg // FIXME: can we encounter non-scalar VTy here?
10897330f729Sjoerg return llvm::ConstantInt::getFalse(VTy->getContext());
10907330f729Sjoerg }
10917330f729Sjoerg // Get the zero of the same type with which we will be comparing.
10927330f729Sjoerg llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
10937330f729Sjoerg // %V.isnegative = icmp slt %V, 0
10947330f729Sjoerg // I.e., is %V *strictly* less than zero (does it have a negative value)?
10957330f729Sjoerg return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
10967330f729Sjoerg llvm::Twine(Name) + "." + V->getName() +
10977330f729Sjoerg ".negativitycheck");
10987330f729Sjoerg };
10997330f729Sjoerg
11007330f729Sjoerg // 1. Was the old Value negative?
11017330f729Sjoerg llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
11027330f729Sjoerg // 2. Is the new Value negative?
11037330f729Sjoerg llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
11047330f729Sjoerg // 3. Now, was the 'negativity status' preserved during the conversion?
11057330f729Sjoerg // NOTE: conversion from negative to zero is considered to change the sign.
11067330f729Sjoerg // (We want to get 'false' when the conversion changed the sign)
11077330f729Sjoerg // So we should just equality-compare the negativity statuses.
11087330f729Sjoerg llvm::Value *Check = nullptr;
11097330f729Sjoerg Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
11107330f729Sjoerg // If the comparison result is 'false', then the conversion changed the sign.
11117330f729Sjoerg return std::make_pair(
11127330f729Sjoerg ScalarExprEmitter::ICCK_IntegerSignChange,
11137330f729Sjoerg std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
11147330f729Sjoerg }
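// Illustrative sketch only (value names are schematic): for a same-width
// 'int -> unsigned' conversion the helper above boils down to
//   %src.negativitycheck = icmp slt i32 %src, 0
//   %signchangecheck     = icmp eq i1 %src.negativitycheck, false
// since an unsigned destination is never negative; the check is 'i1 true'
// exactly when the negativity status was preserved.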
11157330f729Sjoerg
11167330f729Sjoerg void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
11177330f729Sjoerg Value *Dst, QualType DstType,
11187330f729Sjoerg SourceLocation Loc) {
11197330f729Sjoerg if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
11207330f729Sjoerg return;
11217330f729Sjoerg
11227330f729Sjoerg llvm::Type *SrcTy = Src->getType();
11237330f729Sjoerg llvm::Type *DstTy = Dst->getType();
11247330f729Sjoerg
11257330f729Sjoerg // We only care about int->int conversions here.
11267330f729Sjoerg // We ignore conversions to/from pointer and/or bool.
1127*e038c9c4Sjoerg if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1128*e038c9c4Sjoerg DstType))
11297330f729Sjoerg return;
11307330f729Sjoerg
11317330f729Sjoerg bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
11327330f729Sjoerg bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
11337330f729Sjoerg unsigned SrcBits = SrcTy->getScalarSizeInBits();
11347330f729Sjoerg unsigned DstBits = DstTy->getScalarSizeInBits();
11357330f729Sjoerg
11367330f729Sjoerg // Now, we do not need to emit the check in *all* of the cases.
11377330f729Sjoerg // We can avoid emitting it in some obvious cases where it would have been
11387330f729Sjoerg // dropped by the opt passes (instcombine) anyway.
11397330f729Sjoerg // If it's a cast between effectively the same type, no check.
11407330f729Sjoerg // NOTE: this is *not* equivalent to checking the canonical types.
11417330f729Sjoerg if (SrcSigned == DstSigned && SrcBits == DstBits)
11427330f729Sjoerg return;
11437330f729Sjoerg // At least one of the values needs to have signed type.
11447330f729Sjoerg // If both are unsigned, then obviously, neither of them can be negative.
11457330f729Sjoerg if (!SrcSigned && !DstSigned)
11467330f729Sjoerg return;
11477330f729Sjoerg // If the conversion is to *larger* *signed* type, then no check is needed.
11487330f729Sjoerg // Because either sign-extension happens (so the sign will remain),
11497330f729Sjoerg // or zero-extension will happen (the sign bit will be zero.)
11507330f729Sjoerg if ((DstBits > SrcBits) && DstSigned)
11517330f729Sjoerg return;
11527330f729Sjoerg if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
11537330f729Sjoerg (SrcBits > DstBits) && SrcSigned) {
11547330f729Sjoerg // If the signed integer truncation sanitizer is enabled,
11557330f729Sjoerg // and this is a truncation from signed type, then no check is needed.
11567330f729Sjoerg // Because here sign change check is interchangeable with truncation check.
11577330f729Sjoerg return;
11587330f729Sjoerg }
11597330f729Sjoerg // That's it. We can't rule out any more cases with the data we have.
11607330f729Sjoerg
11617330f729Sjoerg CodeGenFunction::SanitizerScope SanScope(&CGF);
11627330f729Sjoerg
11637330f729Sjoerg std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
11647330f729Sjoerg std::pair<llvm::Value *, SanitizerMask>>
11657330f729Sjoerg Check;
11667330f729Sjoerg
11677330f729Sjoerg // Each of these checks needs to return 'false' when an issue was detected.
11687330f729Sjoerg ImplicitConversionCheckKind CheckKind;
11697330f729Sjoerg llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
11707330f729Sjoerg // So we can 'and' all the checks together, and still get 'false',
11717330f729Sjoerg // if at least one of the checks detected an issue.
11727330f729Sjoerg
11737330f729Sjoerg Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
11747330f729Sjoerg CheckKind = Check.first;
11757330f729Sjoerg Checks.emplace_back(Check.second);
11767330f729Sjoerg
11777330f729Sjoerg if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
11787330f729Sjoerg (SrcBits > DstBits) && !SrcSigned && DstSigned) {
11797330f729Sjoerg // If the signed integer truncation sanitizer was enabled,
11807330f729Sjoerg // and we are truncating from larger unsigned type to smaller signed type,
11817330f729Sjoerg // let's handle the case we skipped in that check.
11827330f729Sjoerg Check =
11837330f729Sjoerg EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
11847330f729Sjoerg CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
11857330f729Sjoerg Checks.emplace_back(Check.second);
11867330f729Sjoerg // If the comparison result is 'i1 false', then the truncation was lossy.
11877330f729Sjoerg }
11887330f729Sjoerg
11897330f729Sjoerg llvm::Constant *StaticArgs[] = {
11907330f729Sjoerg CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
11917330f729Sjoerg CGF.EmitCheckTypeDescriptor(DstType),
11927330f729Sjoerg llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
11937330f729Sjoerg // EmitCheck() will 'and' all the checks together.
11947330f729Sjoerg CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
11957330f729Sjoerg {Src, Dst});
11967330f729Sjoerg }
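// For reference, a source-level pattern that reaches the check above (assuming
// a build with -fsanitize=implicit-integer-sign-change) is:
//   int Signed = -1;
//   unsigned Unsigned = Signed; // the negative value becomes a large positive one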
11977330f729Sjoerg
1198*e038c9c4Sjoerg Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1199*e038c9c4Sjoerg QualType DstType, llvm::Type *SrcTy,
1200*e038c9c4Sjoerg llvm::Type *DstTy,
1201*e038c9c4Sjoerg ScalarConversionOpts Opts) {
1202*e038c9c4Sjoerg // The element types determine the kind of cast to perform.
1203*e038c9c4Sjoerg llvm::Type *SrcElementTy;
1204*e038c9c4Sjoerg llvm::Type *DstElementTy;
1205*e038c9c4Sjoerg QualType SrcElementType;
1206*e038c9c4Sjoerg QualType DstElementType;
1207*e038c9c4Sjoerg if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1208*e038c9c4Sjoerg SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1209*e038c9c4Sjoerg DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1210*e038c9c4Sjoerg SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1211*e038c9c4Sjoerg DstElementType = DstType->castAs<MatrixType>()->getElementType();
1212*e038c9c4Sjoerg } else {
1213*e038c9c4Sjoerg assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
1214*e038c9c4Sjoerg "cannot cast between matrix and non-matrix types");
1215*e038c9c4Sjoerg SrcElementTy = SrcTy;
1216*e038c9c4Sjoerg DstElementTy = DstTy;
1217*e038c9c4Sjoerg SrcElementType = SrcType;
1218*e038c9c4Sjoerg DstElementType = DstType;
1219*e038c9c4Sjoerg }
1220*e038c9c4Sjoerg
1221*e038c9c4Sjoerg if (isa<llvm::IntegerType>(SrcElementTy)) {
1222*e038c9c4Sjoerg bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1223*e038c9c4Sjoerg if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1224*e038c9c4Sjoerg InputSigned = true;
1225*e038c9c4Sjoerg }
1226*e038c9c4Sjoerg
1227*e038c9c4Sjoerg if (isa<llvm::IntegerType>(DstElementTy))
1228*e038c9c4Sjoerg return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1229*e038c9c4Sjoerg if (InputSigned)
1230*e038c9c4Sjoerg return Builder.CreateSIToFP(Src, DstTy, "conv");
1231*e038c9c4Sjoerg return Builder.CreateUIToFP(Src, DstTy, "conv");
1232*e038c9c4Sjoerg }
1233*e038c9c4Sjoerg
1234*e038c9c4Sjoerg if (isa<llvm::IntegerType>(DstElementTy)) {
1235*e038c9c4Sjoerg assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
1236*e038c9c4Sjoerg if (DstElementType->isSignedIntegerOrEnumerationType())
1237*e038c9c4Sjoerg return Builder.CreateFPToSI(Src, DstTy, "conv");
1238*e038c9c4Sjoerg return Builder.CreateFPToUI(Src, DstTy, "conv");
1239*e038c9c4Sjoerg }
1240*e038c9c4Sjoerg
1241*e038c9c4Sjoerg if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1242*e038c9c4Sjoerg return Builder.CreateFPTrunc(Src, DstTy, "conv");
1243*e038c9c4Sjoerg return Builder.CreateFPExt(Src, DstTy, "conv");
1244*e038c9c4Sjoerg }
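// Illustrative examples of the dispatch above (assuming the usual lowering):
//   int    -> float    : sitofp      unsigned -> double   : uitofp
//   float  -> int      : fptosi      double   -> unsigned : fptoui
//   double -> float    : fptrunc     float    -> double   : fpext
//   int    -> unsigned : plain integer cast (trunc/zext/sext as needed)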
1245*e038c9c4Sjoerg
12467330f729Sjoerg /// Emit a conversion from the specified type to the specified destination type,
12477330f729Sjoerg /// both of which are LLVM scalar types.
12487330f729Sjoerg Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
12497330f729Sjoerg QualType DstType,
12507330f729Sjoerg SourceLocation Loc,
12517330f729Sjoerg ScalarConversionOpts Opts) {
12527330f729Sjoerg // All conversions involving fixed point types should be handled by the
12537330f729Sjoerg // EmitFixedPoint family functions. This is done to prevent bloating up this
12547330f729Sjoerg // function more, and although fixed point numbers are represented by
12557330f729Sjoerg // integers, we do not want to follow any logic that assumes they should be
12567330f729Sjoerg // treated as integers.
12577330f729Sjoerg // TODO(leonardchan): When necessary, add another if statement checking for
12587330f729Sjoerg // conversions to fixed point types from other types.
12597330f729Sjoerg if (SrcType->isFixedPointType()) {
12607330f729Sjoerg if (DstType->isBooleanType())
12617330f729Sjoerg // It is important that we check this before checking if the dest type is
12627330f729Sjoerg // an integer because booleans are technically integer types.
12637330f729Sjoerg // We do not need to check the padding bit on unsigned types if unsigned
12647330f729Sjoerg // padding is enabled because overflow into this bit is undefined
12657330f729Sjoerg // behavior.
12667330f729Sjoerg return Builder.CreateIsNotNull(Src, "tobool");
1267*e038c9c4Sjoerg if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1268*e038c9c4Sjoerg DstType->isRealFloatingType())
12697330f729Sjoerg return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
12707330f729Sjoerg
12717330f729Sjoerg llvm_unreachable(
12727330f729Sjoerg "Unhandled scalar conversion from a fixed point type to another type.");
12737330f729Sjoerg } else if (DstType->isFixedPointType()) {
1274*e038c9c4Sjoerg if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
12757330f729Sjoerg // This also includes converting booleans and enums to fixed point types.
12767330f729Sjoerg return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
12777330f729Sjoerg
12787330f729Sjoerg llvm_unreachable(
12797330f729Sjoerg "Unhandled scalar conversion to a fixed point type from another type.");
12807330f729Sjoerg }
12817330f729Sjoerg
12827330f729Sjoerg QualType NoncanonicalSrcType = SrcType;
12837330f729Sjoerg QualType NoncanonicalDstType = DstType;
12847330f729Sjoerg
12857330f729Sjoerg SrcType = CGF.getContext().getCanonicalType(SrcType);
12867330f729Sjoerg DstType = CGF.getContext().getCanonicalType(DstType);
12877330f729Sjoerg if (SrcType == DstType) return Src;
12887330f729Sjoerg
12897330f729Sjoerg if (DstType->isVoidType()) return nullptr;
12907330f729Sjoerg
12917330f729Sjoerg llvm::Value *OrigSrc = Src;
12927330f729Sjoerg QualType OrigSrcType = SrcType;
12937330f729Sjoerg llvm::Type *SrcTy = Src->getType();
12947330f729Sjoerg
12957330f729Sjoerg // Handle conversions to bool first, they are special: comparisons against 0.
12967330f729Sjoerg if (DstType->isBooleanType())
12977330f729Sjoerg return EmitConversionToBool(Src, SrcType);
12987330f729Sjoerg
12997330f729Sjoerg llvm::Type *DstTy = ConvertType(DstType);
13007330f729Sjoerg
13017330f729Sjoerg // Cast from half through float if half isn't a native type.
13027330f729Sjoerg if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
13037330f729Sjoerg // Cast to FP using the intrinsic if the half type itself isn't supported.
13047330f729Sjoerg if (DstTy->isFloatingPointTy()) {
13057330f729Sjoerg if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
13067330f729Sjoerg return Builder.CreateCall(
13077330f729Sjoerg CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
13087330f729Sjoerg Src);
13097330f729Sjoerg } else {
13107330f729Sjoerg // Cast to other types through float, using either the intrinsic or FPExt,
13117330f729Sjoerg // depending on whether the half type itself is supported
13127330f729Sjoerg // (as opposed to operations on half, available with NativeHalfType).
13137330f729Sjoerg if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
13147330f729Sjoerg Src = Builder.CreateCall(
13157330f729Sjoerg CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
13167330f729Sjoerg CGF.CGM.FloatTy),
13177330f729Sjoerg Src);
13187330f729Sjoerg } else {
13197330f729Sjoerg Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
13207330f729Sjoerg }
13217330f729Sjoerg SrcType = CGF.getContext().FloatTy;
13227330f729Sjoerg SrcTy = CGF.FloatTy;
13237330f729Sjoerg }
13247330f729Sjoerg }
13257330f729Sjoerg
13267330f729Sjoerg // Ignore conversions like int -> uint.
13277330f729Sjoerg if (SrcTy == DstTy) {
13287330f729Sjoerg if (Opts.EmitImplicitIntegerSignChangeChecks)
13297330f729Sjoerg EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
13307330f729Sjoerg NoncanonicalDstType, Loc);
13317330f729Sjoerg
13327330f729Sjoerg return Src;
13337330f729Sjoerg }
13347330f729Sjoerg
13357330f729Sjoerg // Handle pointer conversions next: pointers can only be converted to/from
13367330f729Sjoerg // other pointers and integers. Check for pointer types in terms of LLVM, as
13377330f729Sjoerg // some native types (like Obj-C id) may map to a pointer type.
13387330f729Sjoerg if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
13397330f729Sjoerg // The source value may be an integer, or a pointer.
13407330f729Sjoerg if (isa<llvm::PointerType>(SrcTy))
13417330f729Sjoerg return Builder.CreateBitCast(Src, DstTy, "conv");
13427330f729Sjoerg
13437330f729Sjoerg assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
13447330f729Sjoerg // First, convert to the correct width so that we control the kind of
13457330f729Sjoerg // extension.
13467330f729Sjoerg llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
13477330f729Sjoerg bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
13487330f729Sjoerg llvm::Value* IntResult =
13497330f729Sjoerg Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
13507330f729Sjoerg // Then, cast to pointer.
13517330f729Sjoerg return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
13527330f729Sjoerg }
13537330f729Sjoerg
13547330f729Sjoerg if (isa<llvm::PointerType>(SrcTy)) {
13557330f729Sjoerg // Must be a ptr to int cast.
13567330f729Sjoerg assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
13577330f729Sjoerg return Builder.CreatePtrToInt(Src, DstTy, "conv");
13587330f729Sjoerg }
13597330f729Sjoerg
13607330f729Sjoerg // A scalar can be splatted to an extended vector of the same element type
13617330f729Sjoerg if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
13627330f729Sjoerg // Sema should add casts to make sure that the source expression's type is
13637330f729Sjoerg // the same as the vector's element type (sans qualifiers)
13647330f729Sjoerg assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
13657330f729Sjoerg SrcType.getTypePtr() &&
13667330f729Sjoerg "Splatted expr doesn't match with vector element type?");
13677330f729Sjoerg
13687330f729Sjoerg // Splat the element across to all elements
1369*e038c9c4Sjoerg unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
13707330f729Sjoerg return Builder.CreateVectorSplat(NumElements, Src, "splat");
13717330f729Sjoerg }
13727330f729Sjoerg
1373*e038c9c4Sjoerg if (SrcType->isMatrixType() && DstType->isMatrixType())
1374*e038c9c4Sjoerg return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1375*e038c9c4Sjoerg
13767330f729Sjoerg if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
13777330f729Sjoerg // Allow bitcast from vector to integer/fp of the same size.
13787330f729Sjoerg unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
13797330f729Sjoerg unsigned DstSize = DstTy->getPrimitiveSizeInBits();
13807330f729Sjoerg if (SrcSize == DstSize)
13817330f729Sjoerg return Builder.CreateBitCast(Src, DstTy, "conv");
13827330f729Sjoerg
13837330f729Sjoerg // Conversions between vectors of different sizes are not allowed except
13847330f729Sjoerg // when vectors of half are involved. Operations on storage-only half
13857330f729Sjoerg // vectors require promoting half vector operands to float vectors and
13867330f729Sjoerg // truncating the result, which is either an int or float vector, to a
13877330f729Sjoerg // short or half vector.
13887330f729Sjoerg
13897330f729Sjoerg // Source and destination are both expected to be vectors.
1390*e038c9c4Sjoerg llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1391*e038c9c4Sjoerg llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
13927330f729Sjoerg (void)DstElementTy;
13937330f729Sjoerg
13947330f729Sjoerg assert(((SrcElementTy->isIntegerTy() &&
13957330f729Sjoerg DstElementTy->isIntegerTy()) ||
13967330f729Sjoerg (SrcElementTy->isFloatingPointTy() &&
13977330f729Sjoerg DstElementTy->isFloatingPointTy())) &&
13987330f729Sjoerg "unexpected conversion between a floating-point vector and an "
13997330f729Sjoerg "integer vector");
14007330f729Sjoerg
14017330f729Sjoerg // Truncate an i32 vector to an i16 vector.
14027330f729Sjoerg if (SrcElementTy->isIntegerTy())
14037330f729Sjoerg return Builder.CreateIntCast(Src, DstTy, false, "conv");
14047330f729Sjoerg
14057330f729Sjoerg // Truncate a float vector to a half vector.
14067330f729Sjoerg if (SrcSize > DstSize)
14077330f729Sjoerg return Builder.CreateFPTrunc(Src, DstTy, "conv");
14087330f729Sjoerg
14097330f729Sjoerg // Promote a half vector to a float vector.
14107330f729Sjoerg return Builder.CreateFPExt(Src, DstTy, "conv");
14117330f729Sjoerg }
14127330f729Sjoerg
14137330f729Sjoerg // Finally, we have the arithmetic types: real int/float.
14147330f729Sjoerg Value *Res = nullptr;
14157330f729Sjoerg llvm::Type *ResTy = DstTy;
14167330f729Sjoerg
14177330f729Sjoerg // An overflowing conversion has undefined behavior if either the source type
14187330f729Sjoerg // or the destination type is a floating-point type. However, we consider the
14197330f729Sjoerg // range of representable values for all floating-point types to be
14207330f729Sjoerg // [-inf,+inf], so no overflow can ever happen when the destination type is a
14217330f729Sjoerg // floating-point type.
14227330f729Sjoerg if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
14237330f729Sjoerg OrigSrcType->isFloatingType())
14247330f729Sjoerg EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
14257330f729Sjoerg Loc);
14267330f729Sjoerg
14277330f729Sjoerg // Cast to half through float if half isn't a native type.
14287330f729Sjoerg if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
14297330f729Sjoerg // Make sure we cast in a single step if from another FP type.
14307330f729Sjoerg if (SrcTy->isFloatingPointTy()) {
14317330f729Sjoerg // Use the intrinsic if the half type itself isn't supported
14327330f729Sjoerg // (as opposed to operations on half, available with NativeHalfType).
14337330f729Sjoerg if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
14347330f729Sjoerg return Builder.CreateCall(
14357330f729Sjoerg CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
14367330f729Sjoerg // If the half type is supported, just use an fptrunc.
14377330f729Sjoerg return Builder.CreateFPTrunc(Src, DstTy);
14387330f729Sjoerg }
14397330f729Sjoerg DstTy = CGF.FloatTy;
14407330f729Sjoerg }
14417330f729Sjoerg
1442*e038c9c4Sjoerg Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
14437330f729Sjoerg
14447330f729Sjoerg if (DstTy != ResTy) {
14457330f729Sjoerg if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
14467330f729Sjoerg assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
14477330f729Sjoerg Res = Builder.CreateCall(
14487330f729Sjoerg CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
14497330f729Sjoerg Res);
14507330f729Sjoerg } else {
14517330f729Sjoerg Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
14527330f729Sjoerg }
14537330f729Sjoerg }
14547330f729Sjoerg
14557330f729Sjoerg if (Opts.EmitImplicitIntegerTruncationChecks)
14567330f729Sjoerg EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
14577330f729Sjoerg NoncanonicalDstType, Loc);
14587330f729Sjoerg
14597330f729Sjoerg if (Opts.EmitImplicitIntegerSignChangeChecks)
14607330f729Sjoerg EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
14617330f729Sjoerg NoncanonicalDstType, Loc);
14627330f729Sjoerg
14637330f729Sjoerg return Res;
14647330f729Sjoerg }
14657330f729Sjoerg
14667330f729Sjoerg Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
14677330f729Sjoerg QualType DstTy,
14687330f729Sjoerg SourceLocation Loc) {
1469*e038c9c4Sjoerg llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1470*e038c9c4Sjoerg llvm::Value *Result;
1471*e038c9c4Sjoerg if (SrcTy->isRealFloatingType())
1472*e038c9c4Sjoerg Result = FPBuilder.CreateFloatingToFixed(Src,
1473*e038c9c4Sjoerg CGF.getContext().getFixedPointSemantics(DstTy));
1474*e038c9c4Sjoerg else if (DstTy->isRealFloatingType())
1475*e038c9c4Sjoerg Result = FPBuilder.CreateFixedToFloating(Src,
1476*e038c9c4Sjoerg CGF.getContext().getFixedPointSemantics(SrcTy),
1477*e038c9c4Sjoerg ConvertType(DstTy));
1478*e038c9c4Sjoerg else {
1479*e038c9c4Sjoerg auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1480*e038c9c4Sjoerg auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
14817330f729Sjoerg
1482*e038c9c4Sjoerg if (DstTy->isIntegerType())
1483*e038c9c4Sjoerg Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1484*e038c9c4Sjoerg DstFPSema.getWidth(),
1485*e038c9c4Sjoerg DstFPSema.isSigned());
1486*e038c9c4Sjoerg else if (SrcTy->isIntegerType())
1487*e038c9c4Sjoerg Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1488*e038c9c4Sjoerg DstFPSema);
1489*e038c9c4Sjoerg else
1490*e038c9c4Sjoerg Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
14917330f729Sjoerg }
14927330f729Sjoerg return Result;
14937330f729Sjoerg }
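// For reference (assuming the Embedded-C fixed-point extension is enabled),
// the branches above correspond to conversions such as:
//   _Accum A = 2;   // integer -> fixed   (CreateIntegerToFixed)
//   float  F = A;   // fixed   -> float   (CreateFixedToFloating)
//   _Fract R = A;   // fixed   -> fixed   (CreateFixedToFixed)
//   int    I = A;   // fixed   -> integer (CreateFixedToInteger)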
14947330f729Sjoerg
14957330f729Sjoerg /// Emit a conversion from the specified complex type to the specified
14967330f729Sjoerg /// destination type, where the destination type is an LLVM scalar type.
14977330f729Sjoerg Value *ScalarExprEmitter::EmitComplexToScalarConversion(
14987330f729Sjoerg CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
14997330f729Sjoerg SourceLocation Loc) {
15007330f729Sjoerg // Get the source element type.
15017330f729Sjoerg SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
15027330f729Sjoerg
15037330f729Sjoerg // Handle conversions to bool first, they are special: comparisons against 0.
15047330f729Sjoerg if (DstTy->isBooleanType()) {
15057330f729Sjoerg // Complex != 0 -> (Real != 0) | (Imag != 0)
15067330f729Sjoerg Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
15077330f729Sjoerg Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
15087330f729Sjoerg return Builder.CreateOr(Src.first, Src.second, "tobool");
15097330f729Sjoerg }
15107330f729Sjoerg
15117330f729Sjoerg // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
15127330f729Sjoerg // the imaginary part of the complex value is discarded and the value of the
15137330f729Sjoerg // real part is converted according to the conversion rules for the
15147330f729Sjoerg // corresponding real type.
15157330f729Sjoerg return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
15167330f729Sjoerg }
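// Illustrative sketch only (value names are schematic): '(bool)Z' with
// '_Complex double Z' lowers along the lines of
//   %re.bool = fcmp une double %Z.real, 0.000000e+00
//   %im.bool = fcmp une double %Z.imag, 0.000000e+00
//   %tobool  = or i1 %re.bool, %im.bool
// while a conversion to a real type simply drops the imaginary part.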
15177330f729Sjoerg
15187330f729Sjoerg Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
15197330f729Sjoerg return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
15207330f729Sjoerg }
15217330f729Sjoerg
15227330f729Sjoerg /// Emit a sanitization check for the given "binary" operation (which
15237330f729Sjoerg /// might actually be a unary increment which has been lowered to a binary
15247330f729Sjoerg /// operation). The check passes if all values in \p Checks (which are \c i1),
15257330f729Sjoerg /// are \c true.
15267330f729Sjoerg void ScalarExprEmitter::EmitBinOpCheck(
15277330f729Sjoerg ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
15287330f729Sjoerg assert(CGF.IsSanitizerScope);
15297330f729Sjoerg SanitizerHandler Check;
15307330f729Sjoerg SmallVector<llvm::Constant *, 4> StaticData;
15317330f729Sjoerg SmallVector<llvm::Value *, 2> DynamicData;
15327330f729Sjoerg
15337330f729Sjoerg BinaryOperatorKind Opcode = Info.Opcode;
15347330f729Sjoerg if (BinaryOperator::isCompoundAssignmentOp(Opcode))
15357330f729Sjoerg Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
15367330f729Sjoerg
15377330f729Sjoerg StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
15387330f729Sjoerg const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
15397330f729Sjoerg if (UO && UO->getOpcode() == UO_Minus) {
15407330f729Sjoerg Check = SanitizerHandler::NegateOverflow;
15417330f729Sjoerg StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
15427330f729Sjoerg DynamicData.push_back(Info.RHS);
15437330f729Sjoerg } else {
15447330f729Sjoerg if (BinaryOperator::isShiftOp(Opcode)) {
15457330f729Sjoerg // Shift LHS negative or too large, or RHS out of bounds.
15467330f729Sjoerg Check = SanitizerHandler::ShiftOutOfBounds;
15477330f729Sjoerg const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
15487330f729Sjoerg StaticData.push_back(
15497330f729Sjoerg CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
15507330f729Sjoerg StaticData.push_back(
15517330f729Sjoerg CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
15527330f729Sjoerg } else if (Opcode == BO_Div || Opcode == BO_Rem) {
15537330f729Sjoerg // Divide or modulo by zero, or signed overflow (e.g. INT_MIN / -1).
15547330f729Sjoerg Check = SanitizerHandler::DivremOverflow;
15557330f729Sjoerg StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
15567330f729Sjoerg } else {
15577330f729Sjoerg // Arithmetic overflow (+, -, *).
15587330f729Sjoerg switch (Opcode) {
15597330f729Sjoerg case BO_Add: Check = SanitizerHandler::AddOverflow; break;
15607330f729Sjoerg case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
15617330f729Sjoerg case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
15627330f729Sjoerg default: llvm_unreachable("unexpected opcode for bin op check");
15637330f729Sjoerg }
15647330f729Sjoerg StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
15657330f729Sjoerg }
15667330f729Sjoerg DynamicData.push_back(Info.LHS);
15677330f729Sjoerg DynamicData.push_back(Info.RHS);
15687330f729Sjoerg }
15697330f729Sjoerg
15707330f729Sjoerg CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
15717330f729Sjoerg }
15727330f729Sjoerg
15737330f729Sjoerg //===----------------------------------------------------------------------===//
15747330f729Sjoerg // Visitor Methods
15757330f729Sjoerg //===----------------------------------------------------------------------===//
15767330f729Sjoerg
15777330f729Sjoerg Value *ScalarExprEmitter::VisitExpr(Expr *E) {
15787330f729Sjoerg CGF.ErrorUnsupported(E, "scalar expression");
15797330f729Sjoerg if (E->getType()->isVoidType())
15807330f729Sjoerg return nullptr;
15817330f729Sjoerg return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
15827330f729Sjoerg }
15837330f729Sjoerg
15847330f729Sjoerg Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
15857330f729Sjoerg // Vector Mask Case
15867330f729Sjoerg if (E->getNumSubExprs() == 2) {
15877330f729Sjoerg Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
15887330f729Sjoerg Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
15897330f729Sjoerg Value *Mask;
15907330f729Sjoerg
1591*e038c9c4Sjoerg auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
15927330f729Sjoerg unsigned LHSElts = LTy->getNumElements();
15937330f729Sjoerg
15947330f729Sjoerg Mask = RHS;
15957330f729Sjoerg
1596*e038c9c4Sjoerg auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
15977330f729Sjoerg
15987330f729Sjoerg // Mask off the high bits of each shuffle index.
15997330f729Sjoerg Value *MaskBits =
16007330f729Sjoerg llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
16017330f729Sjoerg Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
16027330f729Sjoerg
16037330f729Sjoerg // newv = undef
16047330f729Sjoerg // mask = mask & maskbits
16057330f729Sjoerg // for each elt
16067330f729Sjoerg // n = extract mask i
16077330f729Sjoerg // x = extract val n
16087330f729Sjoerg // newv = insert newv, x, i
1609*e038c9c4Sjoerg auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
16107330f729Sjoerg MTy->getNumElements());
16117330f729Sjoerg Value* NewV = llvm::UndefValue::get(RTy);
16127330f729Sjoerg for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
16137330f729Sjoerg Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
16147330f729Sjoerg Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
16157330f729Sjoerg
16167330f729Sjoerg Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
16177330f729Sjoerg NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
16187330f729Sjoerg }
16197330f729Sjoerg return NewV;
16207330f729Sjoerg }
16217330f729Sjoerg
16227330f729Sjoerg Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
16237330f729Sjoerg Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
16247330f729Sjoerg
1625*e038c9c4Sjoerg SmallVector<int, 32> Indices;
16267330f729Sjoerg for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
16277330f729Sjoerg llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
16287330f729Sjoerg // Check for -1 and output it as undef in the IR.
16297330f729Sjoerg if (Idx.isSigned() && Idx.isAllOnesValue())
1630*e038c9c4Sjoerg Indices.push_back(-1);
16317330f729Sjoerg else
1632*e038c9c4Sjoerg Indices.push_back(Idx.getZExtValue());
16337330f729Sjoerg }
16347330f729Sjoerg
1635*e038c9c4Sjoerg return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
16367330f729Sjoerg }
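// For reference, the constant-index form '__builtin_shufflevector(V1, V2,
// 0, 4, 1, 5)' on two 4-element vectors lowers to roughly
//   shufflevector <4 x T> %V1, <4 x T> %V2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
// with an index of -1 becoming an undef mask element, while the two-operand
// form with a runtime mask takes the extract/insert loop above instead.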
16377330f729Sjoerg
16387330f729Sjoerg Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
16397330f729Sjoerg QualType SrcType = E->getSrcExpr()->getType(),
16407330f729Sjoerg DstType = E->getType();
16417330f729Sjoerg
16427330f729Sjoerg Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
16437330f729Sjoerg
16447330f729Sjoerg SrcType = CGF.getContext().getCanonicalType(SrcType);
16457330f729Sjoerg DstType = CGF.getContext().getCanonicalType(DstType);
16467330f729Sjoerg if (SrcType == DstType) return Src;
16477330f729Sjoerg
16487330f729Sjoerg assert(SrcType->isVectorType() &&
16497330f729Sjoerg "ConvertVector source type must be a vector");
16507330f729Sjoerg assert(DstType->isVectorType() &&
16517330f729Sjoerg "ConvertVector destination type must be a vector");
16527330f729Sjoerg
16537330f729Sjoerg llvm::Type *SrcTy = Src->getType();
16547330f729Sjoerg llvm::Type *DstTy = ConvertType(DstType);
16557330f729Sjoerg
16567330f729Sjoerg // Ignore conversions like int -> uint.
16577330f729Sjoerg if (SrcTy == DstTy)
16587330f729Sjoerg return Src;
16597330f729Sjoerg
16607330f729Sjoerg QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
16617330f729Sjoerg DstEltType = DstType->castAs<VectorType>()->getElementType();
16627330f729Sjoerg
16637330f729Sjoerg assert(SrcTy->isVectorTy() &&
16647330f729Sjoerg "ConvertVector source IR type must be a vector");
16657330f729Sjoerg assert(DstTy->isVectorTy() &&
16667330f729Sjoerg "ConvertVector destination IR type must be a vector");
16677330f729Sjoerg
1668*e038c9c4Sjoerg llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
1669*e038c9c4Sjoerg *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
16707330f729Sjoerg
16717330f729Sjoerg if (DstEltType->isBooleanType()) {
16727330f729Sjoerg assert((SrcEltTy->isFloatingPointTy() ||
16737330f729Sjoerg isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
16747330f729Sjoerg
16757330f729Sjoerg llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
16767330f729Sjoerg if (SrcEltTy->isFloatingPointTy()) {
16777330f729Sjoerg return Builder.CreateFCmpUNE(Src, Zero, "tobool");
16787330f729Sjoerg } else {
16797330f729Sjoerg return Builder.CreateICmpNE(Src, Zero, "tobool");
16807330f729Sjoerg }
16817330f729Sjoerg }
16827330f729Sjoerg
16837330f729Sjoerg // We have the arithmetic types: real int/float.
16847330f729Sjoerg Value *Res = nullptr;
16857330f729Sjoerg
16867330f729Sjoerg if (isa<llvm::IntegerType>(SrcEltTy)) {
16877330f729Sjoerg bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
16887330f729Sjoerg if (isa<llvm::IntegerType>(DstEltTy))
16897330f729Sjoerg Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
16907330f729Sjoerg else if (InputSigned)
16917330f729Sjoerg Res = Builder.CreateSIToFP(Src, DstTy, "conv");
16927330f729Sjoerg else
16937330f729Sjoerg Res = Builder.CreateUIToFP(Src, DstTy, "conv");
16947330f729Sjoerg } else if (isa<llvm::IntegerType>(DstEltTy)) {
16957330f729Sjoerg assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
16967330f729Sjoerg if (DstEltType->isSignedIntegerOrEnumerationType())
16977330f729Sjoerg Res = Builder.CreateFPToSI(Src, DstTy, "conv");
16987330f729Sjoerg else
16997330f729Sjoerg Res = Builder.CreateFPToUI(Src, DstTy, "conv");
17007330f729Sjoerg } else {
17017330f729Sjoerg assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
17027330f729Sjoerg "Unknown real conversion");
17037330f729Sjoerg if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
17047330f729Sjoerg Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
17057330f729Sjoerg else
17067330f729Sjoerg Res = Builder.CreateFPExt(Src, DstTy, "conv");
17077330f729Sjoerg }
17087330f729Sjoerg
17097330f729Sjoerg return Res;
17107330f729Sjoerg }
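// For reference, '__builtin_convertvector(V, FloatVec4)' where 'V' has four
// 'int' elements and 'FloatVec4' is an assumed typedef for a 4 x float vector
// lowers to roughly
//   %conv = sitofp <4 x i32> %V to <4 x float>
// (or 'uitofp' when the source element type is unsigned).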
17117330f729Sjoerg
17127330f729Sjoerg Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
17137330f729Sjoerg if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
17147330f729Sjoerg CGF.EmitIgnoredExpr(E->getBase());
17157330f729Sjoerg return CGF.emitScalarConstant(Constant, E);
17167330f729Sjoerg } else {
17177330f729Sjoerg Expr::EvalResult Result;
17187330f729Sjoerg if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
17197330f729Sjoerg llvm::APSInt Value = Result.Val.getInt();
17207330f729Sjoerg CGF.EmitIgnoredExpr(E->getBase());
17217330f729Sjoerg return Builder.getInt(Value);
17227330f729Sjoerg }
17237330f729Sjoerg }
17247330f729Sjoerg
17257330f729Sjoerg return EmitLoadOfLValue(E);
17267330f729Sjoerg }
17277330f729Sjoerg
17287330f729Sjoerg Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
17297330f729Sjoerg TestAndClearIgnoreResultAssign();
17307330f729Sjoerg
17317330f729Sjoerg // Emit subscript expressions in rvalue contexts. For most cases, this just
17327330f729Sjoerg // loads the lvalue formed by the subscript expr. However, we have to be
17337330f729Sjoerg // careful, because the base of a vector subscript is occasionally an rvalue,
17347330f729Sjoerg // so we can't get it as an lvalue.
17357330f729Sjoerg if (!E->getBase()->getType()->isVectorType())
17367330f729Sjoerg return EmitLoadOfLValue(E);
17377330f729Sjoerg
17387330f729Sjoerg // Handle the vector case. The base must be a vector, the index must be an
17397330f729Sjoerg // integer value.
17407330f729Sjoerg Value *Base = Visit(E->getBase());
17417330f729Sjoerg Value *Idx = Visit(E->getIdx());
17427330f729Sjoerg QualType IdxTy = E->getIdx()->getType();
17437330f729Sjoerg
17447330f729Sjoerg if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
17457330f729Sjoerg CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
17467330f729Sjoerg
17477330f729Sjoerg return Builder.CreateExtractElement(Base, Idx, "vecext");
17487330f729Sjoerg }
17497330f729Sjoerg
1750*e038c9c4Sjoerg Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
1751*e038c9c4Sjoerg TestAndClearIgnoreResultAssign();
1752*e038c9c4Sjoerg
1753*e038c9c4Sjoerg // Handle the matrix case. The base must be a matrix, and the row and column
1754*e038c9c4Sjoerg // indices must be integer values.
1755*e038c9c4Sjoerg Value *RowIdx = Visit(E->getRowIdx());
1756*e038c9c4Sjoerg Value *ColumnIdx = Visit(E->getColumnIdx());
1757*e038c9c4Sjoerg Value *Matrix = Visit(E->getBase());
1758*e038c9c4Sjoerg
1759*e038c9c4Sjoerg // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
1760*e038c9c4Sjoerg llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
1761*e038c9c4Sjoerg return MB.CreateExtractElement(
1762*e038c9c4Sjoerg Matrix, RowIdx, ColumnIdx,
1763*e038c9c4Sjoerg E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
17647330f729Sjoerg }
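// For reference, 'M[R][C]' on a matrix value is lowered by MatrixBuilder to an
// extractelement on the flattened column-major vector, i.e. roughly the flat
// index 'R + C * NumRows'.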
17657330f729Sjoerg
1766*e038c9c4Sjoerg static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1767*e038c9c4Sjoerg unsigned Off) {
1768*e038c9c4Sjoerg int MV = SVI->getMaskValue(Idx);
1769*e038c9c4Sjoerg if (MV == -1)
1770*e038c9c4Sjoerg return -1;
1771*e038c9c4Sjoerg return Off + MV;
17727330f729Sjoerg }
1773*e038c9c4Sjoerg
1774*e038c9c4Sjoerg static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1775*e038c9c4Sjoerg assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
1776*e038c9c4Sjoerg "Index operand too large for shufflevector mask!");
1777*e038c9c4Sjoerg return C->getZExtValue();
17787330f729Sjoerg }
17797330f729Sjoerg
17807330f729Sjoerg Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
17817330f729Sjoerg bool Ignore = TestAndClearIgnoreResultAssign();
17827330f729Sjoerg (void)Ignore;
17837330f729Sjoerg assert (Ignore == false && "init list ignored");
17847330f729Sjoerg unsigned NumInitElements = E->getNumInits();
17857330f729Sjoerg
17867330f729Sjoerg if (E->hadArrayRangeDesignator())
17877330f729Sjoerg CGF.ErrorUnsupported(E, "GNU array range designator extension");
17887330f729Sjoerg
17897330f729Sjoerg llvm::VectorType *VType =
17907330f729Sjoerg dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
17917330f729Sjoerg
17927330f729Sjoerg if (!VType) {
17937330f729Sjoerg if (NumInitElements == 0) {
17947330f729Sjoerg // C++11 value-initialization for the scalar.
17957330f729Sjoerg return EmitNullValue(E->getType());
17967330f729Sjoerg }
17977330f729Sjoerg // We have a scalar in braces. Just use the first element.
17987330f729Sjoerg return Visit(E->getInit(0));
17997330f729Sjoerg }
18007330f729Sjoerg
1801*e038c9c4Sjoerg unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
18027330f729Sjoerg
18037330f729Sjoerg // Loop over initializers collecting the Value for each, and remembering
18047330f729Sjoerg // whether the source was swizzle (ExtVectorElementExpr). This will allow
18057330f729Sjoerg // us to fold the shuffle for the swizzle into the shuffle for the vector
18067330f729Sjoerg // initializer, since LLVM optimizers generally do not want to touch
18077330f729Sjoerg // shuffles.
18087330f729Sjoerg unsigned CurIdx = 0;
18097330f729Sjoerg bool VIsUndefShuffle = false;
18107330f729Sjoerg llvm::Value *V = llvm::UndefValue::get(VType);
18117330f729Sjoerg for (unsigned i = 0; i != NumInitElements; ++i) {
18127330f729Sjoerg Expr *IE = E->getInit(i);
18137330f729Sjoerg Value *Init = Visit(IE);
1814*e038c9c4Sjoerg SmallVector<int, 16> Args;
18157330f729Sjoerg
18167330f729Sjoerg llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
18177330f729Sjoerg
18187330f729Sjoerg // Handle scalar elements. If the scalar initializer is actually one
18197330f729Sjoerg // element of a different vector of the same width, use shuffle instead of
18207330f729Sjoerg // extract+insert.
18217330f729Sjoerg if (!VVT) {
18227330f729Sjoerg if (isa<ExtVectorElementExpr>(IE)) {
18237330f729Sjoerg llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
18247330f729Sjoerg
1825*e038c9c4Sjoerg if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
1826*e038c9c4Sjoerg ->getNumElements() == ResElts) {
18277330f729Sjoerg llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
18287330f729Sjoerg Value *LHS = nullptr, *RHS = nullptr;
18297330f729Sjoerg if (CurIdx == 0) {
18307330f729Sjoerg // insert into undef -> shuffle (src, undef)
18317330f729Sjoerg // shufflemask must use an i32
18327330f729Sjoerg Args.push_back(getAsInt32(C, CGF.Int32Ty));
1833*e038c9c4Sjoerg Args.resize(ResElts, -1);
18347330f729Sjoerg
18357330f729Sjoerg LHS = EI->getVectorOperand();
18367330f729Sjoerg RHS = V;
18377330f729Sjoerg VIsUndefShuffle = true;
18387330f729Sjoerg } else if (VIsUndefShuffle) {
18397330f729Sjoerg // insert into undefshuffle && size match -> shuffle (v, src)
18407330f729Sjoerg llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
18417330f729Sjoerg for (unsigned j = 0; j != CurIdx; ++j)
1842*e038c9c4Sjoerg Args.push_back(getMaskElt(SVV, j, 0));
1843*e038c9c4Sjoerg Args.push_back(ResElts + C->getZExtValue());
1844*e038c9c4Sjoerg Args.resize(ResElts, -1);
18457330f729Sjoerg
18467330f729Sjoerg LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
18477330f729Sjoerg RHS = EI->getVectorOperand();
18487330f729Sjoerg VIsUndefShuffle = false;
18497330f729Sjoerg }
18507330f729Sjoerg if (!Args.empty()) {
1851*e038c9c4Sjoerg V = Builder.CreateShuffleVector(LHS, RHS, Args);
18527330f729Sjoerg ++CurIdx;
18537330f729Sjoerg continue;
18547330f729Sjoerg }
18557330f729Sjoerg }
18567330f729Sjoerg }
18577330f729Sjoerg V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
18587330f729Sjoerg "vecinit");
18597330f729Sjoerg VIsUndefShuffle = false;
18607330f729Sjoerg ++CurIdx;
18617330f729Sjoerg continue;
18627330f729Sjoerg }
18637330f729Sjoerg
1864*e038c9c4Sjoerg unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
18657330f729Sjoerg
18667330f729Sjoerg // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
18677330f729Sjoerg // input is the same width as the vector being constructed, generate an
18687330f729Sjoerg // optimized shuffle of the swizzle input into the result.
18697330f729Sjoerg unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
18707330f729Sjoerg if (isa<ExtVectorElementExpr>(IE)) {
18717330f729Sjoerg llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
18727330f729Sjoerg Value *SVOp = SVI->getOperand(0);
1873*e038c9c4Sjoerg auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
18747330f729Sjoerg
18757330f729Sjoerg if (OpTy->getNumElements() == ResElts) {
18767330f729Sjoerg for (unsigned j = 0; j != CurIdx; ++j) {
18777330f729Sjoerg // If the current vector initializer is a shuffle with undef, merge
18787330f729Sjoerg // this shuffle directly into it.
18797330f729Sjoerg if (VIsUndefShuffle) {
1880*e038c9c4Sjoerg Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
18817330f729Sjoerg } else {
1882*e038c9c4Sjoerg Args.push_back(j);
18837330f729Sjoerg }
18847330f729Sjoerg }
18857330f729Sjoerg for (unsigned j = 0, je = InitElts; j != je; ++j)
1886*e038c9c4Sjoerg Args.push_back(getMaskElt(SVI, j, Offset));
1887*e038c9c4Sjoerg Args.resize(ResElts, -1);
18887330f729Sjoerg
18897330f729Sjoerg if (VIsUndefShuffle)
18907330f729Sjoerg V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
18917330f729Sjoerg
18927330f729Sjoerg Init = SVOp;
18937330f729Sjoerg }
18947330f729Sjoerg }
18957330f729Sjoerg
18967330f729Sjoerg // Extend init to result vector length, and then shuffle its contribution
18977330f729Sjoerg // to the vector initializer into V.
18987330f729Sjoerg if (Args.empty()) {
18997330f729Sjoerg for (unsigned j = 0; j != InitElts; ++j)
1900*e038c9c4Sjoerg Args.push_back(j);
1901*e038c9c4Sjoerg Args.resize(ResElts, -1);
1902*e038c9c4Sjoerg Init = Builder.CreateShuffleVector(Init, Args, "vext");
19037330f729Sjoerg
19047330f729Sjoerg Args.clear();
19057330f729Sjoerg for (unsigned j = 0; j != CurIdx; ++j)
1906*e038c9c4Sjoerg Args.push_back(j);
19077330f729Sjoerg for (unsigned j = 0; j != InitElts; ++j)
1908*e038c9c4Sjoerg Args.push_back(j + Offset);
1909*e038c9c4Sjoerg Args.resize(ResElts, -1);
19107330f729Sjoerg }
19117330f729Sjoerg
19127330f729Sjoerg // If V is undef, make sure it ends up on the RHS of the shuffle to aid
19137330f729Sjoerg // merging subsequent shuffles into this one.
19147330f729Sjoerg if (CurIdx == 0)
19157330f729Sjoerg std::swap(V, Init);
1916*e038c9c4Sjoerg V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
19177330f729Sjoerg VIsUndefShuffle = isa<llvm::UndefValue>(Init);
19187330f729Sjoerg CurIdx += InitElts;
19197330f729Sjoerg }
19207330f729Sjoerg
19217330f729Sjoerg // FIXME: evaluate codegen vs. shuffling against constant null vector.
19227330f729Sjoerg // Emit remaining default initializers.
19237330f729Sjoerg llvm::Type *EltTy = VType->getElementType();
19247330f729Sjoerg
19267330f729Sjoerg for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
19277330f729Sjoerg Value *Idx = Builder.getInt32(CurIdx);
19287330f729Sjoerg llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
19297330f729Sjoerg V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
19307330f729Sjoerg }
19317330f729Sjoerg return V;
19327330f729Sjoerg }
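// For reference, a vector initializer such as '(float4){X, Y, 0.0f, 0.0f}'
// (with 'float4' an assumed ext-vector typedef) is built up by chained
// insertelement instructions, while initializers whose elements are swizzles
// of a same-width vector are folded into shufflevector instructions by the
// logic above.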
19337330f729Sjoerg
19347330f729Sjoerg bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
19357330f729Sjoerg const Expr *E = CE->getSubExpr();
19367330f729Sjoerg
19377330f729Sjoerg if (CE->getCastKind() == CK_UncheckedDerivedToBase)
19387330f729Sjoerg return false;
19397330f729Sjoerg
19407330f729Sjoerg if (isa<CXXThisExpr>(E->IgnoreParens())) {
19417330f729Sjoerg // We always assume that 'this' is never null.
19427330f729Sjoerg return false;
19437330f729Sjoerg }
19447330f729Sjoerg
19457330f729Sjoerg if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
19467330f729Sjoerg // And that glvalue casts are never null.
19477330f729Sjoerg if (ICE->getValueKind() != VK_RValue)
19487330f729Sjoerg return false;
19497330f729Sjoerg }
19507330f729Sjoerg
19517330f729Sjoerg return true;
19527330f729Sjoerg }
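// For reference, a conversion of a possibly-null pointer such as
//   Base *B = DerivedPtr; // DerivedPtr may be null
// is null-checked before any address adjustment, whereas casts of 'this' and
// unchecked derived-to-base casts skip the check per the logic above.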
19537330f729Sjoerg
19547330f729Sjoerg // VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
19557330f729Sjoerg // have to handle a broader range of conversions than explicit casts, as they
19567330f729Sjoerg // handle things like function to ptr-to-function decay etc.
19577330f729Sjoerg Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
19587330f729Sjoerg Expr *E = CE->getSubExpr();
19597330f729Sjoerg QualType DestTy = CE->getType();
19607330f729Sjoerg CastKind Kind = CE->getCastKind();
19617330f729Sjoerg
19627330f729Sjoerg // These cases are generally not written to ignore the result of
19637330f729Sjoerg // evaluating their sub-expressions, so we clear this now.
19647330f729Sjoerg bool Ignored = TestAndClearIgnoreResultAssign();
19657330f729Sjoerg
19667330f729Sjoerg // Since almost all cast kinds apply to scalars, this switch doesn't have
19677330f729Sjoerg // a default case, so the compiler will warn on a missing case. The cases
19687330f729Sjoerg // are in the same order as in the CastKind enum.
19697330f729Sjoerg switch (Kind) {
19707330f729Sjoerg case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
19717330f729Sjoerg case CK_BuiltinFnToFnPtr:
19727330f729Sjoerg llvm_unreachable("builtin functions are handled elsewhere");
19737330f729Sjoerg
19747330f729Sjoerg case CK_LValueBitCast:
19757330f729Sjoerg case CK_ObjCObjectLValueCast: {
1976*e038c9c4Sjoerg Address Addr = EmitLValue(E).getAddress(CGF);
19777330f729Sjoerg Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
19787330f729Sjoerg LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
19797330f729Sjoerg return EmitLoadOfLValue(LV, CE->getExprLoc());
19807330f729Sjoerg }
19817330f729Sjoerg
19827330f729Sjoerg case CK_LValueToRValueBitCast: {
19837330f729Sjoerg LValue SourceLVal = CGF.EmitLValue(E);
1984*e038c9c4Sjoerg Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
19857330f729Sjoerg CGF.ConvertTypeForMem(DestTy));
19867330f729Sjoerg LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
19877330f729Sjoerg DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
19887330f729Sjoerg return EmitLoadOfLValue(DestLV, CE->getExprLoc());
19897330f729Sjoerg }
19907330f729Sjoerg
19917330f729Sjoerg case CK_CPointerToObjCPointerCast:
19927330f729Sjoerg case CK_BlockPointerToObjCPointerCast:
19937330f729Sjoerg case CK_AnyPointerToBlockPointerCast:
19947330f729Sjoerg case CK_BitCast: {
19957330f729Sjoerg Value *Src = Visit(const_cast<Expr*>(E));
19967330f729Sjoerg llvm::Type *SrcTy = Src->getType();
19977330f729Sjoerg llvm::Type *DstTy = ConvertType(DestTy);
19987330f729Sjoerg if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
19997330f729Sjoerg SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
20007330f729Sjoerg llvm_unreachable("wrong cast for pointers in different address spaces"
20017330f729Sjoerg "(must be an address space cast)!");
20027330f729Sjoerg }
20037330f729Sjoerg
20047330f729Sjoerg if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
20057330f729Sjoerg if (auto PT = DestTy->getAs<PointerType>())
20067330f729Sjoerg CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
20077330f729Sjoerg /*MayBeNull=*/true,
20087330f729Sjoerg CodeGenFunction::CFITCK_UnrelatedCast,
20097330f729Sjoerg CE->getBeginLoc());
20107330f729Sjoerg }
20117330f729Sjoerg
20127330f729Sjoerg if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
20137330f729Sjoerg const QualType SrcType = E->getType();
20147330f729Sjoerg
20157330f729Sjoerg if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
20167330f729Sjoerg // Casting to pointer that could carry dynamic information (provided by
20177330f729Sjoerg // invariant.group) requires launder.
20187330f729Sjoerg Src = Builder.CreateLaunderInvariantGroup(Src);
20197330f729Sjoerg } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
20207330f729Sjoerg // Casting to pointer that does not carry dynamic information (provided
20217330f729Sjoerg // by invariant.group) requires stripping it. Note that we don't strip
20227330f729Sjoerg // when the source could not be a dynamic type but the destination could
20237330f729Sjoerg // be, because the launder already covers that case:
20247330f729Sjoerg // launder(strip(src)) == launder(src), so there is no need to add an
20257330f729Sjoerg // extra strip before the launder.
20267330f729Sjoerg Src = Builder.CreateStripInvariantGroup(Src);
20277330f729Sjoerg }
20287330f729Sjoerg }
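// Illustrative example (not from a specific test): under
// -fstrict-vtable-pointers, casting e.g. a 'char *' to a pointer to a
// polymorphic class is emitted with a call to llvm.launder.invariant.group
// on the source pointer, while casting a pointer to a polymorphic class
// back to 'char *' is emitted with llvm.strip.invariant.group instead.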
20297330f729Sjoerg
2030*e038c9c4Sjoerg // Update heapallocsite metadata when there is an explicit pointer cast.
2031*e038c9c4Sjoerg if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
2032*e038c9c4Sjoerg if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) {
2033*e038c9c4Sjoerg QualType PointeeType = DestTy->getPointeeType();
2034*e038c9c4Sjoerg if (!PointeeType.isNull())
2035*e038c9c4Sjoerg CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2036*e038c9c4Sjoerg CE->getExprLoc());
2037*e038c9c4Sjoerg }
2038*e038c9c4Sjoerg }
2039*e038c9c4Sjoerg
2040*e038c9c4Sjoerg // If Src is a fixed vector and Dst is a scalable vector, and both have the
2041*e038c9c4Sjoerg // same element type, use the llvm.experimental.vector.insert intrinsic to
2042*e038c9c4Sjoerg // perform the bitcast.
2043*e038c9c4Sjoerg if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2044*e038c9c4Sjoerg if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2045*e038c9c4Sjoerg if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
2046*e038c9c4Sjoerg llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
2047*e038c9c4Sjoerg llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2048*e038c9c4Sjoerg return Builder.CreateInsertVector(DstTy, UndefVec, Src, Zero,
2049*e038c9c4Sjoerg "castScalableSve");
2050*e038c9c4Sjoerg }
2051*e038c9c4Sjoerg }
2052*e038c9c4Sjoerg }
2053*e038c9c4Sjoerg
2054*e038c9c4Sjoerg // If Src is a scalable vector and Dst is a fixed vector, and both have the
2055*e038c9c4Sjoerg // same element type, use the llvm.experimental.vector.extract intrinsic to
2056*e038c9c4Sjoerg // perform the bitcast.
2057*e038c9c4Sjoerg if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2058*e038c9c4Sjoerg if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2059*e038c9c4Sjoerg if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
2060*e038c9c4Sjoerg llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2061*e038c9c4Sjoerg return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve");
2062*e038c9c4Sjoerg }
2063*e038c9c4Sjoerg }
2064*e038c9c4Sjoerg }
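// Illustrative example for the two cases above (intrinsic type-mangling
// suffixes omitted): casting a 512-bit fixed vector of i32 to an SVE
// vector, and back, is emitted roughly as
//   %s = call <vscale x 4 x i32> @llvm.experimental.vector.insert(
//            <vscale x 4 x i32> undef, <16 x i32> %fixed, i64 0)
//   %f = call <16 x i32> @llvm.experimental.vector.extract(
//            <vscale x 4 x i32> %scalable, i64 0)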
2065*e038c9c4Sjoerg
2066*e038c9c4Sjoerg // Perform VLAT <-> VLST bitcast through memory.
2067*e038c9c4Sjoerg // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
2068*e038c9c4Sjoerg // require the element types of the vectors to be the same, we
2069*e038c9c4Sjoerg // need to keep this around for casting between predicates, or more
2070*e038c9c4Sjoerg // generally for bitcasts between VLAT <-> VLST where the element
2071*e038c9c4Sjoerg // types of the vectors are not the same, until we figure out a better
2072*e038c9c4Sjoerg // way of doing these casts.
2073*e038c9c4Sjoerg if ((isa<llvm::FixedVectorType>(SrcTy) &&
2074*e038c9c4Sjoerg isa<llvm::ScalableVectorType>(DstTy)) ||
2075*e038c9c4Sjoerg (isa<llvm::ScalableVectorType>(SrcTy) &&
2076*e038c9c4Sjoerg isa<llvm::FixedVectorType>(DstTy))) {
2077*e038c9c4Sjoerg if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
2078*e038c9c4Sjoerg // A call expression produces a scalar rvalue unless its return type
2079*e038c9c4Sjoerg // is a reference type, so an lvalue can't be emitted here. Create a temp
2080*e038c9c4Sjoerg // alloca to store the call result, bitcast the address, then load.
2081*e038c9c4Sjoerg QualType RetTy = CE->getCallReturnType(CGF.getContext());
2082*e038c9c4Sjoerg Address Addr =
2083*e038c9c4Sjoerg CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-call-rvalue");
2084*e038c9c4Sjoerg LValue LV = CGF.MakeAddrLValue(Addr, RetTy);
2085*e038c9c4Sjoerg CGF.EmitStoreOfScalar(Src, LV);
2086*e038c9c4Sjoerg Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
2087*e038c9c4Sjoerg "castFixedSve");
2088*e038c9c4Sjoerg LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2089*e038c9c4Sjoerg DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2090*e038c9c4Sjoerg return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2091*e038c9c4Sjoerg }
2092*e038c9c4Sjoerg
2093*e038c9c4Sjoerg Address Addr = EmitLValue(E).getAddress(CGF);
2094*e038c9c4Sjoerg Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
2095*e038c9c4Sjoerg LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2096*e038c9c4Sjoerg DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2097*e038c9c4Sjoerg return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2098*e038c9c4Sjoerg }
20997330f729Sjoerg
21007330f729Sjoerg return Builder.CreateBitCast(Src, DstTy);
21017330f729Sjoerg }
21027330f729Sjoerg case CK_AddressSpaceConversion: {
21037330f729Sjoerg Expr::EvalResult Result;
21047330f729Sjoerg if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
21057330f729Sjoerg Result.Val.isNullPointer()) {
21067330f729Sjoerg // If E has side effects, it is emitted even if its final result is a
21077330f729Sjoerg // null pointer. In that case, a DCE pass should be able to
21087330f729Sjoerg // eliminate the useless instructions emitted while translating E.
21097330f729Sjoerg if (Result.HasSideEffects)
21107330f729Sjoerg Visit(E);
21117330f729Sjoerg return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
21127330f729Sjoerg ConvertType(DestTy)), DestTy);
21137330f729Sjoerg }
21147330f729Sjoerg // Since the target may map different address spaces in the AST to the same
21157330f729Sjoerg // address space, an address space conversion may end up as a bitcast.
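// Illustrative example: on a target where two source-level address spaces
// are lowered to the same LLVM address space, performAddrSpaceCast is free
// to emit a plain bitcast (or nothing) rather than an addrspacecast.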
21167330f729Sjoerg return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
21177330f729Sjoerg CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
21187330f729Sjoerg DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
21197330f729Sjoerg }
21207330f729Sjoerg case CK_AtomicToNonAtomic:
21217330f729Sjoerg case CK_NonAtomicToAtomic:
21227330f729Sjoerg case CK_NoOp:
21237330f729Sjoerg case CK_UserDefinedConversion:
21247330f729Sjoerg return Visit(const_cast<Expr*>(E));
21257330f729Sjoerg
21267330f729Sjoerg case CK_BaseToDerived: {
21277330f729Sjoerg const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
21287330f729Sjoerg assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
21297330f729Sjoerg
21307330f729Sjoerg Address Base = CGF.EmitPointerWithAlignment(E);
21317330f729Sjoerg Address Derived =
21327330f729Sjoerg CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
21337330f729Sjoerg CE->path_begin(), CE->path_end(),
21347330f729Sjoerg CGF.ShouldNullCheckClassCastValue(CE));
21357330f729Sjoerg
21367330f729Sjoerg // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
21377330f729Sjoerg // performed and the object is not of the derived type.
21387330f729Sjoerg if (CGF.sanitizePerformTypeCheck())
21397330f729Sjoerg CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
21407330f729Sjoerg Derived.getPointer(), DestTy->getPointeeType());
21417330f729Sjoerg
21427330f729Sjoerg if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
21437330f729Sjoerg CGF.EmitVTablePtrCheckForCast(
21447330f729Sjoerg DestTy->getPointeeType(), Derived.getPointer(),
21457330f729Sjoerg /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
21467330f729Sjoerg CE->getBeginLoc());
21477330f729Sjoerg
21487330f729Sjoerg return Derived.getPointer();
21497330f729Sjoerg }
21507330f729Sjoerg case CK_UncheckedDerivedToBase:
21517330f729Sjoerg case CK_DerivedToBase: {
21527330f729Sjoerg // The EmitPointerWithAlignment path does this fine; just discard
21537330f729Sjoerg // the alignment.
21547330f729Sjoerg return CGF.EmitPointerWithAlignment(CE).getPointer();
21557330f729Sjoerg }
21567330f729Sjoerg
21577330f729Sjoerg case CK_Dynamic: {
21587330f729Sjoerg Address V = CGF.EmitPointerWithAlignment(E);
21597330f729Sjoerg const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
21607330f729Sjoerg return CGF.EmitDynamicCast(V, DCE);
21617330f729Sjoerg }
21627330f729Sjoerg
21637330f729Sjoerg case CK_ArrayToPointerDecay:
21647330f729Sjoerg return CGF.EmitArrayToPointerDecay(E).getPointer();
21657330f729Sjoerg case CK_FunctionToPointerDecay:
2166*e038c9c4Sjoerg return EmitLValue(E).getPointer(CGF);
21677330f729Sjoerg
21687330f729Sjoerg case CK_NullToPointer:
21697330f729Sjoerg if (MustVisitNullValue(E))
21707330f729Sjoerg CGF.EmitIgnoredExpr(E);
21717330f729Sjoerg
21727330f729Sjoerg return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
21737330f729Sjoerg DestTy);
21747330f729Sjoerg
21757330f729Sjoerg case CK_NullToMemberPointer: {
21767330f729Sjoerg if (MustVisitNullValue(E))
21777330f729Sjoerg CGF.EmitIgnoredExpr(E);
21787330f729Sjoerg
21797330f729Sjoerg const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
21807330f729Sjoerg return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
21817330f729Sjoerg }
21827330f729Sjoerg
21837330f729Sjoerg case CK_ReinterpretMemberPointer:
21847330f729Sjoerg case CK_BaseToDerivedMemberPointer:
21857330f729Sjoerg case CK_DerivedToBaseMemberPointer: {
21867330f729Sjoerg Value *Src = Visit(E);
21877330f729Sjoerg
21887330f729Sjoerg // Note that the AST doesn't distinguish between checked and
21897330f729Sjoerg // unchecked member pointer conversions, so we always have to
21907330f729Sjoerg // implement checked conversions here. This is inefficient when
21917330f729Sjoerg // actual control flow may be required in order to perform the
21927330f729Sjoerg // check, which it is for data member pointers (but not member
21937330f729Sjoerg // function pointers on Itanium and ARM).
21947330f729Sjoerg return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
21957330f729Sjoerg }
21967330f729Sjoerg
21977330f729Sjoerg case CK_ARCProduceObject:
21987330f729Sjoerg return CGF.EmitARCRetainScalarExpr(E);
21997330f729Sjoerg case CK_ARCConsumeObject:
22007330f729Sjoerg return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
22017330f729Sjoerg case CK_ARCReclaimReturnedObject:
22027330f729Sjoerg return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
22037330f729Sjoerg case CK_ARCExtendBlockObject:
22047330f729Sjoerg return CGF.EmitARCExtendBlockObject(E);
22057330f729Sjoerg
22067330f729Sjoerg case CK_CopyAndAutoreleaseBlockObject:
22077330f729Sjoerg return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
22087330f729Sjoerg
22097330f729Sjoerg case CK_FloatingRealToComplex:
22107330f729Sjoerg case CK_FloatingComplexCast:
22117330f729Sjoerg case CK_IntegralRealToComplex:
22127330f729Sjoerg case CK_IntegralComplexCast:
22137330f729Sjoerg case CK_IntegralComplexToFloatingComplex:
22147330f729Sjoerg case CK_FloatingComplexToIntegralComplex:
22157330f729Sjoerg case CK_ConstructorConversion:
22167330f729Sjoerg case CK_ToUnion:
22177330f729Sjoerg llvm_unreachable("scalar cast to non-scalar value");
22187330f729Sjoerg
22197330f729Sjoerg case CK_LValueToRValue:
22207330f729Sjoerg assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
22217330f729Sjoerg assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
22227330f729Sjoerg return Visit(const_cast<Expr*>(E));
22237330f729Sjoerg
22247330f729Sjoerg case CK_IntegralToPointer: {
22257330f729Sjoerg Value *Src = Visit(const_cast<Expr*>(E));
22267330f729Sjoerg
22277330f729Sjoerg // First, convert to the correct width so that we control the kind of
22287330f729Sjoerg // extension.
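// Illustrative example: on a target with 64-bit pointers, casting a 'short'
// to a pointer sign-extends the value to i64 before the inttoptr, while an
// 'unsigned short' is zero-extended, so the kind of extension is decided
// here rather than left to the inttoptr.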
22297330f729Sjoerg auto DestLLVMTy = ConvertType(DestTy);
22307330f729Sjoerg llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
22317330f729Sjoerg bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
22327330f729Sjoerg llvm::Value* IntResult =
22337330f729Sjoerg Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
22347330f729Sjoerg
22357330f729Sjoerg auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
22367330f729Sjoerg
22377330f729Sjoerg if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
22387330f729Sjoerg // Going from integer to pointer that could be dynamic requires reloading
22397330f729Sjoerg // dynamic information from invariant.group.
22407330f729Sjoerg if (DestTy.mayBeDynamicClass())
22417330f729Sjoerg IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
22427330f729Sjoerg }
22437330f729Sjoerg return IntToPtr;
22447330f729Sjoerg }
22457330f729Sjoerg case CK_PointerToIntegral: {
22467330f729Sjoerg assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
22477330f729Sjoerg auto *PtrExpr = Visit(E);
22487330f729Sjoerg
22497330f729Sjoerg if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
22507330f729Sjoerg const QualType SrcType = E->getType();
22517330f729Sjoerg
22527330f729Sjoerg // Casting to integer requires stripping dynamic information as it does
22537330f729Sjoerg // not carry it.
22547330f729Sjoerg if (SrcType.mayBeDynamicClass())
22557330f729Sjoerg PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
22567330f729Sjoerg }
22577330f729Sjoerg
22587330f729Sjoerg return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
22597330f729Sjoerg }
22607330f729Sjoerg case CK_ToVoid: {
22617330f729Sjoerg CGF.EmitIgnoredExpr(E);
22627330f729Sjoerg return nullptr;
22637330f729Sjoerg }
2264*e038c9c4Sjoerg case CK_MatrixCast: {
2265*e038c9c4Sjoerg return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2266*e038c9c4Sjoerg CE->getExprLoc());
2267*e038c9c4Sjoerg }
22687330f729Sjoerg case CK_VectorSplat: {
22697330f729Sjoerg llvm::Type *DstTy = ConvertType(DestTy);
22707330f729Sjoerg Value *Elt = Visit(const_cast<Expr*>(E));
22717330f729Sjoerg // Splat the element across to all elements
2272*e038c9c4Sjoerg unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
22737330f729Sjoerg return Builder.CreateVectorSplat(NumElements, Elt, "splat");
22747330f729Sjoerg }
22757330f729Sjoerg
22767330f729Sjoerg case CK_FixedPointCast:
22777330f729Sjoerg return EmitScalarConversion(Visit(E), E->getType(), DestTy,
22787330f729Sjoerg CE->getExprLoc());
22797330f729Sjoerg
22807330f729Sjoerg case CK_FixedPointToBoolean:
22817330f729Sjoerg assert(E->getType()->isFixedPointType() &&
22827330f729Sjoerg "Expected src type to be fixed point type");
22837330f729Sjoerg assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
22847330f729Sjoerg return EmitScalarConversion(Visit(E), E->getType(), DestTy,
22857330f729Sjoerg CE->getExprLoc());
22867330f729Sjoerg
22877330f729Sjoerg case CK_FixedPointToIntegral:
22887330f729Sjoerg assert(E->getType()->isFixedPointType() &&
22897330f729Sjoerg "Expected src type to be fixed point type");
22907330f729Sjoerg assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
22917330f729Sjoerg return EmitScalarConversion(Visit(E), E->getType(), DestTy,
22927330f729Sjoerg CE->getExprLoc());
22937330f729Sjoerg
22947330f729Sjoerg case CK_IntegralToFixedPoint:
22957330f729Sjoerg assert(E->getType()->isIntegerType() &&
22967330f729Sjoerg "Expected src type to be an integer");
22977330f729Sjoerg assert(DestTy->isFixedPointType() &&
22987330f729Sjoerg "Expected dest type to be fixed point type");
22997330f729Sjoerg return EmitScalarConversion(Visit(E), E->getType(), DestTy,
23007330f729Sjoerg CE->getExprLoc());
23017330f729Sjoerg
23027330f729Sjoerg case CK_IntegralCast: {
23037330f729Sjoerg ScalarConversionOpts Opts;
23047330f729Sjoerg if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
23057330f729Sjoerg if (!ICE->isPartOfExplicitCast())
23067330f729Sjoerg Opts = ScalarConversionOpts(CGF.SanOpts);
23077330f729Sjoerg }
23087330f729Sjoerg return EmitScalarConversion(Visit(E), E->getType(), DestTy,
23097330f729Sjoerg CE->getExprLoc(), Opts);
23107330f729Sjoerg }
23117330f729Sjoerg case CK_IntegralToFloating:
23127330f729Sjoerg case CK_FloatingToIntegral:
23137330f729Sjoerg case CK_FloatingCast:
2314*e038c9c4Sjoerg case CK_FixedPointToFloating:
2315*e038c9c4Sjoerg case CK_FloatingToFixedPoint: {
2316*e038c9c4Sjoerg CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
23177330f729Sjoerg return EmitScalarConversion(Visit(E), E->getType(), DestTy,
23187330f729Sjoerg CE->getExprLoc());
2319*e038c9c4Sjoerg }
23207330f729Sjoerg case CK_BooleanToSignedIntegral: {
23217330f729Sjoerg ScalarConversionOpts Opts;
23227330f729Sjoerg Opts.TreatBooleanAsSigned = true;
23237330f729Sjoerg return EmitScalarConversion(Visit(E), E->getType(), DestTy,
23247330f729Sjoerg CE->getExprLoc(), Opts);
23257330f729Sjoerg }
23267330f729Sjoerg case CK_IntegralToBoolean:
23277330f729Sjoerg return EmitIntToBoolConversion(Visit(E));
23287330f729Sjoerg case CK_PointerToBoolean:
23297330f729Sjoerg return EmitPointerToBoolConversion(Visit(E), E->getType());
2330*e038c9c4Sjoerg case CK_FloatingToBoolean: {
2331*e038c9c4Sjoerg CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
23327330f729Sjoerg return EmitFloatToBoolConversion(Visit(E));
2333*e038c9c4Sjoerg }
23347330f729Sjoerg case CK_MemberPointerToBoolean: {
23357330f729Sjoerg llvm::Value *MemPtr = Visit(E);
23367330f729Sjoerg const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
23377330f729Sjoerg return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
23387330f729Sjoerg }
23397330f729Sjoerg
23407330f729Sjoerg case CK_FloatingComplexToReal:
23417330f729Sjoerg case CK_IntegralComplexToReal:
23427330f729Sjoerg return CGF.EmitComplexExpr(E, false, true).first;
23437330f729Sjoerg
23447330f729Sjoerg case CK_FloatingComplexToBoolean:
23457330f729Sjoerg case CK_IntegralComplexToBoolean: {
23467330f729Sjoerg CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
23477330f729Sjoerg
23487330f729Sjoerg // TODO: kill this function off, inline appropriate case here
23497330f729Sjoerg return EmitComplexToScalarConversion(V, E->getType(), DestTy,
23507330f729Sjoerg CE->getExprLoc());
23517330f729Sjoerg }
23527330f729Sjoerg
23537330f729Sjoerg case CK_ZeroToOCLOpaqueType: {
23547330f729Sjoerg assert((DestTy->isEventT() || DestTy->isQueueT() ||
23557330f729Sjoerg DestTy->isOCLIntelSubgroupAVCType()) &&
23567330f729Sjoerg "CK_ZeroToOCLEvent cast on non-event type");
23577330f729Sjoerg return llvm::Constant::getNullValue(ConvertType(DestTy));
23587330f729Sjoerg }
23597330f729Sjoerg
23607330f729Sjoerg case CK_IntToOCLSampler:
23617330f729Sjoerg return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
23627330f729Sjoerg
23637330f729Sjoerg } // end of switch
23647330f729Sjoerg
23657330f729Sjoerg llvm_unreachable("unknown scalar cast");
23667330f729Sjoerg }
23677330f729Sjoerg
23687330f729Sjoerg Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
23697330f729Sjoerg CodeGenFunction::StmtExprEvaluation eval(CGF);
23707330f729Sjoerg Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
23717330f729Sjoerg !E->getType()->isVoidType());
23727330f729Sjoerg if (!RetAlloca.isValid())
23737330f729Sjoerg return nullptr;
23747330f729Sjoerg return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
23757330f729Sjoerg E->getExprLoc());
23767330f729Sjoerg }
23777330f729Sjoerg
23787330f729Sjoerg Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
23797330f729Sjoerg CodeGenFunction::RunCleanupsScope Scope(CGF);
23807330f729Sjoerg Value *V = Visit(E->getSubExpr());
23817330f729Sjoerg // Defend against dominance problems caused by jumps out of expression
23827330f729Sjoerg // evaluation through the shared cleanup block.
23837330f729Sjoerg Scope.ForceCleanup({&V});
23847330f729Sjoerg return V;
23857330f729Sjoerg }
23867330f729Sjoerg
23877330f729Sjoerg //===----------------------------------------------------------------------===//
23887330f729Sjoerg // Unary Operators
23897330f729Sjoerg //===----------------------------------------------------------------------===//
23907330f729Sjoerg
23917330f729Sjoerg static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2392*e038c9c4Sjoerg llvm::Value *InVal, bool IsInc,
2393*e038c9c4Sjoerg FPOptions FPFeatures) {
23947330f729Sjoerg BinOpInfo BinOp;
23957330f729Sjoerg BinOp.LHS = InVal;
23967330f729Sjoerg BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
23977330f729Sjoerg BinOp.Ty = E->getType();
23987330f729Sjoerg BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2399*e038c9c4Sjoerg BinOp.FPFeatures = FPFeatures;
24007330f729Sjoerg BinOp.E = E;
24017330f729Sjoerg return BinOp;
24027330f729Sjoerg }
24037330f729Sjoerg
24047330f729Sjoerg llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
24057330f729Sjoerg const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
24067330f729Sjoerg llvm::Value *Amount =
24077330f729Sjoerg llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
24087330f729Sjoerg StringRef Name = IsInc ? "inc" : "dec";
24097330f729Sjoerg switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
24107330f729Sjoerg case LangOptions::SOB_Defined:
24117330f729Sjoerg return Builder.CreateAdd(InVal, Amount, Name);
24127330f729Sjoerg case LangOptions::SOB_Undefined:
24137330f729Sjoerg if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
24147330f729Sjoerg return Builder.CreateNSWAdd(InVal, Amount, Name);
24157330f729Sjoerg LLVM_FALLTHROUGH;
24167330f729Sjoerg case LangOptions::SOB_Trapping:
24177330f729Sjoerg if (!E->canOverflow())
24187330f729Sjoerg return Builder.CreateNSWAdd(InVal, Amount, Name);
2419*e038c9c4Sjoerg return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2420*e038c9c4Sjoerg E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
24217330f729Sjoerg }
24227330f729Sjoerg llvm_unreachable("Unknown SignedOverflowBehaviorTy");
24237330f729Sjoerg }
24247330f729Sjoerg
2425*e038c9c4Sjoerg namespace {
2426*e038c9c4Sjoerg /// Handles check and update for lastprivate conditional variables.
2427*e038c9c4Sjoerg class OMPLastprivateConditionalUpdateRAII {
2428*e038c9c4Sjoerg private:
2429*e038c9c4Sjoerg CodeGenFunction &CGF;
2430*e038c9c4Sjoerg const UnaryOperator *E;
2431*e038c9c4Sjoerg
2432*e038c9c4Sjoerg public:
2433*e038c9c4Sjoerg OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
2434*e038c9c4Sjoerg const UnaryOperator *E)
2435*e038c9c4Sjoerg : CGF(CGF), E(E) {}
2436*e038c9c4Sjoerg ~OMPLastprivateConditionalUpdateRAII() {
2437*e038c9c4Sjoerg if (CGF.getLangOpts().OpenMP)
2438*e038c9c4Sjoerg CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
2439*e038c9c4Sjoerg CGF, E->getSubExpr());
2440*e038c9c4Sjoerg }
2441*e038c9c4Sjoerg };
2442*e038c9c4Sjoerg } // namespace
2443*e038c9c4Sjoerg
24447330f729Sjoerg llvm::Value *
24457330f729Sjoerg ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
24467330f729Sjoerg bool isInc, bool isPre) {
2447*e038c9c4Sjoerg OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
24487330f729Sjoerg QualType type = E->getSubExpr()->getType();
24497330f729Sjoerg llvm::PHINode *atomicPHI = nullptr;
24507330f729Sjoerg llvm::Value *value;
24517330f729Sjoerg llvm::Value *input;
24527330f729Sjoerg
24537330f729Sjoerg int amount = (isInc ? 1 : -1);
24547330f729Sjoerg bool isSubtraction = !isInc;
24557330f729Sjoerg
24567330f729Sjoerg if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
24577330f729Sjoerg type = atomicTy->getValueType();
24587330f729Sjoerg if (isInc && type->isBooleanType()) {
24597330f729Sjoerg llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
24607330f729Sjoerg if (isPre) {
2461*e038c9c4Sjoerg Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
24627330f729Sjoerg ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
24637330f729Sjoerg return Builder.getTrue();
24647330f729Sjoerg }
24657330f729Sjoerg // For atomic bool increment, we just store true and return it for
24667330f729Sjoerg // preincrement; for postincrement, do an atomic swap with true.
24677330f729Sjoerg return Builder.CreateAtomicRMW(
2468*e038c9c4Sjoerg llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True,
24697330f729Sjoerg llvm::AtomicOrdering::SequentiallyConsistent);
24707330f729Sjoerg }
24717330f729Sjoerg // Special case for atomic increment / decrement on integers: emit
24727330f729Sjoerg // atomicrmw instructions. We skip this when overflow checking is requested,
24737330f729Sjoerg // and instead fall into the slow path with the atomic cmpxchg loop.
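// Illustrative example: '++x' on an _Atomic(int) x is emitted roughly as
//   %old = atomicrmw add i32* %x, i32 1 seq_cst
// and the pre-increment result is recomputed below as '%old + 1', while
// post-increment simply yields %old.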
24747330f729Sjoerg if (!type->isBooleanType() && type->isIntegerType() &&
24757330f729Sjoerg !(type->isUnsignedIntegerType() &&
24767330f729Sjoerg CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
24777330f729Sjoerg CGF.getLangOpts().getSignedOverflowBehavior() !=
24787330f729Sjoerg LangOptions::SOB_Trapping) {
24797330f729Sjoerg llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
24807330f729Sjoerg llvm::AtomicRMWInst::Sub;
24817330f729Sjoerg llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
24827330f729Sjoerg llvm::Instruction::Sub;
24837330f729Sjoerg llvm::Value *amt = CGF.EmitToMemory(
24847330f729Sjoerg llvm::ConstantInt::get(ConvertType(type), 1, true), type);
2485*e038c9c4Sjoerg llvm::Value *old =
2486*e038c9c4Sjoerg Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt,
2487*e038c9c4Sjoerg llvm::AtomicOrdering::SequentiallyConsistent);
24887330f729Sjoerg return isPre ? Builder.CreateBinOp(op, old, amt) : old;
24897330f729Sjoerg }
24907330f729Sjoerg value = EmitLoadOfLValue(LV, E->getExprLoc());
24917330f729Sjoerg input = value;
24927330f729Sjoerg // For every other atomic operation, we need to emit a load-op-cmpxchg loop
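// The loop started here (and completed later, where atomicPHI gets its
// second incoming value) looks roughly like this illustrative sketch:
//   atomic_op:
//     %old = phi [ %loaded, %entry ], [ %cur, %atomic_op ]
//     %new = <op> %old, 1
//     %pair = cmpxchg %ptr, %old, %new
//     br %success, label %atomic_cont, label %atomic_op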
24937330f729Sjoerg llvm::BasicBlock *startBB = Builder.GetInsertBlock();
24947330f729Sjoerg llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
24957330f729Sjoerg value = CGF.EmitToMemory(value, type);
24967330f729Sjoerg Builder.CreateBr(opBB);
24977330f729Sjoerg Builder.SetInsertPoint(opBB);
24987330f729Sjoerg atomicPHI = Builder.CreatePHI(value->getType(), 2);
24997330f729Sjoerg atomicPHI->addIncoming(value, startBB);
25007330f729Sjoerg value = atomicPHI;
25017330f729Sjoerg } else {
25027330f729Sjoerg value = EmitLoadOfLValue(LV, E->getExprLoc());
25037330f729Sjoerg input = value;
25047330f729Sjoerg }
25057330f729Sjoerg
25067330f729Sjoerg // Special case of integer increment that we have to check first: bool++.
25077330f729Sjoerg // Due to promotion rules, we get:
25087330f729Sjoerg // bool++ -> bool = bool + 1
25097330f729Sjoerg // -> bool = (int)bool + 1
25107330f729Sjoerg // -> bool = ((int)bool + 1 != 0)
25117330f729Sjoerg // An interesting aspect of this is that increment is always true.
25127330f729Sjoerg // Decrement does not have this property.
25137330f729Sjoerg if (isInc && type->isBooleanType()) {
25147330f729Sjoerg value = Builder.getTrue();
25157330f729Sjoerg
25167330f729Sjoerg // Most common case by far: integer increment.
25177330f729Sjoerg } else if (type->isIntegerType()) {
2518*e038c9c4Sjoerg QualType promotedType;
2519*e038c9c4Sjoerg bool canPerformLossyDemotionCheck = false;
2520*e038c9c4Sjoerg if (type->isPromotableIntegerType()) {
2521*e038c9c4Sjoerg promotedType = CGF.getContext().getPromotedIntegerType(type);
2522*e038c9c4Sjoerg assert(promotedType != type && "Shouldn't promote to the same type.");
2523*e038c9c4Sjoerg canPerformLossyDemotionCheck = true;
2524*e038c9c4Sjoerg canPerformLossyDemotionCheck &=
2525*e038c9c4Sjoerg CGF.getContext().getCanonicalType(type) !=
2526*e038c9c4Sjoerg CGF.getContext().getCanonicalType(promotedType);
2527*e038c9c4Sjoerg canPerformLossyDemotionCheck &=
2528*e038c9c4Sjoerg PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
2529*e038c9c4Sjoerg type, promotedType);
2530*e038c9c4Sjoerg assert((!canPerformLossyDemotionCheck ||
2531*e038c9c4Sjoerg type->isSignedIntegerOrEnumerationType() ||
2532*e038c9c4Sjoerg promotedType->isSignedIntegerOrEnumerationType() ||
2533*e038c9c4Sjoerg ConvertType(type)->getScalarSizeInBits() ==
2534*e038c9c4Sjoerg ConvertType(promotedType)->getScalarSizeInBits()) &&
2535*e038c9c4Sjoerg "The following check expects that if we do promotion to different "
2536*e038c9c4Sjoerg "underlying canonical type, at least one of the types (either "
2537*e038c9c4Sjoerg "base or promoted) will be signed, or the bitwidths will match.");
2538*e038c9c4Sjoerg }
2539*e038c9c4Sjoerg if (CGF.SanOpts.hasOneOf(
2540*e038c9c4Sjoerg SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
2541*e038c9c4Sjoerg canPerformLossyDemotionCheck) {
2542*e038c9c4Sjoerg // While `x += 1` (for `x` with width less than int) is modeled as
2543*e038c9c4Sjoerg // promotion+arithmetics+demotion, and we can catch lossy demotion with
2544*e038c9c4Sjoerg // ease; inc/dec with width less than int can't overflow because of
2545*e038c9c4Sjoerg // promotion rules, so we omit promotion+demotion, which means that we
2546*e038c9c4Sjoerg // cannot catch lossy "demotion". Because we still want to catch these cases
2547*e038c9c4Sjoerg // when the sanitizer is enabled, we perform the promotion, then perform
2548*e038c9c4Sjoerg // the increment/decrement in the wider type, and finally
2549*e038c9c4Sjoerg // perform the demotion. This will catch lossy demotions.
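// Illustrative example: with the implicit integer conversion sanitizers
// enabled, 'unsigned char c; c++;' is emitted as a promotion of 'c' to int,
// an add of 1 in int, and a conversion back to unsigned char that carries
// the implicit-conversion check.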
2550*e038c9c4Sjoerg
2551*e038c9c4Sjoerg value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
2552*e038c9c4Sjoerg Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2553*e038c9c4Sjoerg value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2554*e038c9c4Sjoerg // Do pass non-default ScalarConversionOpts so that sanitizer check is
2555*e038c9c4Sjoerg // emitted.
2556*e038c9c4Sjoerg value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
2557*e038c9c4Sjoerg ScalarConversionOpts(CGF.SanOpts));
2558*e038c9c4Sjoerg
25597330f729Sjoerg // Note that signed integer inc/dec with width less than int can't
2560*e038c9c4Sjoerg // overflow because of promotion rules; we're just eliding a few steps
2561*e038c9c4Sjoerg // here.
2562*e038c9c4Sjoerg } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
25637330f729Sjoerg value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
25647330f729Sjoerg } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
25657330f729Sjoerg CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2566*e038c9c4Sjoerg value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2567*e038c9c4Sjoerg E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
25687330f729Sjoerg } else {
25697330f729Sjoerg llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
25707330f729Sjoerg value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
25717330f729Sjoerg }
25727330f729Sjoerg
25737330f729Sjoerg // Next most common: pointer increment.
25747330f729Sjoerg } else if (const PointerType *ptr = type->getAs<PointerType>()) {
25757330f729Sjoerg QualType type = ptr->getPointeeType();
25767330f729Sjoerg
25777330f729Sjoerg // VLA types don't have constant size.
25787330f729Sjoerg if (const VariableArrayType *vla
25797330f729Sjoerg = CGF.getContext().getAsVariableArrayType(type)) {
25807330f729Sjoerg llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
25817330f729Sjoerg if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
25827330f729Sjoerg if (CGF.getLangOpts().isSignedOverflowDefined())
25837330f729Sjoerg value = Builder.CreateGEP(value, numElts, "vla.inc");
25847330f729Sjoerg else
25857330f729Sjoerg value = CGF.EmitCheckedInBoundsGEP(
25867330f729Sjoerg value, numElts, /*SignedIndices=*/false, isSubtraction,
25877330f729Sjoerg E->getExprLoc(), "vla.inc");
25887330f729Sjoerg
25897330f729Sjoerg // Arithmetic on function pointers (!) is just +-1.
25907330f729Sjoerg } else if (type->isFunctionType()) {
25917330f729Sjoerg llvm::Value *amt = Builder.getInt32(amount);
25927330f729Sjoerg
25937330f729Sjoerg value = CGF.EmitCastToVoidPtr(value);
25947330f729Sjoerg if (CGF.getLangOpts().isSignedOverflowDefined())
25957330f729Sjoerg value = Builder.CreateGEP(value, amt, "incdec.funcptr");
25967330f729Sjoerg else
25977330f729Sjoerg value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
25987330f729Sjoerg isSubtraction, E->getExprLoc(),
25997330f729Sjoerg "incdec.funcptr");
26007330f729Sjoerg value = Builder.CreateBitCast(value, input->getType());
26017330f729Sjoerg
26027330f729Sjoerg // For everything else, we can just do a simple increment.
26037330f729Sjoerg } else {
26047330f729Sjoerg llvm::Value *amt = Builder.getInt32(amount);
26057330f729Sjoerg if (CGF.getLangOpts().isSignedOverflowDefined())
26067330f729Sjoerg value = Builder.CreateGEP(value, amt, "incdec.ptr");
26077330f729Sjoerg else
26087330f729Sjoerg value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
26097330f729Sjoerg isSubtraction, E->getExprLoc(),
26107330f729Sjoerg "incdec.ptr");
26117330f729Sjoerg }
26127330f729Sjoerg
26137330f729Sjoerg // Vector increment/decrement.
26147330f729Sjoerg } else if (type->isVectorType()) {
26157330f729Sjoerg if (type->hasIntegerRepresentation()) {
26167330f729Sjoerg llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
26177330f729Sjoerg
26187330f729Sjoerg value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
26197330f729Sjoerg } else {
26207330f729Sjoerg value = Builder.CreateFAdd(
26217330f729Sjoerg value,
26227330f729Sjoerg llvm::ConstantFP::get(value->getType(), amount),
26237330f729Sjoerg isInc ? "inc" : "dec");
26247330f729Sjoerg }
26257330f729Sjoerg
26267330f729Sjoerg // Floating point.
26277330f729Sjoerg } else if (type->isRealFloatingType()) {
26287330f729Sjoerg // Add the inc/dec to the real part.
26297330f729Sjoerg llvm::Value *amt;
2630*e038c9c4Sjoerg CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
26317330f729Sjoerg
26327330f729Sjoerg if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
26337330f729Sjoerg // Another special case: half FP increment should be done via float
26347330f729Sjoerg if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
26357330f729Sjoerg value = Builder.CreateCall(
26367330f729Sjoerg CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
26377330f729Sjoerg CGF.CGM.FloatTy),
26387330f729Sjoerg input, "incdec.conv");
26397330f729Sjoerg } else {
26407330f729Sjoerg value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
26417330f729Sjoerg }
26427330f729Sjoerg }
26437330f729Sjoerg
26447330f729Sjoerg if (value->getType()->isFloatTy())
26457330f729Sjoerg amt = llvm::ConstantFP::get(VMContext,
26467330f729Sjoerg llvm::APFloat(static_cast<float>(amount)));
26477330f729Sjoerg else if (value->getType()->isDoubleTy())
26487330f729Sjoerg amt = llvm::ConstantFP::get(VMContext,
26497330f729Sjoerg llvm::APFloat(static_cast<double>(amount)));
26507330f729Sjoerg else {
26517330f729Sjoerg // Remaining types are Half, LongDouble or __float128. Convert from float.
26527330f729Sjoerg llvm::APFloat F(static_cast<float>(amount));
26537330f729Sjoerg bool ignored;
26547330f729Sjoerg const llvm::fltSemantics *FS;
26557330f729Sjoerg // Don't use getFloatTypeSemantics because Half isn't
26567330f729Sjoerg // necessarily represented using the "half" LLVM type.
26577330f729Sjoerg if (value->getType()->isFP128Ty())
26587330f729Sjoerg FS = &CGF.getTarget().getFloat128Format();
26597330f729Sjoerg else if (value->getType()->isHalfTy())
26607330f729Sjoerg FS = &CGF.getTarget().getHalfFormat();
26617330f729Sjoerg else
26627330f729Sjoerg FS = &CGF.getTarget().getLongDoubleFormat();
26637330f729Sjoerg F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
26647330f729Sjoerg amt = llvm::ConstantFP::get(VMContext, F);
26657330f729Sjoerg }
26667330f729Sjoerg value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
26677330f729Sjoerg
26687330f729Sjoerg if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
26697330f729Sjoerg if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
26707330f729Sjoerg value = Builder.CreateCall(
26717330f729Sjoerg CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
26727330f729Sjoerg CGF.CGM.FloatTy),
26737330f729Sjoerg value, "incdec.conv");
26747330f729Sjoerg } else {
26757330f729Sjoerg value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
26767330f729Sjoerg }
26777330f729Sjoerg }
26787330f729Sjoerg
2679*e038c9c4Sjoerg // Fixed-point types.
2680*e038c9c4Sjoerg } else if (type->isFixedPointType()) {
2681*e038c9c4Sjoerg // Fixed-point types are tricky. In some cases, it isn't possible to
2682*e038c9c4Sjoerg // represent a 1 or a -1 in the type at all. Piggyback off of
2683*e038c9c4Sjoerg // EmitFixedPointBinOp to avoid having to reimplement saturation.
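// Illustrative example: for a signed '_Fract' x, the value +1 is not
// representable, so '++x' is lowered as 'x - (-1)', with the -1 converted
// to the fixed-point semantics of the type first (see below).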
2684*e038c9c4Sjoerg BinOpInfo Info;
2685*e038c9c4Sjoerg Info.E = E;
2686*e038c9c4Sjoerg Info.Ty = E->getType();
2687*e038c9c4Sjoerg Info.Opcode = isInc ? BO_Add : BO_Sub;
2688*e038c9c4Sjoerg Info.LHS = value;
2689*e038c9c4Sjoerg Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
2690*e038c9c4Sjoerg // If the type is signed, it's better to represent this as +(-1) or -(-1),
2691*e038c9c4Sjoerg // since -1 is guaranteed to be representable.
2692*e038c9c4Sjoerg if (type->isSignedFixedPointType()) {
2693*e038c9c4Sjoerg Info.Opcode = isInc ? BO_Sub : BO_Add;
2694*e038c9c4Sjoerg Info.RHS = Builder.CreateNeg(Info.RHS);
2695*e038c9c4Sjoerg }
2696*e038c9c4Sjoerg // Now, convert from our invented integer literal to the type of the unary
2697*e038c9c4Sjoerg // op. This will upscale and saturate if necessary. This value can become
2698*e038c9c4Sjoerg // undef in some cases.
2699*e038c9c4Sjoerg llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
2700*e038c9c4Sjoerg auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
2701*e038c9c4Sjoerg Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
2702*e038c9c4Sjoerg value = EmitFixedPointBinOp(Info);
2703*e038c9c4Sjoerg
27047330f729Sjoerg // Objective-C pointer types.
27057330f729Sjoerg } else {
27067330f729Sjoerg const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
27077330f729Sjoerg value = CGF.EmitCastToVoidPtr(value);
27087330f729Sjoerg
27097330f729Sjoerg CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
27107330f729Sjoerg if (!isInc) size = -size;
27117330f729Sjoerg llvm::Value *sizeValue =
27127330f729Sjoerg llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
27137330f729Sjoerg
27147330f729Sjoerg if (CGF.getLangOpts().isSignedOverflowDefined())
27157330f729Sjoerg value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
27167330f729Sjoerg else
27177330f729Sjoerg value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
27187330f729Sjoerg /*SignedIndices=*/false, isSubtraction,
27197330f729Sjoerg E->getExprLoc(), "incdec.objptr");
27207330f729Sjoerg value = Builder.CreateBitCast(value, input->getType());
27217330f729Sjoerg }
27227330f729Sjoerg
27237330f729Sjoerg if (atomicPHI) {
27247330f729Sjoerg llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
27257330f729Sjoerg llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
27267330f729Sjoerg auto Pair = CGF.EmitAtomicCompareExchange(
27277330f729Sjoerg LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
27287330f729Sjoerg llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
27297330f729Sjoerg llvm::Value *success = Pair.second;
27307330f729Sjoerg atomicPHI->addIncoming(old, curBlock);
27317330f729Sjoerg Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
27327330f729Sjoerg Builder.SetInsertPoint(contBB);
27337330f729Sjoerg return isPre ? value : input;
27347330f729Sjoerg }
27357330f729Sjoerg
27367330f729Sjoerg // Store the updated result through the lvalue.
27377330f729Sjoerg if (LV.isBitField())
27387330f729Sjoerg CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
27397330f729Sjoerg else
27407330f729Sjoerg CGF.EmitStoreThroughLValue(RValue::get(value), LV);
27417330f729Sjoerg
27427330f729Sjoerg // If this is a postinc, return the value read from memory, otherwise use the
27437330f729Sjoerg // updated value.
27447330f729Sjoerg return isPre ? value : input;
27457330f729Sjoerg }
27467330f729Sjoerg
27477330f729Sjoerg
27487330f729Sjoerg
27497330f729Sjoerg Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
27507330f729Sjoerg TestAndClearIgnoreResultAssign();
27517330f729Sjoerg Value *Op = Visit(E->getSubExpr());
27527330f729Sjoerg
27537330f729Sjoerg // Generate a unary FNeg for FP ops.
27547330f729Sjoerg if (Op->getType()->isFPOrFPVectorTy())
27557330f729Sjoerg return Builder.CreateFNeg(Op, "fneg");
27567330f729Sjoerg
27577330f729Sjoerg // Emit unary minus with EmitSub so we handle overflow cases etc.
27587330f729Sjoerg BinOpInfo BinOp;
27597330f729Sjoerg BinOp.RHS = Op;
27607330f729Sjoerg BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
27617330f729Sjoerg BinOp.Ty = E->getType();
27627330f729Sjoerg BinOp.Opcode = BO_Sub;
2763*e038c9c4Sjoerg BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
27647330f729Sjoerg BinOp.E = E;
27657330f729Sjoerg return EmitSub(BinOp);
27667330f729Sjoerg }
27677330f729Sjoerg
27687330f729Sjoerg Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
27697330f729Sjoerg TestAndClearIgnoreResultAssign();
27707330f729Sjoerg Value *Op = Visit(E->getSubExpr());
27717330f729Sjoerg return Builder.CreateNot(Op, "neg");
27727330f729Sjoerg }
27737330f729Sjoerg
27747330f729Sjoerg Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
27757330f729Sjoerg // Perform vector logical not on comparison with zero vector.
2776*e038c9c4Sjoerg if (E->getType()->isVectorType() &&
2777*e038c9c4Sjoerg E->getType()->castAs<VectorType>()->getVectorKind() ==
2778*e038c9c4Sjoerg VectorType::GenericVector) {
27797330f729Sjoerg Value *Oper = Visit(E->getSubExpr());
27807330f729Sjoerg Value *Zero = llvm::Constant::getNullValue(Oper->getType());
27817330f729Sjoerg Value *Result;
2782*e038c9c4Sjoerg if (Oper->getType()->isFPOrFPVectorTy()) {
2783*e038c9c4Sjoerg CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
2784*e038c9c4Sjoerg CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
27857330f729Sjoerg Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
2786*e038c9c4Sjoerg } else
27877330f729Sjoerg Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
27887330f729Sjoerg return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
27897330f729Sjoerg }
27907330f729Sjoerg
27917330f729Sjoerg // Compare operand to zero.
27927330f729Sjoerg Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
27937330f729Sjoerg
27947330f729Sjoerg // Invert value.
27957330f729Sjoerg // TODO: Could dynamically modify easy computations here. For example, if
27967330f729Sjoerg // the operand is an icmp ne, turn into icmp eq.
27977330f729Sjoerg BoolVal = Builder.CreateNot(BoolVal, "lnot");
27987330f729Sjoerg
27997330f729Sjoerg // ZExt result to the expr type.
28007330f729Sjoerg return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
28017330f729Sjoerg }
28027330f729Sjoerg
28037330f729Sjoerg Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
28047330f729Sjoerg // Try folding the offsetof to a constant.
28057330f729Sjoerg Expr::EvalResult EVResult;
28067330f729Sjoerg if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
28077330f729Sjoerg llvm::APSInt Value = EVResult.Val.getInt();
28087330f729Sjoerg return Builder.getInt(Value);
28097330f729Sjoerg }
28107330f729Sjoerg
28117330f729Sjoerg // Loop over the components of the offsetof to compute the value.
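// Illustrative example: __builtin_offsetof(S, a[i].f) decomposes into an
// Array component contributing 'i * sizeof(element)' and a Field component
// contributing the byte offset of 'f' within the element type; the
// contributions are accumulated into Result.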
28127330f729Sjoerg unsigned n = E->getNumComponents();
28137330f729Sjoerg llvm::Type* ResultType = ConvertType(E->getType());
28147330f729Sjoerg llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
28157330f729Sjoerg QualType CurrentType = E->getTypeSourceInfo()->getType();
28167330f729Sjoerg for (unsigned i = 0; i != n; ++i) {
28177330f729Sjoerg OffsetOfNode ON = E->getComponent(i);
28187330f729Sjoerg llvm::Value *Offset = nullptr;
28197330f729Sjoerg switch (ON.getKind()) {
28207330f729Sjoerg case OffsetOfNode::Array: {
28217330f729Sjoerg // Compute the index
28227330f729Sjoerg Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
28237330f729Sjoerg llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
28247330f729Sjoerg bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
28257330f729Sjoerg Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
28267330f729Sjoerg
28277330f729Sjoerg // Save the element type
28287330f729Sjoerg CurrentType =
28297330f729Sjoerg CGF.getContext().getAsArrayType(CurrentType)->getElementType();
28307330f729Sjoerg
28317330f729Sjoerg // Compute the element size
28327330f729Sjoerg llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
28337330f729Sjoerg CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
28347330f729Sjoerg
28357330f729Sjoerg // Multiply out to compute the result
28367330f729Sjoerg Offset = Builder.CreateMul(Idx, ElemSize);
28377330f729Sjoerg break;
28387330f729Sjoerg }
28397330f729Sjoerg
28407330f729Sjoerg case OffsetOfNode::Field: {
28417330f729Sjoerg FieldDecl *MemberDecl = ON.getField();
28427330f729Sjoerg RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
28437330f729Sjoerg const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
28447330f729Sjoerg
28457330f729Sjoerg // Compute the index of the field in its parent.
28467330f729Sjoerg unsigned i = 0;
28477330f729Sjoerg // FIXME: It would be nice if we didn't have to loop here!
28487330f729Sjoerg for (RecordDecl::field_iterator Field = RD->field_begin(),
28497330f729Sjoerg FieldEnd = RD->field_end();
28507330f729Sjoerg Field != FieldEnd; ++Field, ++i) {
28517330f729Sjoerg if (*Field == MemberDecl)
28527330f729Sjoerg break;
28537330f729Sjoerg }
28547330f729Sjoerg assert(i < RL.getFieldCount() && "offsetof field in wrong type");
28557330f729Sjoerg
28567330f729Sjoerg // Compute the offset to the field
28577330f729Sjoerg int64_t OffsetInt = RL.getFieldOffset(i) /
28587330f729Sjoerg CGF.getContext().getCharWidth();
28597330f729Sjoerg Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
28607330f729Sjoerg
28617330f729Sjoerg // Save the element type.
28627330f729Sjoerg CurrentType = MemberDecl->getType();
28637330f729Sjoerg break;
28647330f729Sjoerg }
28657330f729Sjoerg
28667330f729Sjoerg case OffsetOfNode::Identifier:
28677330f729Sjoerg llvm_unreachable("dependent __builtin_offsetof");
28687330f729Sjoerg
28697330f729Sjoerg case OffsetOfNode::Base: {
28707330f729Sjoerg if (ON.getBase()->isVirtual()) {
28717330f729Sjoerg CGF.ErrorUnsupported(E, "virtual base in offsetof");
28727330f729Sjoerg continue;
28737330f729Sjoerg }
28747330f729Sjoerg
28757330f729Sjoerg RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
28767330f729Sjoerg const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
28777330f729Sjoerg
28787330f729Sjoerg // Save the element type.
28797330f729Sjoerg CurrentType = ON.getBase()->getType();
28807330f729Sjoerg
28817330f729Sjoerg // Compute the offset to the base.
28827330f729Sjoerg const RecordType *BaseRT = CurrentType->getAs<RecordType>();
28837330f729Sjoerg CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
28847330f729Sjoerg CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
28857330f729Sjoerg Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
28867330f729Sjoerg break;
28877330f729Sjoerg }
28887330f729Sjoerg }
28897330f729Sjoerg Result = Builder.CreateAdd(Result, Offset);
28907330f729Sjoerg }
28917330f729Sjoerg return Result;
28927330f729Sjoerg }
28937330f729Sjoerg
28947330f729Sjoerg /// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
28957330f729Sjoerg /// the argument of the sizeof expression as an integer.
28967330f729Sjoerg Value *
28977330f729Sjoerg ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
28987330f729Sjoerg const UnaryExprOrTypeTraitExpr *E) {
28997330f729Sjoerg QualType TypeToSize = E->getTypeOfArgument();
29007330f729Sjoerg if (E->getKind() == UETT_SizeOf) {
29017330f729Sjoerg if (const VariableArrayType *VAT =
29027330f729Sjoerg CGF.getContext().getAsVariableArrayType(TypeToSize)) {
29037330f729Sjoerg if (E->isArgumentType()) {
29047330f729Sjoerg // sizeof(type) - make sure to emit the VLA size.
29057330f729Sjoerg CGF.EmitVariablyModifiedType(TypeToSize);
29067330f729Sjoerg } else {
29077330f729Sjoerg // C99 6.5.3.4p2: If the argument is an expression of VLA
29087330f729Sjoerg // type, it is evaluated.
29097330f729Sjoerg CGF.EmitIgnoredExpr(E->getArgumentExpr());
29107330f729Sjoerg }
29117330f729Sjoerg
29127330f729Sjoerg auto VlaSize = CGF.getVLASize(VAT);
29137330f729Sjoerg llvm::Value *size = VlaSize.NumElts;
29147330f729Sjoerg
29157330f729Sjoerg // Scale the number of non-VLA elements by the non-VLA element size.
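// Illustrative example: for 'int vla[n][8]', this multiplies 'n' (the VLA
// element count) by the constant sizeof(int[8]).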
29167330f729Sjoerg CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
29177330f729Sjoerg if (!eltSize.isOne())
29187330f729Sjoerg size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
29197330f729Sjoerg
29207330f729Sjoerg return size;
29217330f729Sjoerg }
29227330f729Sjoerg } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
29237330f729Sjoerg auto Alignment =
29247330f729Sjoerg CGF.getContext()
29257330f729Sjoerg .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
29267330f729Sjoerg E->getTypeOfArgument()->getPointeeType()))
29277330f729Sjoerg .getQuantity();
29287330f729Sjoerg return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
29297330f729Sjoerg }
29307330f729Sjoerg
29317330f729Sjoerg // If this isn't sizeof(vla), the result must be constant; use the constant
29327330f729Sjoerg // folding logic so we don't have to duplicate it here.
29337330f729Sjoerg return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
29347330f729Sjoerg }
29357330f729Sjoerg
29367330f729Sjoerg Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
29377330f729Sjoerg Expr *Op = E->getSubExpr();
29387330f729Sjoerg if (Op->getType()->isAnyComplexType()) {
29397330f729Sjoerg // If it's an l-value, load through the appropriate subobject l-value.
29407330f729Sjoerg // Note that we have to ask E rather than Op, because Op might be an
29417330f729Sjoerg // l-value that this won't work for, e.g. an Obj-C property.
29427330f729Sjoerg if (E->isGLValue())
29437330f729Sjoerg return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
29447330f729Sjoerg E->getExprLoc()).getScalarVal();
29457330f729Sjoerg
29467330f729Sjoerg // Otherwise, calculate and project.
29477330f729Sjoerg return CGF.EmitComplexExpr(Op, false, true).first;
29487330f729Sjoerg }
29497330f729Sjoerg
29507330f729Sjoerg return Visit(Op);
29517330f729Sjoerg }
29527330f729Sjoerg
29537330f729Sjoerg Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
29547330f729Sjoerg Expr *Op = E->getSubExpr();
29557330f729Sjoerg if (Op->getType()->isAnyComplexType()) {
29567330f729Sjoerg // If it's an l-value, load through the appropriate subobject l-value.
29577330f729Sjoerg // Note that we have to ask E rather than Op, because Op might be an
29587330f729Sjoerg // l-value that this won't work for, e.g. an Obj-C property.
29597330f729Sjoerg if (Op->isGLValue())
29607330f729Sjoerg return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
29617330f729Sjoerg E->getExprLoc()).getScalarVal();
29627330f729Sjoerg
29637330f729Sjoerg // Otherwise, calculate and project.
29647330f729Sjoerg return CGF.EmitComplexExpr(Op, true, false).second;
29657330f729Sjoerg }
29667330f729Sjoerg
29677330f729Sjoerg // __imag on a scalar returns zero. Emit the subexpr to ensure side
29687330f729Sjoerg // effects are evaluated, but not the actual value.
29697330f729Sjoerg if (Op->isGLValue())
29707330f729Sjoerg CGF.EmitLValue(Op);
29717330f729Sjoerg else
29727330f729Sjoerg CGF.EmitScalarExpr(Op, true);
29737330f729Sjoerg return llvm::Constant::getNullValue(ConvertType(E->getType()));
29747330f729Sjoerg }
29757330f729Sjoerg
29767330f729Sjoerg //===----------------------------------------------------------------------===//
29777330f729Sjoerg // Binary Operators
29787330f729Sjoerg //===----------------------------------------------------------------------===//
29797330f729Sjoerg
29807330f729Sjoerg BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
29817330f729Sjoerg TestAndClearIgnoreResultAssign();
29827330f729Sjoerg BinOpInfo Result;
29837330f729Sjoerg Result.LHS = Visit(E->getLHS());
29847330f729Sjoerg Result.RHS = Visit(E->getRHS());
29857330f729Sjoerg Result.Ty = E->getType();
29867330f729Sjoerg Result.Opcode = E->getOpcode();
2987*e038c9c4Sjoerg Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
29887330f729Sjoerg Result.E = E;
29897330f729Sjoerg return Result;
29907330f729Sjoerg }
29917330f729Sjoerg
29927330f729Sjoerg LValue ScalarExprEmitter::EmitCompoundAssignLValue(
29937330f729Sjoerg const CompoundAssignOperator *E,
29947330f729Sjoerg Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
29957330f729Sjoerg Value *&Result) {
29967330f729Sjoerg QualType LHSTy = E->getLHS()->getType();
29977330f729Sjoerg BinOpInfo OpInfo;
29987330f729Sjoerg
29997330f729Sjoerg if (E->getComputationResultType()->isAnyComplexType())
30007330f729Sjoerg return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
30017330f729Sjoerg
30027330f729Sjoerg // Emit the RHS first. __block variables need to have the rhs evaluated
30037330f729Sjoerg // first, plus this should improve codegen a little.
30047330f729Sjoerg OpInfo.RHS = Visit(E->getRHS());
30057330f729Sjoerg OpInfo.Ty = E->getComputationResultType();
30067330f729Sjoerg OpInfo.Opcode = E->getOpcode();
3007*e038c9c4Sjoerg OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
30087330f729Sjoerg OpInfo.E = E;
30097330f729Sjoerg // Load/convert the LHS.
30107330f729Sjoerg LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
30117330f729Sjoerg
30127330f729Sjoerg llvm::PHINode *atomicPHI = nullptr;
30137330f729Sjoerg if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
30147330f729Sjoerg QualType type = atomicTy->getValueType();
30157330f729Sjoerg if (!type->isBooleanType() && type->isIntegerType() &&
30167330f729Sjoerg !(type->isUnsignedIntegerType() &&
30177330f729Sjoerg CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
30187330f729Sjoerg CGF.getLangOpts().getSignedOverflowBehavior() !=
30197330f729Sjoerg LangOptions::SOB_Trapping) {
3020*e038c9c4Sjoerg llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3021*e038c9c4Sjoerg llvm::Instruction::BinaryOps Op;
30227330f729Sjoerg switch (OpInfo.Opcode) {
30237330f729Sjoerg // We don't have atomicrmw operations for *, %, /, <<, >>.
30247330f729Sjoerg case BO_MulAssign: case BO_DivAssign:
30257330f729Sjoerg case BO_RemAssign:
30267330f729Sjoerg case BO_ShlAssign:
30277330f729Sjoerg case BO_ShrAssign:
30287330f729Sjoerg break;
30297330f729Sjoerg case BO_AddAssign:
3030*e038c9c4Sjoerg AtomicOp = llvm::AtomicRMWInst::Add;
3031*e038c9c4Sjoerg Op = llvm::Instruction::Add;
30327330f729Sjoerg break;
30337330f729Sjoerg case BO_SubAssign:
3034*e038c9c4Sjoerg AtomicOp = llvm::AtomicRMWInst::Sub;
3035*e038c9c4Sjoerg Op = llvm::Instruction::Sub;
30367330f729Sjoerg break;
30377330f729Sjoerg case BO_AndAssign:
3038*e038c9c4Sjoerg AtomicOp = llvm::AtomicRMWInst::And;
3039*e038c9c4Sjoerg Op = llvm::Instruction::And;
30407330f729Sjoerg break;
30417330f729Sjoerg case BO_XorAssign:
3042*e038c9c4Sjoerg AtomicOp = llvm::AtomicRMWInst::Xor;
3043*e038c9c4Sjoerg Op = llvm::Instruction::Xor;
30447330f729Sjoerg break;
30457330f729Sjoerg case BO_OrAssign:
3046*e038c9c4Sjoerg AtomicOp = llvm::AtomicRMWInst::Or;
3047*e038c9c4Sjoerg Op = llvm::Instruction::Or;
30487330f729Sjoerg break;
30497330f729Sjoerg default:
30507330f729Sjoerg llvm_unreachable("Invalid compound assignment type");
30517330f729Sjoerg }
3052*e038c9c4Sjoerg if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3053*e038c9c4Sjoerg llvm::Value *Amt = CGF.EmitToMemory(
30547330f729Sjoerg EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
30557330f729Sjoerg E->getExprLoc()),
30567330f729Sjoerg LHSTy);
3057*e038c9c4Sjoerg Value *OldVal = Builder.CreateAtomicRMW(
3058*e038c9c4Sjoerg AtomicOp, LHSLV.getPointer(CGF), Amt,
30597330f729Sjoerg llvm::AtomicOrdering::SequentiallyConsistent);
3060*e038c9c4Sjoerg
3061*e038c9c4Sjoerg // Since the operation is atomic, the result type is guaranteed to be the
3062*e038c9c4Sjoerg // same as the input in LLVM terms.
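// For example, 'x += 1' on an _Atomic(int) lowers to roughly:
//   %old = atomicrmw add i32* %x, i32 1 seq_cst
//   %res = add i32 %old, 1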
3063*e038c9c4Sjoerg Result = Builder.CreateBinOp(Op, OldVal, Amt);
30647330f729Sjoerg return LHSLV;
30657330f729Sjoerg }
30667330f729Sjoerg }
30677330f729Sjoerg // FIXME: For floating point types, we should be saving and restoring the
30687330f729Sjoerg // floating point environment in the loop.
30697330f729Sjoerg llvm::BasicBlock *startBB = Builder.GetInsertBlock();
30707330f729Sjoerg llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
30717330f729Sjoerg OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
30727330f729Sjoerg OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
30737330f729Sjoerg Builder.CreateBr(opBB);
30747330f729Sjoerg Builder.SetInsertPoint(opBB);
30757330f729Sjoerg atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
30767330f729Sjoerg atomicPHI->addIncoming(OpInfo.LHS, startBB);
30777330f729Sjoerg OpInfo.LHS = atomicPHI;
30787330f729Sjoerg }
30797330f729Sjoerg else
30807330f729Sjoerg OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
30817330f729Sjoerg
3082*e038c9c4Sjoerg CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
30837330f729Sjoerg SourceLocation Loc = E->getExprLoc();
30847330f729Sjoerg OpInfo.LHS =
30857330f729Sjoerg EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);
30867330f729Sjoerg
30877330f729Sjoerg // Expand the binary operator.
30887330f729Sjoerg Result = (this->*Func)(OpInfo);
30897330f729Sjoerg
30907330f729Sjoerg // Convert the result back to the LHS type,
30917330f729Sjoerg // potentially with an implicit-conversion sanitizer check.
30927330f729Sjoerg Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
30937330f729Sjoerg Loc, ScalarConversionOpts(CGF.SanOpts));
30947330f729Sjoerg
30957330f729Sjoerg if (atomicPHI) {
30967330f729Sjoerg llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
30977330f729Sjoerg llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
30987330f729Sjoerg auto Pair = CGF.EmitAtomicCompareExchange(
30997330f729Sjoerg LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
31007330f729Sjoerg llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
31017330f729Sjoerg llvm::Value *success = Pair.second;
31027330f729Sjoerg atomicPHI->addIncoming(old, curBlock);
31037330f729Sjoerg Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
31047330f729Sjoerg Builder.SetInsertPoint(contBB);
31057330f729Sjoerg return LHSLV;
31067330f729Sjoerg }
31077330f729Sjoerg
31087330f729Sjoerg // Store the result value into the LHS lvalue. Bit-fields are handled
31097330f729Sjoerg // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
31107330f729Sjoerg // 'An assignment expression has the value of the left operand after the
31117330f729Sjoerg // assignment...'.
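// For a bit-field such as 'struct { int b : 3; } s;', 's.b += 9' therefore
// yields the value that was actually stored into the 3-bit field, not the
// unconverted sum.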
31127330f729Sjoerg if (LHSLV.isBitField())
31137330f729Sjoerg CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
31147330f729Sjoerg else
31157330f729Sjoerg CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
31167330f729Sjoerg
3117*e038c9c4Sjoerg if (CGF.getLangOpts().OpenMP)
3118*e038c9c4Sjoerg CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
3119*e038c9c4Sjoerg E->getLHS());
31207330f729Sjoerg return LHSLV;
31217330f729Sjoerg }
31227330f729Sjoerg
31237330f729Sjoerg Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
31247330f729Sjoerg Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
31257330f729Sjoerg bool Ignore = TestAndClearIgnoreResultAssign();
31267330f729Sjoerg Value *RHS = nullptr;
31277330f729Sjoerg LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
31287330f729Sjoerg
31297330f729Sjoerg // If the result is clearly ignored, return now.
31307330f729Sjoerg if (Ignore)
31317330f729Sjoerg return nullptr;
31327330f729Sjoerg
31337330f729Sjoerg // The result of an assignment in C is the assigned r-value.
31347330f729Sjoerg if (!CGF.getLangOpts().CPlusPlus)
31357330f729Sjoerg return RHS;
31367330f729Sjoerg
31377330f729Sjoerg // If the lvalue is non-volatile, return the computed value of the assignment.
31387330f729Sjoerg if (!LHS.isVolatileQualified())
31397330f729Sjoerg return RHS;
31407330f729Sjoerg
31417330f729Sjoerg // Otherwise, reload the value.
31427330f729Sjoerg return EmitLoadOfLValue(LHS, E->getExprLoc());
31437330f729Sjoerg }
31447330f729Sjoerg
31457330f729Sjoerg void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
31467330f729Sjoerg const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
31477330f729Sjoerg SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
31487330f729Sjoerg
31497330f729Sjoerg if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
31507330f729Sjoerg Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
31517330f729Sjoerg SanitizerKind::IntegerDivideByZero));
31527330f729Sjoerg }
31537330f729Sjoerg
31547330f729Sjoerg const auto *BO = cast<BinaryOperator>(Ops.E);
31557330f729Sjoerg if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
31567330f729Sjoerg Ops.Ty->hasSignedIntegerRepresentation() &&
31577330f729Sjoerg !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
31587330f729Sjoerg Ops.mayHaveIntegerOverflow()) {
31597330f729Sjoerg llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
31607330f729Sjoerg
31617330f729Sjoerg llvm::Value *IntMin =
31627330f729Sjoerg Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
3163*e038c9c4Sjoerg llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
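// Signed division can only overflow for INT_MIN / -1 (and the corresponding
// remainder), so the operation is safe whenever LHS != INT_MIN or RHS != -1.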
31647330f729Sjoerg
31657330f729Sjoerg llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
31667330f729Sjoerg llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
31677330f729Sjoerg llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
31687330f729Sjoerg Checks.push_back(
31697330f729Sjoerg std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
31707330f729Sjoerg }
31717330f729Sjoerg
31727330f729Sjoerg if (Checks.size() > 0)
31737330f729Sjoerg EmitBinOpCheck(Checks, Ops);
31747330f729Sjoerg }
31757330f729Sjoerg
31767330f729Sjoerg Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
31777330f729Sjoerg {
31787330f729Sjoerg CodeGenFunction::SanitizerScope SanScope(&CGF);
31797330f729Sjoerg if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
31807330f729Sjoerg CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
31817330f729Sjoerg Ops.Ty->isIntegerType() &&
31827330f729Sjoerg (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
31837330f729Sjoerg llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
31847330f729Sjoerg EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
31857330f729Sjoerg } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
31867330f729Sjoerg Ops.Ty->isRealFloatingType() &&
31877330f729Sjoerg Ops.mayHaveFloatDivisionByZero()) {
31887330f729Sjoerg llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
31897330f729Sjoerg llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
31907330f729Sjoerg EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
31917330f729Sjoerg Ops);
31927330f729Sjoerg }
31937330f729Sjoerg }
31947330f729Sjoerg
3195*e038c9c4Sjoerg if (Ops.Ty->isConstantMatrixType()) {
3196*e038c9c4Sjoerg llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
3197*e038c9c4Sjoerg // We need to check the types of the operands of the operator to get the
3198*e038c9c4Sjoerg // correct matrix dimensions.
3199*e038c9c4Sjoerg auto *BO = cast<BinaryOperator>(Ops.E);
3200*e038c9c4Sjoerg (void)BO;
3201*e038c9c4Sjoerg assert(
3202*e038c9c4Sjoerg isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
3203*e038c9c4Sjoerg "first operand must be a matrix");
3204*e038c9c4Sjoerg assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
3205*e038c9c4Sjoerg "second operand must be an arithmetic type");
3206*e038c9c4Sjoerg CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3207*e038c9c4Sjoerg return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
3208*e038c9c4Sjoerg Ops.Ty->hasUnsignedIntegerRepresentation());
3209*e038c9c4Sjoerg }
3210*e038c9c4Sjoerg
32117330f729Sjoerg if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3212*e038c9c4Sjoerg llvm::Value *Val;
3213*e038c9c4Sjoerg CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3214*e038c9c4Sjoerg Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
3215*e038c9c4Sjoerg if ((CGF.getLangOpts().OpenCL &&
3216*e038c9c4Sjoerg !CGF.CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
3217*e038c9c4Sjoerg (CGF.getLangOpts().HIP && CGF.getLangOpts().CUDAIsDevice &&
3218*e038c9c4Sjoerg !CGF.CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
32197330f729Sjoerg // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
32207330f729Sjoerg // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
32217330f729Sjoerg // build option allows an application to specify that single precision
32227330f729Sjoerg // floating-point divide (x/y and 1/x) and sqrt used in the program
32237330f729Sjoerg // source are correctly rounded.
32247330f729Sjoerg llvm::Type *ValTy = Val->getType();
32257330f729Sjoerg if (ValTy->isFloatTy() ||
32267330f729Sjoerg (isa<llvm::VectorType>(ValTy) &&
32277330f729Sjoerg cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
32287330f729Sjoerg CGF.SetFPAccuracy(Val, 2.5);
32297330f729Sjoerg }
32307330f729Sjoerg return Val;
32317330f729Sjoerg }
3232*e038c9c4Sjoerg else if (Ops.isFixedPointOp())
3233*e038c9c4Sjoerg return EmitFixedPointBinOp(Ops);
32347330f729Sjoerg else if (Ops.Ty->hasUnsignedIntegerRepresentation())
32357330f729Sjoerg return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
32367330f729Sjoerg else
32377330f729Sjoerg return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
32387330f729Sjoerg }
32397330f729Sjoerg
32407330f729Sjoerg Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
32417330f729Sjoerg // Rem in C can't be a floating point type: C99 6.5.5p2.
32427330f729Sjoerg if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
32437330f729Sjoerg CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
32447330f729Sjoerg Ops.Ty->isIntegerType() &&
32457330f729Sjoerg (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
32467330f729Sjoerg CodeGenFunction::SanitizerScope SanScope(&CGF);
32477330f729Sjoerg llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
32487330f729Sjoerg EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
32497330f729Sjoerg }
32507330f729Sjoerg
32517330f729Sjoerg if (Ops.Ty->hasUnsignedIntegerRepresentation())
32527330f729Sjoerg return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
32537330f729Sjoerg else
32547330f729Sjoerg return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
32557330f729Sjoerg }
32567330f729Sjoerg
32577330f729Sjoerg Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
32587330f729Sjoerg unsigned IID;
32597330f729Sjoerg unsigned OpID = 0;
3260*e038c9c4Sjoerg SanitizerHandler OverflowKind;
32617330f729Sjoerg
32627330f729Sjoerg bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
32637330f729Sjoerg switch (Ops.Opcode) {
32647330f729Sjoerg case BO_Add:
32657330f729Sjoerg case BO_AddAssign:
32667330f729Sjoerg OpID = 1;
32677330f729Sjoerg IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
32687330f729Sjoerg llvm::Intrinsic::uadd_with_overflow;
3269*e038c9c4Sjoerg OverflowKind = SanitizerHandler::AddOverflow;
32707330f729Sjoerg break;
32717330f729Sjoerg case BO_Sub:
32727330f729Sjoerg case BO_SubAssign:
32737330f729Sjoerg OpID = 2;
32747330f729Sjoerg IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
32757330f729Sjoerg llvm::Intrinsic::usub_with_overflow;
3276*e038c9c4Sjoerg OverflowKind = SanitizerHandler::SubOverflow;
32777330f729Sjoerg break;
32787330f729Sjoerg case BO_Mul:
32797330f729Sjoerg case BO_MulAssign:
32807330f729Sjoerg OpID = 3;
32817330f729Sjoerg IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
32827330f729Sjoerg llvm::Intrinsic::umul_with_overflow;
3283*e038c9c4Sjoerg OverflowKind = SanitizerHandler::MulOverflow;
32847330f729Sjoerg break;
32857330f729Sjoerg default:
32867330f729Sjoerg llvm_unreachable("Unsupported operation for overflow detection");
32877330f729Sjoerg }
32887330f729Sjoerg OpID <<= 1;
32897330f729Sjoerg if (isSigned)
32907330f729Sjoerg OpID |= 1;
32917330f729Sjoerg
32927330f729Sjoerg CodeGenFunction::SanitizerScope SanScope(&CGF);
32937330f729Sjoerg llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
32947330f729Sjoerg
32957330f729Sjoerg llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
32967330f729Sjoerg
32977330f729Sjoerg Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
32987330f729Sjoerg Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
32997330f729Sjoerg Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
33007330f729Sjoerg
33017330f729Sjoerg // Handle overflow with llvm.trap if no custom handler has been specified.
33027330f729Sjoerg const std::string *handlerName =
33037330f729Sjoerg &CGF.getLangOpts().OverflowHandler;
33047330f729Sjoerg if (handlerName->empty()) {
33057330f729Sjoerg // If the signed-integer-overflow sanitizer is enabled, emit a call to its
33067330f729Sjoerg // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
33077330f729Sjoerg if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
33087330f729Sjoerg llvm::Value *NotOverflow = Builder.CreateNot(overflow);
33097330f729Sjoerg SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
33107330f729Sjoerg : SanitizerKind::UnsignedIntegerOverflow;
33117330f729Sjoerg EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
33127330f729Sjoerg } else
3313*e038c9c4Sjoerg CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
33147330f729Sjoerg return result;
33157330f729Sjoerg }
33167330f729Sjoerg
33177330f729Sjoerg // Branch in case of overflow.
33187330f729Sjoerg llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
33197330f729Sjoerg llvm::BasicBlock *continueBB =
33207330f729Sjoerg CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
33217330f729Sjoerg llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
33227330f729Sjoerg
33237330f729Sjoerg Builder.CreateCondBr(overflow, overflowBB, continueBB);
33247330f729Sjoerg
33257330f729Sjoerg // If an overflow handler is set, then we want to call it and then use its
33267330f729Sjoerg // result, if it returns.
33277330f729Sjoerg Builder.SetInsertPoint(overflowBB);
33287330f729Sjoerg
33297330f729Sjoerg // Get the overflow handler.
33307330f729Sjoerg llvm::Type *Int8Ty = CGF.Int8Ty;
33317330f729Sjoerg llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
33327330f729Sjoerg llvm::FunctionType *handlerTy =
33337330f729Sjoerg llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
33347330f729Sjoerg llvm::FunctionCallee handler =
33357330f729Sjoerg CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
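// In C terms, the handler named by LangOpts.OverflowHandler is expected to
// look roughly like
//   long long handler(long long lhs, long long rhs, char opcode, char width, ...);
// and its (truncated) return value replaces the overflowed result.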
33367330f729Sjoerg
33377330f729Sjoerg // Sign extend the args to 64-bit, so that we can use the same handler for
33387330f729Sjoerg // all types of overflow.
33397330f729Sjoerg llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
33407330f729Sjoerg llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
33417330f729Sjoerg
33427330f729Sjoerg // Call the handler with the two arguments, the operation, and the size of
33437330f729Sjoerg // the result.
33447330f729Sjoerg llvm::Value *handlerArgs[] = {
33457330f729Sjoerg lhs,
33467330f729Sjoerg rhs,
33477330f729Sjoerg Builder.getInt8(OpID),
33487330f729Sjoerg Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
33497330f729Sjoerg };
33507330f729Sjoerg llvm::Value *handlerResult =
33517330f729Sjoerg CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
33527330f729Sjoerg
33537330f729Sjoerg // Truncate the result back to the desired size.
33547330f729Sjoerg handlerResult = Builder.CreateTrunc(handlerResult, opTy);
33557330f729Sjoerg Builder.CreateBr(continueBB);
33567330f729Sjoerg
33577330f729Sjoerg Builder.SetInsertPoint(continueBB);
33587330f729Sjoerg llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
33597330f729Sjoerg phi->addIncoming(result, initialBB);
33607330f729Sjoerg phi->addIncoming(handlerResult, overflowBB);
33617330f729Sjoerg
33627330f729Sjoerg return phi;
33637330f729Sjoerg }
33647330f729Sjoerg
33657330f729Sjoerg /// Emit pointer + index arithmetic.
33667330f729Sjoerg static Value *emitPointerArithmetic(CodeGenFunction &CGF,
33677330f729Sjoerg const BinOpInfo &op,
33687330f729Sjoerg bool isSubtraction) {
33697330f729Sjoerg // Must have binary (not unary) expr here. Unary pointer
33707330f729Sjoerg // increment/decrement doesn't use this path.
33717330f729Sjoerg const BinaryOperator *expr = cast<BinaryOperator>(op.E);
33727330f729Sjoerg
33737330f729Sjoerg Value *pointer = op.LHS;
33747330f729Sjoerg Expr *pointerOperand = expr->getLHS();
33757330f729Sjoerg Value *index = op.RHS;
33767330f729Sjoerg Expr *indexOperand = expr->getRHS();
33777330f729Sjoerg
33787330f729Sjoerg // In a subtraction, the LHS is always the pointer.
33797330f729Sjoerg if (!isSubtraction && !pointer->getType()->isPointerTy()) {
33807330f729Sjoerg std::swap(pointer, index);
33817330f729Sjoerg std::swap(pointerOperand, indexOperand);
33827330f729Sjoerg }
33837330f729Sjoerg
33847330f729Sjoerg bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
33857330f729Sjoerg
33867330f729Sjoerg unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
33877330f729Sjoerg auto &DL = CGF.CGM.getDataLayout();
33887330f729Sjoerg auto PtrTy = cast<llvm::PointerType>(pointer->getType());
33897330f729Sjoerg
33907330f729Sjoerg // Some versions of glibc and gcc use idioms (particularly in their malloc
33917330f729Sjoerg // routines) that add a pointer-sized integer (known to be a pointer value)
33927330f729Sjoerg // to a null pointer in order to cast the value back to an integer or as
33937330f729Sjoerg // part of a pointer alignment algorithm. This is undefined behavior, but
33947330f729Sjoerg // we'd like to be able to compile programs that use it.
33957330f729Sjoerg //
33967330f729Sjoerg // Normally, we'd generate a GEP with a null-pointer base here in response
33977330f729Sjoerg // to that code, but it's also UB to dereference a pointer created that
33987330f729Sjoerg // way. Instead (as an acknowledged hack to tolerate the idiom) we will
33997330f729Sjoerg // generate a direct cast of the integer value to a pointer.
34007330f729Sjoerg //
34017330f729Sjoerg // The idiom (p = nullptr + N) is not met if any of the following are true:
34027330f729Sjoerg //
34037330f729Sjoerg // The operation is subtraction.
34047330f729Sjoerg // The index is not pointer-sized.
34057330f729Sjoerg // The pointer type is not byte-sized.
34067330f729Sjoerg //
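// A typical instance of the idiom is '(char *)0 + (uintptr_t)p', which is
// lowered below to a plain inttoptr of the index rather than a GEP on a
// null base.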
34077330f729Sjoerg if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
34087330f729Sjoerg op.Opcode,
34097330f729Sjoerg expr->getLHS(),
34107330f729Sjoerg expr->getRHS()))
34117330f729Sjoerg return CGF.Builder.CreateIntToPtr(index, pointer->getType());
34127330f729Sjoerg
3413*e038c9c4Sjoerg if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
34147330f729Sjoerg // Zero-extend or sign-extend the index value to the pointer's index
34157330f729Sjoerg // width, according to whether the index is signed or not.
3416*e038c9c4Sjoerg index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
34177330f729Sjoerg "idx.ext");
34187330f729Sjoerg }
34197330f729Sjoerg
34207330f729Sjoerg // If this is subtraction, negate the index.
34217330f729Sjoerg if (isSubtraction)
34227330f729Sjoerg index = CGF.Builder.CreateNeg(index, "idx.neg");
34237330f729Sjoerg
34247330f729Sjoerg if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
34257330f729Sjoerg CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
34267330f729Sjoerg /*Accessed*/ false);
34277330f729Sjoerg
34287330f729Sjoerg const PointerType *pointerType
34297330f729Sjoerg = pointerOperand->getType()->getAs<PointerType>();
34307330f729Sjoerg if (!pointerType) {
34317330f729Sjoerg QualType objectType = pointerOperand->getType()
34327330f729Sjoerg ->castAs<ObjCObjectPointerType>()
34337330f729Sjoerg ->getPointeeType();
34347330f729Sjoerg llvm::Value *objectSize
34357330f729Sjoerg = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
34367330f729Sjoerg
34377330f729Sjoerg index = CGF.Builder.CreateMul(index, objectSize);
34387330f729Sjoerg
34397330f729Sjoerg Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
34407330f729Sjoerg result = CGF.Builder.CreateGEP(result, index, "add.ptr");
34417330f729Sjoerg return CGF.Builder.CreateBitCast(result, pointer->getType());
34427330f729Sjoerg }
34437330f729Sjoerg
34447330f729Sjoerg QualType elementType = pointerType->getPointeeType();
34457330f729Sjoerg if (const VariableArrayType *vla
34467330f729Sjoerg = CGF.getContext().getAsVariableArrayType(elementType)) {
34477330f729Sjoerg // The element count here is the total number of non-VLA elements.
34487330f729Sjoerg llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
34497330f729Sjoerg
34507330f729Sjoerg // Effectively, the multiply by the VLA size is part of the GEP.
34517330f729Sjoerg // GEP indexes are signed, and scaling an index isn't permitted to
34527330f729Sjoerg // signed-overflow, so we use the same semantics for our explicit
34537330f729Sjoerg // multiply. We suppress this if overflow is not undefined behavior.
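// For example, for 'int (*p)[n]; p + i', the index i is first multiplied by
// the runtime bound n and the GEP is then done over the int elements.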
34547330f729Sjoerg if (CGF.getLangOpts().isSignedOverflowDefined()) {
34557330f729Sjoerg index = CGF.Builder.CreateMul(index, numElements, "vla.index");
34567330f729Sjoerg pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
34577330f729Sjoerg } else {
34587330f729Sjoerg index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
34597330f729Sjoerg pointer =
34607330f729Sjoerg CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
34617330f729Sjoerg op.E->getExprLoc(), "add.ptr");
34627330f729Sjoerg }
34637330f729Sjoerg return pointer;
34647330f729Sjoerg }
34657330f729Sjoerg
34667330f729Sjoerg // Explicitly handle GNU void* and function pointer arithmetic extensions. The
34677330f729Sjoerg // GNU void* casts amount to no-ops since our void* type is i8*, but this is
34687330f729Sjoerg // future-proof.
34697330f729Sjoerg if (elementType->isVoidType() || elementType->isFunctionType()) {
3470*e038c9c4Sjoerg Value *result = CGF.EmitCastToVoidPtr(pointer);
34717330f729Sjoerg result = CGF.Builder.CreateGEP(result, index, "add.ptr");
34727330f729Sjoerg return CGF.Builder.CreateBitCast(result, pointer->getType());
34737330f729Sjoerg }
34747330f729Sjoerg
34757330f729Sjoerg if (CGF.getLangOpts().isSignedOverflowDefined())
34767330f729Sjoerg return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
34777330f729Sjoerg
34787330f729Sjoerg return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
34797330f729Sjoerg op.E->getExprLoc(), "add.ptr");
34807330f729Sjoerg }
34817330f729Sjoerg
34827330f729Sjoerg // Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
34837330f729Sjoerg // Addend. Use negMul and negAdd to negate the first operand of the Mul or
34847330f729Sjoerg // the add operand respectively. This allows fmuladd to represent a*b-c, or
34857330f729Sjoerg // c-a*b. Patterns in LLVM should catch the negated forms and translate them to
34867330f729Sjoerg // efficient operations.
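// For example, with -ffp-contract=on, 'a * b + c' on doubles becomes roughly
//   %r = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
// rather than a separate fmul followed by an fadd.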
3487*e038c9c4Sjoerg static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
34887330f729Sjoerg const CodeGenFunction &CGF, CGBuilderTy &Builder,
34897330f729Sjoerg bool negMul, bool negAdd) {
34907330f729Sjoerg assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
34917330f729Sjoerg
34927330f729Sjoerg Value *MulOp0 = MulOp->getOperand(0);
34937330f729Sjoerg Value *MulOp1 = MulOp->getOperand(1);
3494*e038c9c4Sjoerg if (negMul)
3495*e038c9c4Sjoerg MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
3496*e038c9c4Sjoerg if (negAdd)
3497*e038c9c4Sjoerg Addend = Builder.CreateFNeg(Addend, "neg");
34987330f729Sjoerg
3499*e038c9c4Sjoerg Value *FMulAdd = nullptr;
3500*e038c9c4Sjoerg if (Builder.getIsFPConstrained()) {
3501*e038c9c4Sjoerg assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
3502*e038c9c4Sjoerg "Only constrained operation should be created when Builder is in FP "
3503*e038c9c4Sjoerg "constrained mode");
3504*e038c9c4Sjoerg FMulAdd = Builder.CreateConstrainedFPCall(
3505*e038c9c4Sjoerg CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
3506*e038c9c4Sjoerg Addend->getType()),
3507*e038c9c4Sjoerg {MulOp0, MulOp1, Addend});
3508*e038c9c4Sjoerg } else {
3509*e038c9c4Sjoerg FMulAdd = Builder.CreateCall(
35107330f729Sjoerg CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
35117330f729Sjoerg {MulOp0, MulOp1, Addend});
3512*e038c9c4Sjoerg }
35137330f729Sjoerg MulOp->eraseFromParent();
35147330f729Sjoerg
35157330f729Sjoerg return FMulAdd;
35167330f729Sjoerg }
35177330f729Sjoerg
35187330f729Sjoerg // Check whether it would be legal to emit an fmuladd intrinsic call to
35197330f729Sjoerg // represent op and if so, build the fmuladd.
35207330f729Sjoerg //
35217330f729Sjoerg // Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
35227330f729Sjoerg // Does NOT check the type of the operation - it's assumed that this function
35237330f729Sjoerg // will be called from contexts where it's known that the type is contractable.
35247330f729Sjoerg static Value* tryEmitFMulAdd(const BinOpInfo &op,
35257330f729Sjoerg const CodeGenFunction &CGF, CGBuilderTy &Builder,
35267330f729Sjoerg bool isSub=false) {
35277330f729Sjoerg
35287330f729Sjoerg assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
35297330f729Sjoerg op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
35307330f729Sjoerg "Only fadd/fsub can be the root of an fmuladd.");
35317330f729Sjoerg
35327330f729Sjoerg // Check whether this op is marked as fusable.
35337330f729Sjoerg if (!op.FPFeatures.allowFPContractWithinStatement())
35347330f729Sjoerg return nullptr;
35357330f729Sjoerg
35367330f729Sjoerg // We have a potentially fusable op. Look for a mul on one of the operands.
35377330f729Sjoerg // Also, make sure that the mul result has no other uses, since in that
35387330f729Sjoerg // case there's no point in creating a muladd operation.
35397330f729Sjoerg if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
35407330f729Sjoerg if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
35417330f729Sjoerg LHSBinOp->use_empty())
35427330f729Sjoerg return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
35437330f729Sjoerg }
35447330f729Sjoerg if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
35457330f729Sjoerg if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
35467330f729Sjoerg RHSBinOp->use_empty())
35477330f729Sjoerg return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
35487330f729Sjoerg }
35497330f729Sjoerg
3550*e038c9c4Sjoerg if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) {
3551*e038c9c4Sjoerg if (LHSBinOp->getIntrinsicID() ==
3552*e038c9c4Sjoerg llvm::Intrinsic::experimental_constrained_fmul &&
3553*e038c9c4Sjoerg LHSBinOp->use_empty())
3554*e038c9c4Sjoerg return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
3555*e038c9c4Sjoerg }
3556*e038c9c4Sjoerg if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) {
3557*e038c9c4Sjoerg if (RHSBinOp->getIntrinsicID() ==
3558*e038c9c4Sjoerg llvm::Intrinsic::experimental_constrained_fmul &&
3559*e038c9c4Sjoerg RHSBinOp->use_empty())
3560*e038c9c4Sjoerg return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
3561*e038c9c4Sjoerg }
3562*e038c9c4Sjoerg
35637330f729Sjoerg return nullptr;
35647330f729Sjoerg }
35657330f729Sjoerg
35667330f729Sjoerg Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
35677330f729Sjoerg if (op.LHS->getType()->isPointerTy() ||
35687330f729Sjoerg op.RHS->getType()->isPointerTy())
35697330f729Sjoerg return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);
35707330f729Sjoerg
35717330f729Sjoerg if (op.Ty->isSignedIntegerOrEnumerationType()) {
35727330f729Sjoerg switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
35737330f729Sjoerg case LangOptions::SOB_Defined:
35747330f729Sjoerg return Builder.CreateAdd(op.LHS, op.RHS, "add");
35757330f729Sjoerg case LangOptions::SOB_Undefined:
35767330f729Sjoerg if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
35777330f729Sjoerg return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
35787330f729Sjoerg LLVM_FALLTHROUGH;
35797330f729Sjoerg case LangOptions::SOB_Trapping:
35807330f729Sjoerg if (CanElideOverflowCheck(CGF.getContext(), op))
35817330f729Sjoerg return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
35827330f729Sjoerg return EmitOverflowCheckedBinOp(op);
35837330f729Sjoerg }
35847330f729Sjoerg }
35857330f729Sjoerg
3586*e038c9c4Sjoerg if (op.Ty->isConstantMatrixType()) {
3587*e038c9c4Sjoerg llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
3588*e038c9c4Sjoerg CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3589*e038c9c4Sjoerg return MB.CreateAdd(op.LHS, op.RHS);
3590*e038c9c4Sjoerg }
3591*e038c9c4Sjoerg
35927330f729Sjoerg if (op.Ty->isUnsignedIntegerType() &&
35937330f729Sjoerg CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
35947330f729Sjoerg !CanElideOverflowCheck(CGF.getContext(), op))
35957330f729Sjoerg return EmitOverflowCheckedBinOp(op);
35967330f729Sjoerg
35977330f729Sjoerg if (op.LHS->getType()->isFPOrFPVectorTy()) {
3598*e038c9c4Sjoerg CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
35997330f729Sjoerg // Try to form an fmuladd.
36007330f729Sjoerg if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
36017330f729Sjoerg return FMulAdd;
36027330f729Sjoerg
3603*e038c9c4Sjoerg return Builder.CreateFAdd(op.LHS, op.RHS, "add");
36047330f729Sjoerg }
36057330f729Sjoerg
3606*e038c9c4Sjoerg if (op.isFixedPointOp())
36077330f729Sjoerg return EmitFixedPointBinOp(op);
36087330f729Sjoerg
36097330f729Sjoerg return Builder.CreateAdd(op.LHS, op.RHS, "add");
36107330f729Sjoerg }
36117330f729Sjoerg
36127330f729Sjoerg /// The resulting value must be calculated with exact precision, so the operands
36137330f729Sjoerg /// might not have the same type.
36147330f729Sjoerg Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
36157330f729Sjoerg using llvm::APSInt;
36167330f729Sjoerg using llvm::ConstantInt;
36177330f729Sjoerg
3618*e038c9c4Sjoerg // This is either a binary operation where at least one of the operands is
3619*e038c9c4Sjoerg // a fixed-point type, or a unary operation where the operand is a fixed-point
3620*e038c9c4Sjoerg // type. The result type of a binary operation is determined by
3621*e038c9c4Sjoerg // Sema::handleFixedPointConversions().
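// For example, for '_Accum a; short _Accum b;', 'a + b' converts both
// operands to their common fixed-point semantics, performs the addition
// there, and then converts the sum to the result type's semantics.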
36227330f729Sjoerg QualType ResultTy = op.Ty;
3623*e038c9c4Sjoerg QualType LHSTy, RHSTy;
3624*e038c9c4Sjoerg if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
3625*e038c9c4Sjoerg RHSTy = BinOp->getRHS()->getType();
3626*e038c9c4Sjoerg if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
3627*e038c9c4Sjoerg // For compound assignment, the effective type of the LHS at this point
3628*e038c9c4Sjoerg // is the computation LHS type, not the actual LHS type, and the final
3629*e038c9c4Sjoerg // result type is not the type of the expression but rather the
3630*e038c9c4Sjoerg // computation result type.
3631*e038c9c4Sjoerg LHSTy = CAO->getComputationLHSType();
3632*e038c9c4Sjoerg ResultTy = CAO->getComputationResultType();
3633*e038c9c4Sjoerg } else
3634*e038c9c4Sjoerg LHSTy = BinOp->getLHS()->getType();
3635*e038c9c4Sjoerg } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
3636*e038c9c4Sjoerg LHSTy = UnOp->getSubExpr()->getType();
3637*e038c9c4Sjoerg RHSTy = UnOp->getSubExpr()->getType();
3638*e038c9c4Sjoerg }
36397330f729Sjoerg ASTContext &Ctx = CGF.getContext();
36407330f729Sjoerg Value *LHS = op.LHS;
36417330f729Sjoerg Value *RHS = op.RHS;
36427330f729Sjoerg
36437330f729Sjoerg auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
36447330f729Sjoerg auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
36457330f729Sjoerg auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
36467330f729Sjoerg auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
36477330f729Sjoerg
3648*e038c9c4Sjoerg // Perform the actual operation.
36497330f729Sjoerg Value *Result;
3650*e038c9c4Sjoerg llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3651*e038c9c4Sjoerg switch (op.Opcode) {
3652*e038c9c4Sjoerg case BO_AddAssign:
3653*e038c9c4Sjoerg case BO_Add:
3654*e038c9c4Sjoerg Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
36557330f729Sjoerg break;
3656*e038c9c4Sjoerg case BO_SubAssign:
3657*e038c9c4Sjoerg case BO_Sub:
3658*e038c9c4Sjoerg Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
36597330f729Sjoerg break;
3660*e038c9c4Sjoerg case BO_MulAssign:
3661*e038c9c4Sjoerg case BO_Mul:
3662*e038c9c4Sjoerg Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
3663*e038c9c4Sjoerg break;
3664*e038c9c4Sjoerg case BO_DivAssign:
3665*e038c9c4Sjoerg case BO_Div:
3666*e038c9c4Sjoerg Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
3667*e038c9c4Sjoerg break;
3668*e038c9c4Sjoerg case BO_ShlAssign:
3669*e038c9c4Sjoerg case BO_Shl:
3670*e038c9c4Sjoerg Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
3671*e038c9c4Sjoerg break;
3672*e038c9c4Sjoerg case BO_ShrAssign:
3673*e038c9c4Sjoerg case BO_Shr:
3674*e038c9c4Sjoerg Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
3675*e038c9c4Sjoerg break;
36767330f729Sjoerg case BO_LT:
3677*e038c9c4Sjoerg return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
36787330f729Sjoerg case BO_GT:
3679*e038c9c4Sjoerg return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
36807330f729Sjoerg case BO_LE:
3681*e038c9c4Sjoerg return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
36827330f729Sjoerg case BO_GE:
3683*e038c9c4Sjoerg return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
36847330f729Sjoerg case BO_EQ:
36857330f729Sjoerg // For equality operations, we assume any padding bits on unsigned types are
36867330f729Sjoerg // zeroed out. They could be overwritten through non-saturating operations
36877330f729Sjoerg // that cause overflow, but this leads to undefined behavior.
3688*e038c9c4Sjoerg return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
36897330f729Sjoerg case BO_NE:
3690*e038c9c4Sjoerg return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
36917330f729Sjoerg case BO_Cmp:
36927330f729Sjoerg case BO_LAnd:
36937330f729Sjoerg case BO_LOr:
36947330f729Sjoerg llvm_unreachable("Found unimplemented fixed point binary operation");
36957330f729Sjoerg case BO_PtrMemD:
36967330f729Sjoerg case BO_PtrMemI:
36977330f729Sjoerg case BO_Rem:
36987330f729Sjoerg case BO_Xor:
36997330f729Sjoerg case BO_And:
37007330f729Sjoerg case BO_Or:
37017330f729Sjoerg case BO_Assign:
37027330f729Sjoerg case BO_RemAssign:
37037330f729Sjoerg case BO_AndAssign:
37047330f729Sjoerg case BO_XorAssign:
37057330f729Sjoerg case BO_OrAssign:
37067330f729Sjoerg case BO_Comma:
37077330f729Sjoerg llvm_unreachable("Found unsupported binary operation for fixed point types.");
37087330f729Sjoerg }
37097330f729Sjoerg
3710*e038c9c4Sjoerg bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
3711*e038c9c4Sjoerg BinaryOperator::isShiftAssignOp(op.Opcode);
37127330f729Sjoerg // Convert to the result type.
3713*e038c9c4Sjoerg return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
3714*e038c9c4Sjoerg : CommonFixedSema,
3715*e038c9c4Sjoerg ResultFixedSema);
37167330f729Sjoerg }
37177330f729Sjoerg
37187330f729Sjoerg Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
37197330f729Sjoerg // The LHS is always a pointer if either side is.
37207330f729Sjoerg if (!op.LHS->getType()->isPointerTy()) {
37217330f729Sjoerg if (op.Ty->isSignedIntegerOrEnumerationType()) {
37227330f729Sjoerg switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
37237330f729Sjoerg case LangOptions::SOB_Defined:
37247330f729Sjoerg return Builder.CreateSub(op.LHS, op.RHS, "sub");
37257330f729Sjoerg case LangOptions::SOB_Undefined:
37267330f729Sjoerg if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
37277330f729Sjoerg return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
37287330f729Sjoerg LLVM_FALLTHROUGH;
37297330f729Sjoerg case LangOptions::SOB_Trapping:
37307330f729Sjoerg if (CanElideOverflowCheck(CGF.getContext(), op))
37317330f729Sjoerg return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
37327330f729Sjoerg return EmitOverflowCheckedBinOp(op);
37337330f729Sjoerg }
37347330f729Sjoerg }
37357330f729Sjoerg
3736*e038c9c4Sjoerg if (op.Ty->isConstantMatrixType()) {
3737*e038c9c4Sjoerg llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
3738*e038c9c4Sjoerg CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3739*e038c9c4Sjoerg return MB.CreateSub(op.LHS, op.RHS);
3740*e038c9c4Sjoerg }
3741*e038c9c4Sjoerg
37427330f729Sjoerg if (op.Ty->isUnsignedIntegerType() &&
37437330f729Sjoerg CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
37447330f729Sjoerg !CanElideOverflowCheck(CGF.getContext(), op))
37457330f729Sjoerg return EmitOverflowCheckedBinOp(op);
37467330f729Sjoerg
37477330f729Sjoerg if (op.LHS->getType()->isFPOrFPVectorTy()) {
3748*e038c9c4Sjoerg CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
37497330f729Sjoerg // Try to form an fmuladd.
37507330f729Sjoerg if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
37517330f729Sjoerg return FMulAdd;
3752*e038c9c4Sjoerg return Builder.CreateFSub(op.LHS, op.RHS, "sub");
37537330f729Sjoerg }
37547330f729Sjoerg
3755*e038c9c4Sjoerg if (op.isFixedPointOp())
37567330f729Sjoerg return EmitFixedPointBinOp(op);
37577330f729Sjoerg
37587330f729Sjoerg return Builder.CreateSub(op.LHS, op.RHS, "sub");
37597330f729Sjoerg }
37607330f729Sjoerg
37617330f729Sjoerg // If the RHS is not a pointer, then we have normal pointer
37627330f729Sjoerg // arithmetic.
37637330f729Sjoerg if (!op.RHS->getType()->isPointerTy())
37647330f729Sjoerg return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);
37657330f729Sjoerg
37667330f729Sjoerg // Otherwise, this is a pointer subtraction.
37677330f729Sjoerg
37687330f729Sjoerg // Do the raw subtraction part.
37697330f729Sjoerg llvm::Value *LHS
37707330f729Sjoerg = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
37717330f729Sjoerg llvm::Value *RHS
37727330f729Sjoerg = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
37737330f729Sjoerg Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
37747330f729Sjoerg
37757330f729Sjoerg // Okay, figure out the element size.
37767330f729Sjoerg const BinaryOperator *expr = cast<BinaryOperator>(op.E);
37777330f729Sjoerg QualType elementType = expr->getLHS()->getType()->getPointeeType();
37787330f729Sjoerg
37797330f729Sjoerg llvm::Value *divisor = nullptr;
37807330f729Sjoerg
37817330f729Sjoerg // For a variable-length array, this is going to be non-constant.
37827330f729Sjoerg if (const VariableArrayType *vla
37837330f729Sjoerg = CGF.getContext().getAsVariableArrayType(elementType)) {
37847330f729Sjoerg auto VlaSize = CGF.getVLASize(vla);
37857330f729Sjoerg elementType = VlaSize.Type;
37867330f729Sjoerg divisor = VlaSize.NumElts;
37877330f729Sjoerg
37887330f729Sjoerg // Scale the number of non-VLA elements by the non-VLA element size.
37897330f729Sjoerg CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
37907330f729Sjoerg if (!eltSize.isOne())
37917330f729Sjoerg divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
37927330f729Sjoerg
37937330f729Sjoerg // For everything else, we can just compute it, safe in the
37947330f729Sjoerg // assumption that Sema won't let anything through that we can't
37957330f729Sjoerg // safely compute the size of.
37967330f729Sjoerg } else {
37977330f729Sjoerg CharUnits elementSize;
37987330f729Sjoerg // Handle GCC extension for pointer arithmetic on void* and
37997330f729Sjoerg // function pointer types.
38007330f729Sjoerg if (elementType->isVoidType() || elementType->isFunctionType())
38017330f729Sjoerg elementSize = CharUnits::One();
38027330f729Sjoerg else
38037330f729Sjoerg elementSize = CGF.getContext().getTypeSizeInChars(elementType);
38047330f729Sjoerg
38057330f729Sjoerg // Don't even emit the divide for element size of 1.
38067330f729Sjoerg if (elementSize.isOne())
38077330f729Sjoerg return diffInChars;
38087330f729Sjoerg
38097330f729Sjoerg divisor = CGF.CGM.getSize(elementSize);
38107330f729Sjoerg }
38117330f729Sjoerg
38127330f729Sjoerg // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
38137330f729Sjoerg // pointer difference in C is only defined in the case where both operands
38147330f729Sjoerg // are pointing to elements of an array.
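// For example, 'int *p, *q; p - q' divides the byte difference by
// sizeof(int) using an exact sdiv.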
38157330f729Sjoerg return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
38167330f729Sjoerg }
38177330f729Sjoerg
38187330f729Sjoerg Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) {
38197330f729Sjoerg llvm::IntegerType *Ty;
38207330f729Sjoerg if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
38217330f729Sjoerg Ty = cast<llvm::IntegerType>(VT->getElementType());
38227330f729Sjoerg else
38237330f729Sjoerg Ty = cast<llvm::IntegerType>(LHS->getType());
38247330f729Sjoerg return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
38257330f729Sjoerg }
38267330f729Sjoerg
3827*e038c9c4Sjoerg Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
3828*e038c9c4Sjoerg const Twine &Name) {
3829*e038c9c4Sjoerg llvm::IntegerType *Ty;
3830*e038c9c4Sjoerg if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3831*e038c9c4Sjoerg Ty = cast<llvm::IntegerType>(VT->getElementType());
3832*e038c9c4Sjoerg else
3833*e038c9c4Sjoerg Ty = cast<llvm::IntegerType>(LHS->getType());
3834*e038c9c4Sjoerg
3835*e038c9c4Sjoerg if (llvm::isPowerOf2_64(Ty->getBitWidth()))
3836*e038c9c4Sjoerg return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name);
3837*e038c9c4Sjoerg
3838*e038c9c4Sjoerg return Builder.CreateURem(
3839*e038c9c4Sjoerg RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
3840*e038c9c4Sjoerg }
3841*e038c9c4Sjoerg
38427330f729Sjoerg Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
3843*e038c9c4Sjoerg // TODO: This misses out on the sanitizer check below.
3844*e038c9c4Sjoerg if (Ops.isFixedPointOp())
3845*e038c9c4Sjoerg return EmitFixedPointBinOp(Ops);
3846*e038c9c4Sjoerg
38477330f729Sjoerg // LLVM requires the LHS and RHS to be the same type: promote or truncate the
38487330f729Sjoerg // RHS to the same size as the LHS.
38497330f729Sjoerg Value *RHS = Ops.RHS;
38507330f729Sjoerg if (Ops.LHS->getType() != RHS->getType())
38517330f729Sjoerg RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
38527330f729Sjoerg
3853*e038c9c4Sjoerg bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
38547330f729Sjoerg Ops.Ty->hasSignedIntegerRepresentation() &&
38557330f729Sjoerg !CGF.getLangOpts().isSignedOverflowDefined() &&
3856*e038c9c4Sjoerg !CGF.getLangOpts().CPlusPlus20;
3857*e038c9c4Sjoerg bool SanitizeUnsignedBase =
3858*e038c9c4Sjoerg CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
3859*e038c9c4Sjoerg Ops.Ty->hasUnsignedIntegerRepresentation();
3860*e038c9c4Sjoerg bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
38617330f729Sjoerg bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
38627330f729Sjoerg // OpenCL 6.3j: shift values are effectively % word size of LHS.
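// For a 32-bit LHS this masks the shift amount with 31; for widths that are
// not a power of two, ConstrainShiftValue uses an explicit urem instead.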
38637330f729Sjoerg if (CGF.getLangOpts().OpenCL)
3864*e038c9c4Sjoerg RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
38657330f729Sjoerg else if ((SanitizeBase || SanitizeExponent) &&
38667330f729Sjoerg isa<llvm::IntegerType>(Ops.LHS->getType())) {
38677330f729Sjoerg CodeGenFunction::SanitizerScope SanScope(&CGF);
38687330f729Sjoerg SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
38697330f729Sjoerg llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
38707330f729Sjoerg llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
38717330f729Sjoerg
38727330f729Sjoerg if (SanitizeExponent) {
38737330f729Sjoerg Checks.push_back(
38747330f729Sjoerg std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
38757330f729Sjoerg }
38767330f729Sjoerg
38777330f729Sjoerg if (SanitizeBase) {
38787330f729Sjoerg // Check whether we are shifting any non-zero bits off the top of the
38797330f729Sjoerg // integer. We only emit this check if the exponent is valid; otherwise
38807330f729Sjoerg // the instructions below will have undefined behavior themselves.
38817330f729Sjoerg llvm::BasicBlock *Orig = Builder.GetInsertBlock();
38827330f729Sjoerg llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
38837330f729Sjoerg llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
38847330f729Sjoerg Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
38857330f729Sjoerg llvm::Value *PromotedWidthMinusOne =
38867330f729Sjoerg (RHS == Ops.RHS) ? WidthMinusOne
38877330f729Sjoerg : GetWidthMinusOneValue(Ops.LHS, RHS);
38887330f729Sjoerg CGF.EmitBlock(CheckShiftBase);
38897330f729Sjoerg llvm::Value *BitsShiftedOff = Builder.CreateLShr(
38907330f729Sjoerg Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
38917330f729Sjoerg /*NUW*/ true, /*NSW*/ true),
38927330f729Sjoerg "shl.check");
3893*e038c9c4Sjoerg if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
38947330f729Sjoerg // In C99, we are not permitted to shift a 1 bit into the sign bit.
38957330f729Sjoerg // Under C++11's rules, shifting a 1 bit into the sign bit is
38967330f729Sjoerg // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
38977330f729Sjoerg // define signed left shifts, so we use the C99 and C++11 rules there).
3898*e038c9c4Sjoerg // Unsigned shifts can always shift into the top bit.
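// For a 32-bit int, for instance, C99 flags '1 << 31' while C++11 accepts
// it, but both flag '2 << 31', which shifts a one bit out past the sign bit.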
38997330f729Sjoerg llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
39007330f729Sjoerg BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
39017330f729Sjoerg }
39027330f729Sjoerg llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
39037330f729Sjoerg llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
39047330f729Sjoerg CGF.EmitBlock(Cont);
39057330f729Sjoerg llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
39067330f729Sjoerg BaseCheck->addIncoming(Builder.getTrue(), Orig);
39077330f729Sjoerg BaseCheck->addIncoming(ValidBase, CheckShiftBase);
3908*e038c9c4Sjoerg Checks.push_back(std::make_pair(
3909*e038c9c4Sjoerg BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase
3910*e038c9c4Sjoerg : SanitizerKind::UnsignedShiftBase));
39117330f729Sjoerg }
39127330f729Sjoerg
39137330f729Sjoerg assert(!Checks.empty());
39147330f729Sjoerg EmitBinOpCheck(Checks, Ops);
39157330f729Sjoerg }
39167330f729Sjoerg
39177330f729Sjoerg return Builder.CreateShl(Ops.LHS, RHS, "shl");
39187330f729Sjoerg }
39197330f729Sjoerg
39207330f729Sjoerg Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
3921*e038c9c4Sjoerg // TODO: This misses out on the sanitizer check below.
3922*e038c9c4Sjoerg if (Ops.isFixedPointOp())
3923*e038c9c4Sjoerg return EmitFixedPointBinOp(Ops);
3924*e038c9c4Sjoerg
39257330f729Sjoerg // LLVM requires the LHS and RHS to be the same type: promote or truncate the
39267330f729Sjoerg // RHS to the same size as the LHS.
39277330f729Sjoerg Value *RHS = Ops.RHS;
39287330f729Sjoerg if (Ops.LHS->getType() != RHS->getType())
39297330f729Sjoerg RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
39307330f729Sjoerg
39317330f729Sjoerg // OpenCL 6.3j: shift values are effectively % word size of LHS.
39327330f729Sjoerg if (CGF.getLangOpts().OpenCL)
3933*e038c9c4Sjoerg RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
39347330f729Sjoerg else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
39357330f729Sjoerg isa<llvm::IntegerType>(Ops.LHS->getType())) {
39367330f729Sjoerg CodeGenFunction::SanitizerScope SanScope(&CGF);
39377330f729Sjoerg llvm::Value *Valid =
39387330f729Sjoerg Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
39397330f729Sjoerg EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
39407330f729Sjoerg }
39417330f729Sjoerg
39427330f729Sjoerg if (Ops.Ty->hasUnsignedIntegerRepresentation())
39437330f729Sjoerg return Builder.CreateLShr(Ops.LHS, RHS, "shr");
39447330f729Sjoerg return Builder.CreateAShr(Ops.LHS, RHS, "shr");
39457330f729Sjoerg }
39467330f729Sjoerg
39477330f729Sjoerg enum IntrinsicType { VCMPEQ, VCMPGT };
39487330f729Sjoerg // Return the corresponding comparison intrinsic for the given vector element type.
39497330f729Sjoerg static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
39507330f729Sjoerg BuiltinType::Kind ElemKind) {
39517330f729Sjoerg switch (ElemKind) {
39527330f729Sjoerg default: llvm_unreachable("unexpected element type");
39537330f729Sjoerg case BuiltinType::Char_U:
39547330f729Sjoerg case BuiltinType::UChar:
39557330f729Sjoerg return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
39567330f729Sjoerg llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
39577330f729Sjoerg case BuiltinType::Char_S:
39587330f729Sjoerg case BuiltinType::SChar:
39597330f729Sjoerg return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
39607330f729Sjoerg llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
39617330f729Sjoerg case BuiltinType::UShort:
39627330f729Sjoerg return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
39637330f729Sjoerg llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
39647330f729Sjoerg case BuiltinType::Short:
39657330f729Sjoerg return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
39667330f729Sjoerg llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
39677330f729Sjoerg case BuiltinType::UInt:
39687330f729Sjoerg return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
39697330f729Sjoerg llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
39707330f729Sjoerg case BuiltinType::Int:
39717330f729Sjoerg return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
39727330f729Sjoerg llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
39737330f729Sjoerg case BuiltinType::ULong:
39747330f729Sjoerg case BuiltinType::ULongLong:
39757330f729Sjoerg return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
39767330f729Sjoerg llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
39777330f729Sjoerg case BuiltinType::Long:
39787330f729Sjoerg case BuiltinType::LongLong:
39797330f729Sjoerg return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
39807330f729Sjoerg llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
39817330f729Sjoerg case BuiltinType::Float:
39827330f729Sjoerg return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
39837330f729Sjoerg llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
39847330f729Sjoerg case BuiltinType::Double:
39857330f729Sjoerg return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
39867330f729Sjoerg llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
3987*e038c9c4Sjoerg case BuiltinType::UInt128:
3988*e038c9c4Sjoerg return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
3989*e038c9c4Sjoerg : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
3990*e038c9c4Sjoerg case BuiltinType::Int128:
3991*e038c9c4Sjoerg return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
3992*e038c9c4Sjoerg : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
39937330f729Sjoerg }
39947330f729Sjoerg }
39957330f729Sjoerg
39967330f729Sjoerg Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
39977330f729Sjoerg llvm::CmpInst::Predicate UICmpOpc,
39987330f729Sjoerg llvm::CmpInst::Predicate SICmpOpc,
3999*e038c9c4Sjoerg llvm::CmpInst::Predicate FCmpOpc,
4000*e038c9c4Sjoerg bool IsSignaling) {
40017330f729Sjoerg TestAndClearIgnoreResultAssign();
40027330f729Sjoerg Value *Result;
40037330f729Sjoerg QualType LHSTy = E->getLHS()->getType();
40047330f729Sjoerg QualType RHSTy = E->getRHS()->getType();
40057330f729Sjoerg if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
40067330f729Sjoerg assert(E->getOpcode() == BO_EQ ||
40077330f729Sjoerg E->getOpcode() == BO_NE);
40087330f729Sjoerg Value *LHS = CGF.EmitScalarExpr(E->getLHS());
40097330f729Sjoerg Value *RHS = CGF.EmitScalarExpr(E->getRHS());
40107330f729Sjoerg Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
40117330f729Sjoerg CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
40127330f729Sjoerg } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
40137330f729Sjoerg BinOpInfo BOInfo = EmitBinOps(E);
40147330f729Sjoerg Value *LHS = BOInfo.LHS;
40157330f729Sjoerg Value *RHS = BOInfo.RHS;
40167330f729Sjoerg
40177330f729Sjoerg // If this is an AltiVec comparison yielding a numeric (non-vector) type, we
40187330f729Sjoerg // use predicate intrinsics that compare the vectors and give 0 or 1 as a result.
40197330f729Sjoerg if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
40207330f729Sjoerg // Constants for mapping CR6 register bits to the predicate result.
40217330f729Sjoerg enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
40227330f729Sjoerg
40237330f729Sjoerg llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
40247330f729Sjoerg
40257330f729Sjoerg // In several cases the order of the vector arguments will be reversed.
40267330f729Sjoerg Value *FirstVecArg = LHS,
40277330f729Sjoerg *SecondVecArg = RHS;
40287330f729Sjoerg
40297330f729Sjoerg QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
4030*e038c9c4Sjoerg BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
40317330f729Sjoerg
40327330f729Sjoerg switch(E->getOpcode()) {
40337330f729Sjoerg default: llvm_unreachable("is not a comparison operation");
40347330f729Sjoerg case BO_EQ:
40357330f729Sjoerg CR6 = CR6_LT;
40367330f729Sjoerg ID = GetIntrinsic(VCMPEQ, ElementKind);
40377330f729Sjoerg break;
40387330f729Sjoerg case BO_NE:
40397330f729Sjoerg CR6 = CR6_EQ;
40407330f729Sjoerg ID = GetIntrinsic(VCMPEQ, ElementKind);
40417330f729Sjoerg break;
40427330f729Sjoerg case BO_LT:
40437330f729Sjoerg CR6 = CR6_LT;
40447330f729Sjoerg ID = GetIntrinsic(VCMPGT, ElementKind);
40457330f729Sjoerg std::swap(FirstVecArg, SecondVecArg);
40467330f729Sjoerg break;
40477330f729Sjoerg case BO_GT:
40487330f729Sjoerg CR6 = CR6_LT;
40497330f729Sjoerg ID = GetIntrinsic(VCMPGT, ElementKind);
40507330f729Sjoerg break;
40517330f729Sjoerg case BO_LE:
40527330f729Sjoerg if (ElementKind == BuiltinType::Float) {
40537330f729Sjoerg CR6 = CR6_LT;
40547330f729Sjoerg ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
40557330f729Sjoerg std::swap(FirstVecArg, SecondVecArg);
40567330f729Sjoerg }
40577330f729Sjoerg else {
40587330f729Sjoerg CR6 = CR6_EQ;
40597330f729Sjoerg ID = GetIntrinsic(VCMPGT, ElementKind);
40607330f729Sjoerg }
40617330f729Sjoerg break;
40627330f729Sjoerg case BO_GE:
40637330f729Sjoerg if (ElementKind == BuiltinType::Float) {
40647330f729Sjoerg CR6 = CR6_LT;
40657330f729Sjoerg ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
40667330f729Sjoerg }
40677330f729Sjoerg else {
40687330f729Sjoerg CR6 = CR6_EQ;
40697330f729Sjoerg ID = GetIntrinsic(VCMPGT, ElementKind);
40707330f729Sjoerg std::swap(FirstVecArg, SecondVecArg);
40717330f729Sjoerg }
40727330f729Sjoerg break;
40737330f729Sjoerg }
40747330f729Sjoerg
40757330f729Sjoerg Value *CR6Param = Builder.getInt32(CR6);
40767330f729Sjoerg llvm::Function *F = CGF.CGM.getIntrinsic(ID);
40777330f729Sjoerg Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
40787330f729Sjoerg
40797330f729Sjoerg // The result type of the intrinsic may not match E->getType(). If
40807330f729Sjoerg // E->getType() is not BoolTy, EmitScalarConversion will do the conversion
40817330f729Sjoerg // work. If E->getType() is BoolTy, EmitScalarConversion does nothing, so if
40827330f729Sjoerg // the intrinsic's result type is wider than i1 we must truncate it here to
40837330f729Sjoerg // avoid a crash later.
40847330f729Sjoerg llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
40857330f729Sjoerg if (ResultTy->getBitWidth() > 1 &&
40867330f729Sjoerg E->getType() == CGF.getContext().BoolTy)
40877330f729Sjoerg Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
40887330f729Sjoerg return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
40897330f729Sjoerg E->getExprLoc());
40907330f729Sjoerg }
40917330f729Sjoerg
4092*e038c9c4Sjoerg if (BOInfo.isFixedPointOp()) {
40937330f729Sjoerg Result = EmitFixedPointBinOp(BOInfo);
40947330f729Sjoerg } else if (LHS->getType()->isFPOrFPVectorTy()) {
4095*e038c9c4Sjoerg CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
4096*e038c9c4Sjoerg if (!IsSignaling)
40977330f729Sjoerg Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
4098*e038c9c4Sjoerg else
4099*e038c9c4Sjoerg Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
41007330f729Sjoerg } else if (LHSTy->hasSignedIntegerRepresentation()) {
41017330f729Sjoerg Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
41027330f729Sjoerg } else {
41037330f729Sjoerg // Unsigned integers and pointers.
41047330f729Sjoerg
41057330f729Sjoerg if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
41067330f729Sjoerg !isa<llvm::ConstantPointerNull>(LHS) &&
41077330f729Sjoerg !isa<llvm::ConstantPointerNull>(RHS)) {
41087330f729Sjoerg
41097330f729Sjoerg // Dynamic information must be stripped for comparisons,
41107330f729Sjoerg // because it could leak the dynamic information. Based on comparisons
41117330f729Sjoerg // of pointers to dynamic objects, the optimizer can replace one pointer
41127330f729Sjoerg // with another, which might be incorrect in presence of invariant
41137330f729Sjoerg // groups. Comparison with null is safe because null does not carry any
41147330f729Sjoerg // dynamic information.
41157330f729Sjoerg if (LHSTy.mayBeDynamicClass())
41167330f729Sjoerg LHS = Builder.CreateStripInvariantGroup(LHS);
41177330f729Sjoerg if (RHSTy.mayBeDynamicClass())
41187330f729Sjoerg RHS = Builder.CreateStripInvariantGroup(RHS);
41197330f729Sjoerg }
41207330f729Sjoerg
41217330f729Sjoerg Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
41227330f729Sjoerg }
41237330f729Sjoerg
41247330f729Sjoerg // If this is a vector comparison, sign extend the result to the appropriate
41257330f729Sjoerg // vector integer type and return it (don't convert to bool).
41267330f729Sjoerg if (LHSTy->isVectorType())
41277330f729Sjoerg return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
41287330f729Sjoerg
41297330f729Sjoerg } else {
41307330f729Sjoerg // Complex Comparison: can only be an equality comparison.
41317330f729Sjoerg CodeGenFunction::ComplexPairTy LHS, RHS;
41327330f729Sjoerg QualType CETy;
41337330f729Sjoerg if (auto *CTy = LHSTy->getAs<ComplexType>()) {
41347330f729Sjoerg LHS = CGF.EmitComplexExpr(E->getLHS());
41357330f729Sjoerg CETy = CTy->getElementType();
41367330f729Sjoerg } else {
41377330f729Sjoerg LHS.first = Visit(E->getLHS());
41387330f729Sjoerg LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
41397330f729Sjoerg CETy = LHSTy;
41407330f729Sjoerg }
41417330f729Sjoerg if (auto *CTy = RHSTy->getAs<ComplexType>()) {
41427330f729Sjoerg RHS = CGF.EmitComplexExpr(E->getRHS());
41437330f729Sjoerg assert(CGF.getContext().hasSameUnqualifiedType(CETy,
41447330f729Sjoerg CTy->getElementType()) &&
41457330f729Sjoerg "The element types must always match.");
41467330f729Sjoerg (void)CTy;
41477330f729Sjoerg } else {
41487330f729Sjoerg RHS.first = Visit(E->getRHS());
41497330f729Sjoerg RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
41507330f729Sjoerg assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
41517330f729Sjoerg "The element types must always match.");
41527330f729Sjoerg }
41537330f729Sjoerg
41547330f729Sjoerg Value *ResultR, *ResultI;
41557330f729Sjoerg if (CETy->isRealFloatingType()) {
4156*e038c9c4Sjoerg // As complex comparisons can only be equality comparisons, they
4157*e038c9c4Sjoerg // are never signaling comparisons.
41587330f729Sjoerg ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
41597330f729Sjoerg ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
41607330f729Sjoerg } else {
41617330f729Sjoerg // Complex comparisons can only be equality comparisons. As such, signed
41627330f729Sjoerg // and unsigned opcodes are the same.
41637330f729Sjoerg ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
41647330f729Sjoerg ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
41657330f729Sjoerg }
41667330f729Sjoerg
41677330f729Sjoerg if (E->getOpcode() == BO_EQ) {
41687330f729Sjoerg Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
41697330f729Sjoerg } else {
41707330f729Sjoerg assert(E->getOpcode() == BO_NE &&
41717330f729Sjoerg "Complex comparison other than == or != ?");
41727330f729Sjoerg Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
41737330f729Sjoerg }
41747330f729Sjoerg }
41757330f729Sjoerg
41767330f729Sjoerg return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
41777330f729Sjoerg E->getExprLoc());
41787330f729Sjoerg }
41797330f729Sjoerg
41807330f729Sjoerg Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
41817330f729Sjoerg bool Ignore = TestAndClearIgnoreResultAssign();
41827330f729Sjoerg
41837330f729Sjoerg Value *RHS;
41847330f729Sjoerg LValue LHS;
41857330f729Sjoerg
41867330f729Sjoerg switch (E->getLHS()->getType().getObjCLifetime()) {
41877330f729Sjoerg case Qualifiers::OCL_Strong:
41887330f729Sjoerg std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
41897330f729Sjoerg break;
41907330f729Sjoerg
41917330f729Sjoerg case Qualifiers::OCL_Autoreleasing:
41927330f729Sjoerg std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
41937330f729Sjoerg break;
41947330f729Sjoerg
41957330f729Sjoerg case Qualifiers::OCL_ExplicitNone:
41967330f729Sjoerg std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
41977330f729Sjoerg break;
41987330f729Sjoerg
41997330f729Sjoerg case Qualifiers::OCL_Weak:
42007330f729Sjoerg RHS = Visit(E->getRHS());
42017330f729Sjoerg LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4202*e038c9c4Sjoerg RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
42037330f729Sjoerg break;
42047330f729Sjoerg
42057330f729Sjoerg case Qualifiers::OCL_None:
42067330f729Sjoerg // __block variables need to have the rhs evaluated first, plus
42077330f729Sjoerg // this should improve codegen just a little.
42087330f729Sjoerg RHS = Visit(E->getRHS());
42097330f729Sjoerg LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
42107330f729Sjoerg
42117330f729Sjoerg // Store the value into the LHS. Bit-fields are handled specially
42127330f729Sjoerg // because the result is altered by the store, i.e., [C99 6.5.16p1]
42137330f729Sjoerg // 'An assignment expression has the value of the left operand after
42147330f729Sjoerg // the assignment...'.
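// For example (a hypothetical C snippet):
//   struct S { unsigned b : 3; } s;
//   int v = (s.b = 12);  // v is 4: the value of 's.b' after the store,
//                        // not the original right-hand side value 12.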
42157330f729Sjoerg if (LHS.isBitField()) {
42167330f729Sjoerg CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
42177330f729Sjoerg } else {
42187330f729Sjoerg CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
42197330f729Sjoerg CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
42207330f729Sjoerg }
42217330f729Sjoerg }
42227330f729Sjoerg
42237330f729Sjoerg // If the result is clearly ignored, return now.
42247330f729Sjoerg if (Ignore)
42257330f729Sjoerg return nullptr;
42267330f729Sjoerg
42277330f729Sjoerg // The result of an assignment in C is the assigned r-value.
42287330f729Sjoerg if (!CGF.getLangOpts().CPlusPlus)
42297330f729Sjoerg return RHS;
42307330f729Sjoerg
42317330f729Sjoerg // If the lvalue is non-volatile, return the computed value of the assignment.
42327330f729Sjoerg if (!LHS.isVolatileQualified())
42337330f729Sjoerg return RHS;
42347330f729Sjoerg
42357330f729Sjoerg // Otherwise, reload the value.
42367330f729Sjoerg return EmitLoadOfLValue(LHS, E->getExprLoc());
42377330f729Sjoerg }
42387330f729Sjoerg
42397330f729Sjoerg Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
42407330f729Sjoerg // Perform vector logical and on comparisons with zero vectors.
42417330f729Sjoerg if (E->getType()->isVectorType()) {
42427330f729Sjoerg CGF.incrementProfileCounter(E);
42437330f729Sjoerg
42447330f729Sjoerg Value *LHS = Visit(E->getLHS());
42457330f729Sjoerg Value *RHS = Visit(E->getRHS());
42467330f729Sjoerg Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
42477330f729Sjoerg if (LHS->getType()->isFPOrFPVectorTy()) {
4248*e038c9c4Sjoerg CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4249*e038c9c4Sjoerg CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
42507330f729Sjoerg LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
42517330f729Sjoerg RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
42527330f729Sjoerg } else {
42537330f729Sjoerg LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
42547330f729Sjoerg RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
42557330f729Sjoerg }
42567330f729Sjoerg Value *And = Builder.CreateAnd(LHS, RHS);
42577330f729Sjoerg return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
42587330f729Sjoerg }
42597330f729Sjoerg
4260*e038c9c4Sjoerg bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
42617330f729Sjoerg llvm::Type *ResTy = ConvertType(E->getType());
42627330f729Sjoerg
42637330f729Sjoerg // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
42647330f729Sjoerg // If we have 1 && X, just emit X without inserting the control flow.
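// A minimal sketch of the folding below (hypothetical C snippets):
//   1 && f()  -->  only 'f()' is emitted, then zero-extended to the result;
//   0 && f()  -->  'f()' is not emitted at all (unless it contains labels),
//                  and the whole expression folds to 0.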
42657330f729Sjoerg bool LHSCondVal;
42667330f729Sjoerg if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
42677330f729Sjoerg if (LHSCondVal) { // If we have 1 && X, just emit X.
42687330f729Sjoerg CGF.incrementProfileCounter(E);
42697330f729Sjoerg
42707330f729Sjoerg Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4271*e038c9c4Sjoerg
4272*e038c9c4Sjoerg // If we're generating for profiling or coverage, generate a branch to a
4273*e038c9c4Sjoerg // block that increments the RHS counter needed to track branch condition
4274*e038c9c4Sjoerg // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4275*e038c9c4Sjoerg // "FalseBlock" after the increment is done.
4276*e038c9c4Sjoerg if (InstrumentRegions &&
4277*e038c9c4Sjoerg CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4278*e038c9c4Sjoerg llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
4279*e038c9c4Sjoerg llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4280*e038c9c4Sjoerg Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
4281*e038c9c4Sjoerg CGF.EmitBlock(RHSBlockCnt);
4282*e038c9c4Sjoerg CGF.incrementProfileCounter(E->getRHS());
4283*e038c9c4Sjoerg CGF.EmitBranch(FBlock);
4284*e038c9c4Sjoerg CGF.EmitBlock(FBlock);
4285*e038c9c4Sjoerg }
4286*e038c9c4Sjoerg
42877330f729Sjoerg // ZExt result to int or bool.
42887330f729Sjoerg return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
42897330f729Sjoerg }
42907330f729Sjoerg
42917330f729Sjoerg // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
42927330f729Sjoerg if (!CGF.ContainsLabel(E->getRHS()))
42937330f729Sjoerg return llvm::Constant::getNullValue(ResTy);
42947330f729Sjoerg }
42957330f729Sjoerg
42967330f729Sjoerg llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
42977330f729Sjoerg llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
42987330f729Sjoerg
42997330f729Sjoerg CodeGenFunction::ConditionalEvaluation eval(CGF);
43007330f729Sjoerg
43017330f729Sjoerg // Branch on the LHS first. If it is false, go to the failure (cont) block.
43027330f729Sjoerg CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
43037330f729Sjoerg CGF.getProfileCount(E->getRHS()));
43047330f729Sjoerg
43057330f729Sjoerg // Any edges into the ContBlock now come from an (indeterminate number of)
43067330f729Sjoerg // edges out of this first condition. All of these values will be false. Start
43077330f729Sjoerg // setting up the PHI node in the Cont Block for this.
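// As an illustrative sketch (block and value names are approximate), the
// merge for 'a && b' ends up looking roughly like:
//   land.end:
//     %phi = phi i1 [ false, %entry ], [ %rhscond, %land.rhs ]
//     %land.ext = zext i1 %phi to i32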
43087330f729Sjoerg llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
43097330f729Sjoerg "", ContBlock);
43107330f729Sjoerg for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
43117330f729Sjoerg PI != PE; ++PI)
43127330f729Sjoerg PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
43137330f729Sjoerg
43147330f729Sjoerg eval.begin(CGF);
43157330f729Sjoerg CGF.EmitBlock(RHSBlock);
43167330f729Sjoerg CGF.incrementProfileCounter(E);
43177330f729Sjoerg Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
43187330f729Sjoerg eval.end(CGF);
43197330f729Sjoerg
43207330f729Sjoerg // Reacquire the RHS block, as there may be subblocks inserted.
43217330f729Sjoerg RHSBlock = Builder.GetInsertBlock();
43227330f729Sjoerg
4323*e038c9c4Sjoerg // If we're generating for profiling or coverage, generate a branch on the
4324*e038c9c4Sjoerg // RHS to a block that increments the RHS true counter needed to track branch
4325*e038c9c4Sjoerg // condition coverage.
4326*e038c9c4Sjoerg if (InstrumentRegions &&
4327*e038c9c4Sjoerg CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4328*e038c9c4Sjoerg llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4329*e038c9c4Sjoerg Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
4330*e038c9c4Sjoerg CGF.EmitBlock(RHSBlockCnt);
4331*e038c9c4Sjoerg CGF.incrementProfileCounter(E->getRHS());
4332*e038c9c4Sjoerg CGF.EmitBranch(ContBlock);
4333*e038c9c4Sjoerg PN->addIncoming(RHSCond, RHSBlockCnt);
4334*e038c9c4Sjoerg }
4335*e038c9c4Sjoerg
43367330f729Sjoerg // Emit an unconditional branch from this block to ContBlock.
43377330f729Sjoerg {
43387330f729Sjoerg // There is no need to emit a line number for the unconditional branch.
43397330f729Sjoerg auto NL = ApplyDebugLocation::CreateEmpty(CGF);
43407330f729Sjoerg CGF.EmitBlock(ContBlock);
43417330f729Sjoerg }
43427330f729Sjoerg // Insert an entry into the phi node for the edge with the value of RHSCond.
43437330f729Sjoerg PN->addIncoming(RHSCond, RHSBlock);
43447330f729Sjoerg
43457330f729Sjoerg // Artificial location to preserve the scope information
43467330f729Sjoerg {
43477330f729Sjoerg auto NL = ApplyDebugLocation::CreateArtificial(CGF);
43487330f729Sjoerg PN->setDebugLoc(Builder.getCurrentDebugLocation());
43497330f729Sjoerg }
43507330f729Sjoerg
43517330f729Sjoerg // ZExt result to int.
43527330f729Sjoerg return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
43537330f729Sjoerg }
43547330f729Sjoerg
43557330f729Sjoerg Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
43567330f729Sjoerg // Perform vector logical or on comparisons with zero vectors.
43577330f729Sjoerg if (E->getType()->isVectorType()) {
43587330f729Sjoerg CGF.incrementProfileCounter(E);
43597330f729Sjoerg
43607330f729Sjoerg Value *LHS = Visit(E->getLHS());
43617330f729Sjoerg Value *RHS = Visit(E->getRHS());
43627330f729Sjoerg Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
43637330f729Sjoerg if (LHS->getType()->isFPOrFPVectorTy()) {
4364*e038c9c4Sjoerg CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4365*e038c9c4Sjoerg CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
43667330f729Sjoerg LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
43677330f729Sjoerg RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
43687330f729Sjoerg } else {
43697330f729Sjoerg LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
43707330f729Sjoerg RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
43717330f729Sjoerg }
43727330f729Sjoerg Value *Or = Builder.CreateOr(LHS, RHS);
43737330f729Sjoerg return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
43747330f729Sjoerg }
43757330f729Sjoerg
4376*e038c9c4Sjoerg bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
43777330f729Sjoerg llvm::Type *ResTy = ConvertType(E->getType());
43787330f729Sjoerg
43797330f729Sjoerg // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
43807330f729Sjoerg // If we have 0 || X, just emit X without inserting the control flow.
43817330f729Sjoerg bool LHSCondVal;
43827330f729Sjoerg if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
43837330f729Sjoerg if (!LHSCondVal) { // If we have 0 || X, just emit X.
43847330f729Sjoerg CGF.incrementProfileCounter(E);
43857330f729Sjoerg
43867330f729Sjoerg Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4387*e038c9c4Sjoerg
4388*e038c9c4Sjoerg // If we're generating for profiling or coverage, generate a branch to a
4389*e038c9c4Sjoerg // block that increments the RHS counter needed to track branch condition
4390*e038c9c4Sjoerg // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4391*e038c9c4Sjoerg // "FalseBlock" after the increment is done.
4392*e038c9c4Sjoerg if (InstrumentRegions &&
4393*e038c9c4Sjoerg CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4394*e038c9c4Sjoerg llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
4395*e038c9c4Sjoerg llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4396*e038c9c4Sjoerg Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
4397*e038c9c4Sjoerg CGF.EmitBlock(RHSBlockCnt);
4398*e038c9c4Sjoerg CGF.incrementProfileCounter(E->getRHS());
4399*e038c9c4Sjoerg CGF.EmitBranch(FBlock);
4400*e038c9c4Sjoerg CGF.EmitBlock(FBlock);
4401*e038c9c4Sjoerg }
4402*e038c9c4Sjoerg
44037330f729Sjoerg // ZExt result to int or bool.
44047330f729Sjoerg return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
44057330f729Sjoerg }
44067330f729Sjoerg
44077330f729Sjoerg // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
44087330f729Sjoerg if (!CGF.ContainsLabel(E->getRHS()))
44097330f729Sjoerg return llvm::ConstantInt::get(ResTy, 1);
44107330f729Sjoerg }
44117330f729Sjoerg
44127330f729Sjoerg llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
44137330f729Sjoerg llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
44147330f729Sjoerg
44157330f729Sjoerg CodeGenFunction::ConditionalEvaluation eval(CGF);
44167330f729Sjoerg
44177330f729Sjoerg // Branch on the LHS first. If it is true, go to the success (cont) block.
44187330f729Sjoerg CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
44197330f729Sjoerg CGF.getCurrentProfileCount() -
44207330f729Sjoerg CGF.getProfileCount(E->getRHS()));
44217330f729Sjoerg
44227330f729Sjoerg // Any edges into the ContBlock now come from an (indeterminate number of)
44237330f729Sjoerg // edges out of this first condition. All of these values will be true. Start
44247330f729Sjoerg // setting up the PHI node in the Cont Block for this.
44257330f729Sjoerg llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
44267330f729Sjoerg "", ContBlock);
44277330f729Sjoerg for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
44287330f729Sjoerg PI != PE; ++PI)
44297330f729Sjoerg PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
44307330f729Sjoerg
44317330f729Sjoerg eval.begin(CGF);
44327330f729Sjoerg
44337330f729Sjoerg // Emit the RHS condition as a bool value.
44347330f729Sjoerg CGF.EmitBlock(RHSBlock);
44357330f729Sjoerg CGF.incrementProfileCounter(E);
44367330f729Sjoerg Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
44377330f729Sjoerg
44387330f729Sjoerg eval.end(CGF);
44397330f729Sjoerg
44407330f729Sjoerg // Reacquire the RHS block, as there may be subblocks inserted.
44417330f729Sjoerg RHSBlock = Builder.GetInsertBlock();
44427330f729Sjoerg
4443*e038c9c4Sjoerg // If we're generating for profiling or coverage, generate a branch on the
4444*e038c9c4Sjoerg // RHS to a block that increments the RHS true counter needed to track branch
4445*e038c9c4Sjoerg // condition coverage.
4446*e038c9c4Sjoerg if (InstrumentRegions &&
4447*e038c9c4Sjoerg CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4448*e038c9c4Sjoerg llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4449*e038c9c4Sjoerg Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
4450*e038c9c4Sjoerg CGF.EmitBlock(RHSBlockCnt);
4451*e038c9c4Sjoerg CGF.incrementProfileCounter(E->getRHS());
4452*e038c9c4Sjoerg CGF.EmitBranch(ContBlock);
4453*e038c9c4Sjoerg PN->addIncoming(RHSCond, RHSBlockCnt);
4454*e038c9c4Sjoerg }
4455*e038c9c4Sjoerg
44567330f729Sjoerg // Emit an unconditional branch from this block to ContBlock. Insert an entry
44577330f729Sjoerg // into the phi node for the edge with the value of RHSCond.
44587330f729Sjoerg CGF.EmitBlock(ContBlock);
44597330f729Sjoerg PN->addIncoming(RHSCond, RHSBlock);
44607330f729Sjoerg
44617330f729Sjoerg // ZExt result to int.
44627330f729Sjoerg return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
44637330f729Sjoerg }
44647330f729Sjoerg
44657330f729Sjoerg Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
44667330f729Sjoerg CGF.EmitIgnoredExpr(E->getLHS());
44677330f729Sjoerg CGF.EnsureInsertPoint();
44687330f729Sjoerg return Visit(E->getRHS());
44697330f729Sjoerg }
44707330f729Sjoerg
44717330f729Sjoerg //===----------------------------------------------------------------------===//
44727330f729Sjoerg // Other Operators
44737330f729Sjoerg //===----------------------------------------------------------------------===//
44747330f729Sjoerg
44757330f729Sjoerg /// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
44767330f729Sjoerg /// expression is cheap enough and side-effect-free enough to evaluate
44777330f729Sjoerg /// unconditionally instead of conditionally. This is used to convert control
44787330f729Sjoerg /// flow into selects in some cases.
44797330f729Sjoerg static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
44807330f729Sjoerg CodeGenFunction &CGF) {
44817330f729Sjoerg // Anything that is an integer or floating point constant is fine.
44827330f729Sjoerg return E->IgnoreParens()->isEvaluatable(CGF.getContext());
44837330f729Sjoerg
44847330f729Sjoerg // Even non-volatile automatic variables can't be evaluated unconditionally.
44857330f729Sjoerg // Referencing a thread_local may cause non-trivial initialization work to
44867330f729Sjoerg // occur. If we're inside a lambda and one of the variables is from the scope
44877330f729Sjoerg // outside the lambda, that function may have returned already. Reading its
44887330f729Sjoerg // locals is a bad idea. Also, these reads may introduce races that didn't
44897330f729Sjoerg // exist in the source-level program.
44907330f729Sjoerg }
44917330f729Sjoerg
44927330f729Sjoerg
44937330f729Sjoerg Value *ScalarExprEmitter::
44947330f729Sjoerg VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
44957330f729Sjoerg TestAndClearIgnoreResultAssign();
44967330f729Sjoerg
44977330f729Sjoerg // Bind the common expression if necessary.
44987330f729Sjoerg CodeGenFunction::OpaqueValueMapping binding(CGF, E);
44997330f729Sjoerg
45007330f729Sjoerg Expr *condExpr = E->getCond();
45017330f729Sjoerg Expr *lhsExpr = E->getTrueExpr();
45027330f729Sjoerg Expr *rhsExpr = E->getFalseExpr();
45037330f729Sjoerg
45047330f729Sjoerg // If the condition constant folds and can be elided, try to avoid emitting
45057330f729Sjoerg // the condition and the dead arm.
45067330f729Sjoerg bool CondExprBool;
45077330f729Sjoerg if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
45087330f729Sjoerg Expr *live = lhsExpr, *dead = rhsExpr;
45097330f729Sjoerg if (!CondExprBool) std::swap(live, dead);
45107330f729Sjoerg
45117330f729Sjoerg // If the dead side doesn't have labels we need, just emit the Live part.
45127330f729Sjoerg if (!CGF.ContainsLabel(dead)) {
45137330f729Sjoerg if (CondExprBool)
45147330f729Sjoerg CGF.incrementProfileCounter(E);
45157330f729Sjoerg Value *Result = Visit(live);
45167330f729Sjoerg
45177330f729Sjoerg // If the live part is a throw expression, it acts like it has a void
45187330f729Sjoerg // type, so evaluating it returns a null Value*. However, a conditional
45197330f729Sjoerg // with non-void type must return a non-null Value*.
45207330f729Sjoerg if (!Result && !E->getType()->isVoidType())
45217330f729Sjoerg Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
45227330f729Sjoerg
45237330f729Sjoerg return Result;
45247330f729Sjoerg }
45257330f729Sjoerg }
45267330f729Sjoerg
45277330f729Sjoerg // OpenCL: If the condition is a vector (or an ext vector), we can treat this
45287330f729Sjoerg // conditional like the OpenCL select function.
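// A small illustrative example (assuming OpenCL vector types):
//   int4 c; float4 a, b;
//   float4 r = c ? a : b;  // element i of 'r' comes from 'a' when the MSB of
//                          // element i of 'c' is set, and from 'b' otherwise.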
4529*e038c9c4Sjoerg if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
4530*e038c9c4Sjoerg condExpr->getType()->isExtVectorType()) {
45317330f729Sjoerg CGF.incrementProfileCounter(E);
45327330f729Sjoerg
45337330f729Sjoerg llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
45347330f729Sjoerg llvm::Value *LHS = Visit(lhsExpr);
45357330f729Sjoerg llvm::Value *RHS = Visit(rhsExpr);
45367330f729Sjoerg
45377330f729Sjoerg llvm::Type *condType = ConvertType(condExpr->getType());
4538*e038c9c4Sjoerg auto *vecTy = cast<llvm::FixedVectorType>(condType);
45397330f729Sjoerg
45407330f729Sjoerg unsigned numElem = vecTy->getNumElements();
45417330f729Sjoerg llvm::Type *elemType = vecTy->getElementType();
45427330f729Sjoerg
45437330f729Sjoerg llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
45447330f729Sjoerg llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
4545*e038c9c4Sjoerg llvm::Value *tmp = Builder.CreateSExt(
4546*e038c9c4Sjoerg TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
45477330f729Sjoerg llvm::Value *tmp2 = Builder.CreateNot(tmp);
45487330f729Sjoerg
45497330f729Sjoerg // Cast float to int to perform ANDs if necessary.
45507330f729Sjoerg llvm::Value *RHSTmp = RHS;
45517330f729Sjoerg llvm::Value *LHSTmp = LHS;
45527330f729Sjoerg bool wasCast = false;
45537330f729Sjoerg llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
45547330f729Sjoerg if (rhsVTy->getElementType()->isFloatingPointTy()) {
45557330f729Sjoerg RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
45567330f729Sjoerg LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
45577330f729Sjoerg wasCast = true;
45587330f729Sjoerg }
45597330f729Sjoerg
45607330f729Sjoerg llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
45617330f729Sjoerg llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
45627330f729Sjoerg llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
45637330f729Sjoerg if (wasCast)
45647330f729Sjoerg tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
45657330f729Sjoerg
45667330f729Sjoerg return tmp5;
45677330f729Sjoerg }
45687330f729Sjoerg
4569*e038c9c4Sjoerg if (condExpr->getType()->isVectorType()) {
4570*e038c9c4Sjoerg CGF.incrementProfileCounter(E);
4571*e038c9c4Sjoerg
4572*e038c9c4Sjoerg llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4573*e038c9c4Sjoerg llvm::Value *LHS = Visit(lhsExpr);
4574*e038c9c4Sjoerg llvm::Value *RHS = Visit(rhsExpr);
4575*e038c9c4Sjoerg
4576*e038c9c4Sjoerg llvm::Type *CondType = ConvertType(condExpr->getType());
4577*e038c9c4Sjoerg auto *VecTy = cast<llvm::VectorType>(CondType);
4578*e038c9c4Sjoerg llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
4579*e038c9c4Sjoerg
4580*e038c9c4Sjoerg CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
4581*e038c9c4Sjoerg return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
4582*e038c9c4Sjoerg }
4583*e038c9c4Sjoerg
45847330f729Sjoerg // If this is a really simple expression (like x ? 4 : 5), emit this as a
45857330f729Sjoerg // select instead of as control flow. We can only do this if it is cheap and
45867330f729Sjoerg // safe to evaluate the LHS and RHS unconditionally.
45877330f729Sjoerg if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
45887330f729Sjoerg isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
45897330f729Sjoerg llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
45907330f729Sjoerg llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
45917330f729Sjoerg
45927330f729Sjoerg CGF.incrementProfileCounter(E, StepV);
45937330f729Sjoerg
45947330f729Sjoerg llvm::Value *LHS = Visit(lhsExpr);
45957330f729Sjoerg llvm::Value *RHS = Visit(rhsExpr);
45967330f729Sjoerg if (!LHS) {
45977330f729Sjoerg // If the conditional has void type, make sure we return a null Value*.
45987330f729Sjoerg assert(!RHS && "LHS and RHS types must match");
45997330f729Sjoerg return nullptr;
46007330f729Sjoerg }
46017330f729Sjoerg return Builder.CreateSelect(CondV, LHS, RHS, "cond");
46027330f729Sjoerg }
46037330f729Sjoerg
46047330f729Sjoerg llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
46057330f729Sjoerg llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
46067330f729Sjoerg llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
46077330f729Sjoerg
46087330f729Sjoerg CodeGenFunction::ConditionalEvaluation eval(CGF);
46097330f729Sjoerg CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
46107330f729Sjoerg CGF.getProfileCount(lhsExpr));
46117330f729Sjoerg
46127330f729Sjoerg CGF.EmitBlock(LHSBlock);
46137330f729Sjoerg CGF.incrementProfileCounter(E);
46147330f729Sjoerg eval.begin(CGF);
46157330f729Sjoerg Value *LHS = Visit(lhsExpr);
46167330f729Sjoerg eval.end(CGF);
46177330f729Sjoerg
46187330f729Sjoerg LHSBlock = Builder.GetInsertBlock();
46197330f729Sjoerg Builder.CreateBr(ContBlock);
46207330f729Sjoerg
46217330f729Sjoerg CGF.EmitBlock(RHSBlock);
46227330f729Sjoerg eval.begin(CGF);
46237330f729Sjoerg Value *RHS = Visit(rhsExpr);
46247330f729Sjoerg eval.end(CGF);
46257330f729Sjoerg
46267330f729Sjoerg RHSBlock = Builder.GetInsertBlock();
46277330f729Sjoerg CGF.EmitBlock(ContBlock);
46287330f729Sjoerg
46297330f729Sjoerg // If the LHS or RHS is a throw expression, it will be legitimately null.
46307330f729Sjoerg if (!LHS)
46317330f729Sjoerg return RHS;
46327330f729Sjoerg if (!RHS)
46337330f729Sjoerg return LHS;
46347330f729Sjoerg
46357330f729Sjoerg // Create a PHI node for the real part.
46367330f729Sjoerg llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
46377330f729Sjoerg PN->addIncoming(LHS, LHSBlock);
46387330f729Sjoerg PN->addIncoming(RHS, RHSBlock);
46397330f729Sjoerg return PN;
46407330f729Sjoerg }
46417330f729Sjoerg
46427330f729Sjoerg Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
46437330f729Sjoerg return Visit(E->getChosenSubExpr());
46447330f729Sjoerg }
46457330f729Sjoerg
46467330f729Sjoerg Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
46477330f729Sjoerg QualType Ty = VE->getType();
46487330f729Sjoerg
46497330f729Sjoerg if (Ty->isVariablyModifiedType())
46507330f729Sjoerg CGF.EmitVariablyModifiedType(Ty);
46517330f729Sjoerg
46527330f729Sjoerg Address ArgValue = Address::invalid();
46537330f729Sjoerg Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
46547330f729Sjoerg
46557330f729Sjoerg llvm::Type *ArgTy = ConvertType(VE->getType());
46567330f729Sjoerg
46577330f729Sjoerg // If EmitVAArg fails, emit an error.
46587330f729Sjoerg if (!ArgPtr.isValid()) {
46597330f729Sjoerg CGF.ErrorUnsupported(VE, "va_arg expression");
46607330f729Sjoerg return llvm::UndefValue::get(ArgTy);
46617330f729Sjoerg }
46627330f729Sjoerg
46637330f729Sjoerg // FIXME Volatility.
46647330f729Sjoerg llvm::Value *Val = Builder.CreateLoad(ArgPtr);
46657330f729Sjoerg
46667330f729Sjoerg // If EmitVAArg promoted the type, we must truncate it.
46677330f729Sjoerg if (ArgTy != Val->getType()) {
46687330f729Sjoerg if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
46697330f729Sjoerg Val = Builder.CreateIntToPtr(Val, ArgTy);
46707330f729Sjoerg else
46717330f729Sjoerg Val = Builder.CreateTrunc(Val, ArgTy);
46727330f729Sjoerg }
46737330f729Sjoerg
46747330f729Sjoerg return Val;
46757330f729Sjoerg }
46767330f729Sjoerg
46777330f729Sjoerg Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
46787330f729Sjoerg return CGF.EmitBlockLiteral(block);
46797330f729Sjoerg }
46807330f729Sjoerg
46817330f729Sjoerg // Convert a vec3 to vec4, or vice versa.
46827330f729Sjoerg static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
46837330f729Sjoerg Value *Src, unsigned NumElementsDst) {
4684*e038c9c4Sjoerg static constexpr int Mask[] = {0, 1, 2, -1};
4685*e038c9c4Sjoerg return Builder.CreateShuffleVector(Src,
4686*e038c9c4Sjoerg llvm::makeArrayRef(Mask, NumElementsDst));
46877330f729Sjoerg }
46887330f729Sjoerg
46897330f729Sjoerg // Create cast instructions for converting LLVM value \p Src to LLVM type \p
46907330f729Sjoerg // DstTy. \p Src has the same size as \p DstTy. Both are single value types
46917330f729Sjoerg // but could be scalars or vectors of different lengths, and either can be
46927330f729Sjoerg // a pointer.
46937330f729Sjoerg // There are 4 cases:
46947330f729Sjoerg // 1. non-pointer -> non-pointer : needs 1 bitcast
46957330f729Sjoerg // 2. pointer -> pointer : needs 1 bitcast or addrspacecast
46967330f729Sjoerg // 3. pointer -> non-pointer
46977330f729Sjoerg // a) pointer -> intptr_t : needs 1 ptrtoint
46987330f729Sjoerg // b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
46997330f729Sjoerg // 4. non-pointer -> pointer
47007330f729Sjoerg // a) intptr_t -> pointer : needs 1 inttoptr
47017330f729Sjoerg // b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
47027330f729Sjoerg // Note: for cases 3b and 4b two casts are required since LLVM casts do not
47037330f729Sjoerg // allow casting directly between pointer types and non-integer non-pointer
47047330f729Sjoerg // types.
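// For instance (a hypothetical case 3b on a 32-bit target): converting a
// 'float*' value to a same-sized 'float' needs a ptrtoint to i32 followed by a
// bitcast from i32 to float; the reverse direction (case 4b) needs the bitcast
// first and then an inttoptr.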
47057330f729Sjoerg static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
47067330f729Sjoerg const llvm::DataLayout &DL,
47077330f729Sjoerg Value *Src, llvm::Type *DstTy,
47087330f729Sjoerg StringRef Name = "") {
47097330f729Sjoerg auto SrcTy = Src->getType();
47107330f729Sjoerg
47117330f729Sjoerg // Case 1.
47127330f729Sjoerg if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
47137330f729Sjoerg return Builder.CreateBitCast(Src, DstTy, Name);
47147330f729Sjoerg
47157330f729Sjoerg // Case 2.
47167330f729Sjoerg if (SrcTy->isPointerTy() && DstTy->isPointerTy())
47177330f729Sjoerg return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
47187330f729Sjoerg
47197330f729Sjoerg // Case 3.
47207330f729Sjoerg if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
47217330f729Sjoerg // Case 3b.
47227330f729Sjoerg if (!DstTy->isIntegerTy())
47237330f729Sjoerg Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
47247330f729Sjoerg // Cases 3a and 3b.
47257330f729Sjoerg return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
47267330f729Sjoerg }
47277330f729Sjoerg
47287330f729Sjoerg // Case 4b.
47297330f729Sjoerg if (!SrcTy->isIntegerTy())
47307330f729Sjoerg Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
47317330f729Sjoerg // Cases 4a and 4b.
47327330f729Sjoerg return Builder.CreateIntToPtr(Src, DstTy, Name);
47337330f729Sjoerg }
47347330f729Sjoerg
47357330f729Sjoerg Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
47367330f729Sjoerg Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
47377330f729Sjoerg llvm::Type *DstTy = ConvertType(E->getType());
47387330f729Sjoerg
47397330f729Sjoerg llvm::Type *SrcTy = Src->getType();
4740*e038c9c4Sjoerg unsigned NumElementsSrc =
4741*e038c9c4Sjoerg isa<llvm::VectorType>(SrcTy)
4742*e038c9c4Sjoerg ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
4743*e038c9c4Sjoerg : 0;
4744*e038c9c4Sjoerg unsigned NumElementsDst =
4745*e038c9c4Sjoerg isa<llvm::VectorType>(DstTy)
4746*e038c9c4Sjoerg ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
4747*e038c9c4Sjoerg : 0;
47487330f729Sjoerg
47497330f729Sjoerg // Going from vec3 to non-vec3 is a special case and requires a shuffle
47507330f729Sjoerg // vector to get a vec4, then a bitcast if the target type is different.
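// Illustrative sketch (assuming the OpenCL 'as_type' builtins): for
// 'as_int4(some_float3)' the <3 x float> source is first shuffled into a
// <4 x float> (the fourth lane is undefined) and then bitcast to <4 x i32>.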
47517330f729Sjoerg if (NumElementsSrc == 3 && NumElementsDst != 3) {
47527330f729Sjoerg Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
47537330f729Sjoerg
47547330f729Sjoerg if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
47557330f729Sjoerg Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
47567330f729Sjoerg DstTy);
47577330f729Sjoerg }
47587330f729Sjoerg
47597330f729Sjoerg Src->setName("astype");
47607330f729Sjoerg return Src;
47617330f729Sjoerg }
47627330f729Sjoerg
47637330f729Sjoerg // Going from non-vec3 to vec3 is a special case and requires a bitcast
47647330f729Sjoerg // to vec4 if the original type is not vec4, then a shuffle vector to
47657330f729Sjoerg // get a vec3.
47667330f729Sjoerg if (NumElementsSrc != 3 && NumElementsDst == 3) {
47677330f729Sjoerg if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4768*e038c9c4Sjoerg auto *Vec4Ty = llvm::FixedVectorType::get(
4769*e038c9c4Sjoerg cast<llvm::VectorType>(DstTy)->getElementType(), 4);
47707330f729Sjoerg Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
47717330f729Sjoerg Vec4Ty);
47727330f729Sjoerg }
47737330f729Sjoerg
47747330f729Sjoerg Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
47757330f729Sjoerg Src->setName("astype");
47767330f729Sjoerg return Src;
47777330f729Sjoerg }
47787330f729Sjoerg
47797330f729Sjoerg return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
47807330f729Sjoerg Src, DstTy, "astype");
47817330f729Sjoerg }
47827330f729Sjoerg
47837330f729Sjoerg Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
47847330f729Sjoerg return CGF.EmitAtomicExpr(E).getScalarVal();
47857330f729Sjoerg }
47867330f729Sjoerg
47877330f729Sjoerg //===----------------------------------------------------------------------===//
47887330f729Sjoerg // Entry Point into this File
47897330f729Sjoerg //===----------------------------------------------------------------------===//
47907330f729Sjoerg
47917330f729Sjoerg /// Emit the computation of the specified expression of scalar type, ignoring
47927330f729Sjoerg /// the result.
47937330f729Sjoerg Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
47947330f729Sjoerg assert(E && hasScalarEvaluationKind(E->getType()) &&
47957330f729Sjoerg "Invalid scalar expression to emit");
47967330f729Sjoerg
47977330f729Sjoerg return ScalarExprEmitter(*this, IgnoreResultAssign)
47987330f729Sjoerg .Visit(const_cast<Expr *>(E));
47997330f729Sjoerg }
48007330f729Sjoerg
48017330f729Sjoerg /// Emit a conversion from the specified type to the specified destination type,
48027330f729Sjoerg /// both of which are LLVM scalar types.
48037330f729Sjoerg Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
48047330f729Sjoerg QualType DstTy,
48057330f729Sjoerg SourceLocation Loc) {
48067330f729Sjoerg assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
48077330f729Sjoerg "Invalid scalar expression to emit");
48087330f729Sjoerg return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
48097330f729Sjoerg }
48107330f729Sjoerg
48117330f729Sjoerg /// Emit a conversion from the specified complex type to the specified
48127330f729Sjoerg /// destination type, where the destination type is an LLVM scalar type.
48137330f729Sjoerg Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
48147330f729Sjoerg QualType SrcTy,
48157330f729Sjoerg QualType DstTy,
48167330f729Sjoerg SourceLocation Loc) {
48177330f729Sjoerg assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
48187330f729Sjoerg "Invalid complex -> scalar conversion");
48197330f729Sjoerg return ScalarExprEmitter(*this)
48207330f729Sjoerg .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
48217330f729Sjoerg }
48227330f729Sjoerg
48237330f729Sjoerg
48247330f729Sjoerg llvm::Value *CodeGenFunction::
48257330f729Sjoerg EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
48267330f729Sjoerg bool isInc, bool isPre) {
48277330f729Sjoerg return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
48287330f729Sjoerg }
48297330f729Sjoerg
48307330f729Sjoerg LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
48317330f729Sjoerg // object->isa or (*object).isa
48327330f729Sjoerg // Generate code as for: *(Class*)object
48337330f729Sjoerg
48347330f729Sjoerg Expr *BaseExpr = E->getBase();
48357330f729Sjoerg Address Addr = Address::invalid();
48367330f729Sjoerg if (BaseExpr->isRValue()) {
48377330f729Sjoerg Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
48387330f729Sjoerg } else {
4839*e038c9c4Sjoerg Addr = EmitLValue(BaseExpr).getAddress(*this);
48407330f729Sjoerg }
48417330f729Sjoerg
48427330f729Sjoerg // Cast the address to Class*.
48437330f729Sjoerg Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
48447330f729Sjoerg return MakeAddrLValue(Addr, E->getType());
48457330f729Sjoerg }
48467330f729Sjoerg
48477330f729Sjoerg
48487330f729Sjoerg LValue CodeGenFunction::EmitCompoundAssignmentLValue(
48497330f729Sjoerg const CompoundAssignOperator *E) {
48507330f729Sjoerg ScalarExprEmitter Scalar(*this);
48517330f729Sjoerg Value *Result = nullptr;
48527330f729Sjoerg switch (E->getOpcode()) {
48537330f729Sjoerg #define COMPOUND_OP(Op) \
48547330f729Sjoerg case BO_##Op##Assign: \
48557330f729Sjoerg return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
48567330f729Sjoerg Result)
48577330f729Sjoerg COMPOUND_OP(Mul);
48587330f729Sjoerg COMPOUND_OP(Div);
48597330f729Sjoerg COMPOUND_OP(Rem);
48607330f729Sjoerg COMPOUND_OP(Add);
48617330f729Sjoerg COMPOUND_OP(Sub);
48627330f729Sjoerg COMPOUND_OP(Shl);
48637330f729Sjoerg COMPOUND_OP(Shr);
48647330f729Sjoerg COMPOUND_OP(And);
48657330f729Sjoerg COMPOUND_OP(Xor);
48667330f729Sjoerg COMPOUND_OP(Or);
48677330f729Sjoerg #undef COMPOUND_OP
48687330f729Sjoerg
48697330f729Sjoerg case BO_PtrMemD:
48707330f729Sjoerg case BO_PtrMemI:
48717330f729Sjoerg case BO_Mul:
48727330f729Sjoerg case BO_Div:
48737330f729Sjoerg case BO_Rem:
48747330f729Sjoerg case BO_Add:
48757330f729Sjoerg case BO_Sub:
48767330f729Sjoerg case BO_Shl:
48777330f729Sjoerg case BO_Shr:
48787330f729Sjoerg case BO_LT:
48797330f729Sjoerg case BO_GT:
48807330f729Sjoerg case BO_LE:
48817330f729Sjoerg case BO_GE:
48827330f729Sjoerg case BO_EQ:
48837330f729Sjoerg case BO_NE:
48847330f729Sjoerg case BO_Cmp:
48857330f729Sjoerg case BO_And:
48867330f729Sjoerg case BO_Xor:
48877330f729Sjoerg case BO_Or:
48887330f729Sjoerg case BO_LAnd:
48897330f729Sjoerg case BO_LOr:
48907330f729Sjoerg case BO_Assign:
48917330f729Sjoerg case BO_Comma:
48927330f729Sjoerg llvm_unreachable("Not valid compound assignment operators");
48937330f729Sjoerg }
48947330f729Sjoerg
48957330f729Sjoerg llvm_unreachable("Unhandled compound assignment operator");
48967330f729Sjoerg }
48977330f729Sjoerg
48987330f729Sjoerg struct GEPOffsetAndOverflow {
48997330f729Sjoerg // The total (signed) byte offset for the GEP.
49007330f729Sjoerg llvm::Value *TotalOffset;
49017330f729Sjoerg // The offset overflow flag - true if the total offset overflows.
49027330f729Sjoerg llvm::Value *OffsetOverflows;
49037330f729Sjoerg };
49047330f729Sjoerg
49057330f729Sjoerg /// Evaluate the given GEPVal, which is either an inbounds GEP or a constant,
49067330f729Sjoerg /// and compute the total offset it applies from its base pointer BasePtr.
49077330f729Sjoerg /// Returns the offset in bytes and a boolean flag indicating whether an
49087330f729Sjoerg /// overflow happened during evaluation.
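/// As a rough example, for a GEP that corresponds to '&p[i].f' the total
/// offset is computed as 'i * sizeof(*p) + offsetof(<struct of p>, f)', with
/// every add and multiply checked for signed overflow.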
49097330f729Sjoerg static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
49107330f729Sjoerg llvm::LLVMContext &VMContext,
49117330f729Sjoerg CodeGenModule &CGM,
4912*e038c9c4Sjoerg CGBuilderTy &Builder) {
49137330f729Sjoerg const auto &DL = CGM.getDataLayout();
49147330f729Sjoerg
49157330f729Sjoerg // The total (signed) byte offset for the GEP.
49167330f729Sjoerg llvm::Value *TotalOffset = nullptr;
49177330f729Sjoerg
49187330f729Sjoerg // Was the GEP already reduced to a constant?
49197330f729Sjoerg if (isa<llvm::Constant>(GEPVal)) {
49207330f729Sjoerg // Compute the offset by casting both pointers to integers and subtracting:
49217330f729Sjoerg // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
49227330f729Sjoerg Value *BasePtr_int =
49237330f729Sjoerg Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
49247330f729Sjoerg Value *GEPVal_int =
49257330f729Sjoerg Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
49267330f729Sjoerg TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
49277330f729Sjoerg return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
49287330f729Sjoerg }
49297330f729Sjoerg
49307330f729Sjoerg auto *GEP = cast<llvm::GEPOperator>(GEPVal);
49317330f729Sjoerg assert(GEP->getPointerOperand() == BasePtr &&
49327330f729Sjoerg "BasePtr must be the the base of the GEP.");
49337330f729Sjoerg assert(GEP->isInBounds() && "Expected inbounds GEP");
49347330f729Sjoerg
49357330f729Sjoerg auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
49367330f729Sjoerg
49377330f729Sjoerg // Grab references to the signed add/mul overflow intrinsics for intptr_t.
49387330f729Sjoerg auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
49397330f729Sjoerg auto *SAddIntrinsic =
49407330f729Sjoerg CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
49417330f729Sjoerg auto *SMulIntrinsic =
49427330f729Sjoerg CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
49437330f729Sjoerg
49447330f729Sjoerg // The offset overflow flag - true if the total offset overflows.
49457330f729Sjoerg llvm::Value *OffsetOverflows = Builder.getFalse();
49467330f729Sjoerg
49477330f729Sjoerg /// Return the result of the given binary operation.
49487330f729Sjoerg auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
49497330f729Sjoerg llvm::Value *RHS) -> llvm::Value * {
49507330f729Sjoerg assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
49517330f729Sjoerg
49527330f729Sjoerg // If the operands are constants, return a constant result.
49537330f729Sjoerg if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
49547330f729Sjoerg if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
49557330f729Sjoerg llvm::APInt N;
49567330f729Sjoerg bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
49577330f729Sjoerg /*Signed=*/true, N);
49587330f729Sjoerg if (HasOverflow)
49597330f729Sjoerg OffsetOverflows = Builder.getTrue();
49607330f729Sjoerg return llvm::ConstantInt::get(VMContext, N);
49617330f729Sjoerg }
49627330f729Sjoerg }
49637330f729Sjoerg
49647330f729Sjoerg // Otherwise, compute the result with checked arithmetic.
49657330f729Sjoerg auto *ResultAndOverflow = Builder.CreateCall(
49667330f729Sjoerg (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
49677330f729Sjoerg OffsetOverflows = Builder.CreateOr(
49687330f729Sjoerg Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
49697330f729Sjoerg return Builder.CreateExtractValue(ResultAndOverflow, 0);
49707330f729Sjoerg };
49717330f729Sjoerg
49727330f729Sjoerg // Determine the total byte offset by looking at each GEP operand.
49737330f729Sjoerg for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
49747330f729Sjoerg GTI != GTE; ++GTI) {
49757330f729Sjoerg llvm::Value *LocalOffset;
49767330f729Sjoerg auto *Index = GTI.getOperand();
49777330f729Sjoerg // Compute the local offset contributed by this indexing step:
49787330f729Sjoerg if (auto *STy = GTI.getStructTypeOrNull()) {
49797330f729Sjoerg // For struct indexing, the local offset is the byte position of the
49807330f729Sjoerg // specified field.
49817330f729Sjoerg unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
49827330f729Sjoerg LocalOffset = llvm::ConstantInt::get(
49837330f729Sjoerg IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
49847330f729Sjoerg } else {
49857330f729Sjoerg // Otherwise this is array-like indexing. The local offset is the index
49867330f729Sjoerg // multiplied by the element size.
49877330f729Sjoerg auto *ElementSize = llvm::ConstantInt::get(
49887330f729Sjoerg IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
49897330f729Sjoerg auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
49907330f729Sjoerg LocalOffset = eval(BO_Mul, ElementSize, IndexS);
49917330f729Sjoerg }
49927330f729Sjoerg
49937330f729Sjoerg // If this is the first offset, set it as the total offset. Otherwise, add
49947330f729Sjoerg // the local offset into the running total.
49957330f729Sjoerg if (!TotalOffset || TotalOffset == Zero)
49967330f729Sjoerg TotalOffset = LocalOffset;
49977330f729Sjoerg else
49987330f729Sjoerg TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
49997330f729Sjoerg }
50007330f729Sjoerg
50017330f729Sjoerg return {TotalOffset, OffsetOverflows};
50027330f729Sjoerg }
50037330f729Sjoerg
50047330f729Sjoerg Value *
50057330f729Sjoerg CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
50067330f729Sjoerg bool SignedIndices, bool IsSubtraction,
50077330f729Sjoerg SourceLocation Loc, const Twine &Name) {
50087330f729Sjoerg Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name);
50097330f729Sjoerg
50107330f729Sjoerg // If the pointer overflow sanitizer isn't enabled, do nothing.
50117330f729Sjoerg if (!SanOpts.has(SanitizerKind::PointerOverflow))
50127330f729Sjoerg return GEPVal;
50137330f729Sjoerg
50147330f729Sjoerg llvm::Type *PtrTy = Ptr->getType();
50157330f729Sjoerg
50167330f729Sjoerg // Perform nullptr-and-offset check unless the nullptr is defined.
50177330f729Sjoerg bool PerformNullCheck = !NullPointerIsDefined(
50187330f729Sjoerg Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
50197330f729Sjoerg // Check for overflows unless the GEP got constant-folded,
50207330f729Sjoerg // and only in the default address space
50217330f729Sjoerg bool PerformOverflowCheck =
50227330f729Sjoerg !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
50237330f729Sjoerg
50247330f729Sjoerg if (!(PerformNullCheck || PerformOverflowCheck))
50257330f729Sjoerg return GEPVal;
50267330f729Sjoerg
50277330f729Sjoerg const auto &DL = CGM.getDataLayout();
50287330f729Sjoerg
50297330f729Sjoerg SanitizerScope SanScope(this);
50307330f729Sjoerg llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
50317330f729Sjoerg
50327330f729Sjoerg GEPOffsetAndOverflow EvaluatedGEP =
50337330f729Sjoerg EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
50347330f729Sjoerg
50357330f729Sjoerg assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
50367330f729Sjoerg EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
50377330f729Sjoerg "If the offset got constant-folded, we don't expect that there was an "
50387330f729Sjoerg "overflow.");
50397330f729Sjoerg
50407330f729Sjoerg auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
50417330f729Sjoerg
50427330f729Sjoerg // Common case: if the total offset is zero, and we are using C++ semantics,
50437330f729Sjoerg // where nullptr+0 is defined, don't emit a check.
50447330f729Sjoerg if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
50457330f729Sjoerg return GEPVal;
50467330f729Sjoerg
50477330f729Sjoerg // Now that we've computed the total offset, add it to the base pointer (with
50487330f729Sjoerg // wrapping semantics).
50497330f729Sjoerg auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
50507330f729Sjoerg auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
50517330f729Sjoerg
50527330f729Sjoerg llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
50537330f729Sjoerg
50547330f729Sjoerg if (PerformNullCheck) {
50557330f729Sjoerg // In C++, if the base pointer evaluates to a null pointer value,
50567330f729Sjoerg // the only valid pointer this inbounds GEP can produce is also
50577330f729Sjoerg // a null pointer, so the offset must also evaluate to zero.
50587330f729Sjoerg // Likewise, if we have a non-zero base pointer, we cannot get a null pointer
50597330f729Sjoerg // as a result, so the offset cannot be -intptr_t(BasePtr).
50607330f729Sjoerg // In other words, both pointers are either null, or both are non-null,
50617330f729Sjoerg // or the behaviour is undefined.
50627330f729Sjoerg //
50637330f729Sjoerg // C, however, is more strict in this regard, and gives more
50647330f729Sjoerg // optimization opportunities: in C, additionally, nullptr+0 is undefined.
50657330f729Sjoerg // So both the input to the 'gep inbounds' AND the output must not be null.
50667330f729Sjoerg auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
50677330f729Sjoerg auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
50687330f729Sjoerg auto *Valid =
50697330f729Sjoerg CGM.getLangOpts().CPlusPlus
50707330f729Sjoerg ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
50717330f729Sjoerg : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
50727330f729Sjoerg Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
50737330f729Sjoerg }
50747330f729Sjoerg
50757330f729Sjoerg if (PerformOverflowCheck) {
50767330f729Sjoerg // The GEP is valid if:
50777330f729Sjoerg // 1) The total offset doesn't overflow, and
50787330f729Sjoerg // 2) The sign of the difference between the computed address and the base
50797330f729Sjoerg // pointer matches the sign of the total offset.
50807330f729Sjoerg llvm::Value *ValidGEP;
50817330f729Sjoerg auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
50827330f729Sjoerg if (SignedIndices) {
50837330f729Sjoerg // GEP is computed as `unsigned base + signed offset`, therefore:
50847330f729Sjoerg // * If offset was positive, then the computed pointer can not be
50857330f729Sjoerg // [unsigned] less than the base pointer, unless it overflowed.
50867330f729Sjoerg // * If offset was negative, then the computed pointer can not be
50877330f729Sjoerg // [unsigned] greater than the base pointer, unless it overflowed.
50887330f729Sjoerg auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
50897330f729Sjoerg auto *PosOrZeroOffset =
50907330f729Sjoerg Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
50917330f729Sjoerg llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
50927330f729Sjoerg ValidGEP =
50937330f729Sjoerg Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
50947330f729Sjoerg } else if (!IsSubtraction) {
50957330f729Sjoerg // GEP is computed as `unsigned base + unsigned offset`, therefore the
50967330f729Sjoerg // computed pointer can not be [unsigned] less than base pointer,
50977330f729Sjoerg // unless there was an overflow.
50987330f729Sjoerg // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
50997330f729Sjoerg ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
51007330f729Sjoerg } else {
51017330f729Sjoerg // GEP is computed as `unsigned base - unsigned offset`, therefore the
51027330f729Sjoerg // computed pointer can not be [unsigned] greater than base pointer,
51037330f729Sjoerg // unless there was an overflow.
51047330f729Sjoerg // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
51057330f729Sjoerg ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
51067330f729Sjoerg }
51077330f729Sjoerg ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
51087330f729Sjoerg Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
51097330f729Sjoerg }
51107330f729Sjoerg
51117330f729Sjoerg assert(!Checks.empty() && "Should have produced some checks.");
51127330f729Sjoerg
51137330f729Sjoerg llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
51147330f729Sjoerg // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
51157330f729Sjoerg llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
51167330f729Sjoerg EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
51177330f729Sjoerg
51187330f729Sjoerg return GEPVal;
51197330f729Sjoerg }