//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CRC.h"
#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
using namespace clang;
using namespace CodeGen;

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the life-time
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Sanitizers may use markers.
  if (CGOpts.SanitizeAddressUseAfterScope ||
      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
      LangOpts.Sanitize.has(SanitizerKind::Memory))
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
      DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm),
      ShouldEmitLifetimeMarkers(
          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();
  EHStack.setCGF(this);

  SetFastMathFlags(CurFPFeatures);
  SetFPModel();
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);

  // If we have an OpenMPIRBuilder we want to finalize functions (incl.
  // outlining etc) at some point. Doing it once the function codegen is done
  // seems to be a reasonable spot. We do it here, as opposed to the deletion
  // time of the CodeGenModule, because we have to ensure the IR has not yet
  // been "emitted" to the outside, thus, modifications are still sensible.
  if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
    CGM.getOpenMPRuntime().getOMPBuilder().finalize(CurFn);
}

// Map the LangOption for exception behavior into
// the corresponding enum in the IR.
llvm::fp::ExceptionBehavior
clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {

  switch (Kind) {
  case LangOptions::FPE_Ignore:  return llvm::fp::ebIgnore;
  case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
  case LangOptions::FPE_Strict:  return llvm::fp::ebStrict;
  }
  llvm_unreachable("Unsupported FP Exception Behavior");
}

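/// Configure the IRBuilder's default constrained-FP settings (rounding mode
/// and exception behavior) from the language options, and mark the builder as
/// FP-constrained when either differs from the default.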
void CodeGenFunction::SetFPModel() {
  llvm::RoundingMode RM = getLangOpts().getFPRoundingMode();
  auto fpExceptionBehavior = ToConstrainedExceptMD(
                               getLangOpts().getFPExceptionMode());

  Builder.setDefaultConstrainedRounding(RM);
  Builder.setDefaultConstrainedExcept(fpExceptionBehavior);
  Builder.setIsFPConstrained(fpExceptionBehavior != llvm::fp::ebIgnore ||
                             RM != llvm::RoundingMode::NearestTiesToEven);
}

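/// Translate the given FPOptions into llvm::FastMathFlags and install them as
/// the IRBuilder's defaults for subsequently emitted instructions.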
void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
  llvm::FastMathFlags FMF;
  FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
  FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
  FMF.setNoInfs(FPFeatures.getNoHonorInfs());
  FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
  FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
  FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
  Builder.setFastMathFlags(FMF);
}

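// CGFPOptionsRAII scopes a change of floating-point options: it records the
// current FP state on construction, applies the new FPOptions, and restores
// the saved state in its destructor.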
CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  const Expr *E)
    : CGF(CGF) {
  ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
}

CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  FPOptions FPFeatures)
    : CGF(CGF) {
  ConstructorHelper(FPFeatures);
}

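// Shared constructor logic: save the old FP state, install the new rounding
// mode, exception behavior and fast-math flags, and merge the corresponding
// function-level FP attributes.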
void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
  OldFPFeatures = CGF.CurFPFeatures;
  CGF.CurFPFeatures = FPFeatures;

  OldExcept = CGF.Builder.getDefaultConstrainedExcept();
  OldRounding = CGF.Builder.getDefaultConstrainedRounding();

  if (OldFPFeatures == FPFeatures)
    return;

  FMFGuard.emplace(CGF.Builder);

  llvm::RoundingMode NewRoundingBehavior =
      static_cast<llvm::RoundingMode>(FPFeatures.getRoundingMode());
  CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
  auto NewExceptionBehavior =
      ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
          FPFeatures.getFPExceptionMode()));
  CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);

  CGF.SetFastMathFlags(FPFeatures);

  assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
          isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
          isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
          (NewExceptionBehavior == llvm::fp::ebIgnore &&
           NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
         "FPConstrained should be enabled on entire function");

  auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
    auto OldValue =
        CGF.CurFn->getFnAttribute(Name).getValueAsBool();
    auto NewValue = OldValue & Value;
    if (OldValue != NewValue)
      CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
  };
  mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
  mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
  mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
  mergeFnAttrValue("unsafe-fp-math", FPFeatures.getAllowFPReassociate() &&
                                         FPFeatures.getAllowReciprocal() &&
                                         FPFeatures.getAllowApproxFunc() &&
                                         FPFeatures.getNoSignedZero());
}

CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
  CGF.CurFPFeatures = OldFPFeatures;
  CGF.Builder.setDefaultConstrainedExcept(OldExcept);
  CGF.Builder.setDefaultConstrainedRounding(OldRounding);
}

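/// Construct an l-value for the pointer V of type T, using the natural
/// alignment of a complete object of type T.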
LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
                          TBAAInfo);
}

/// Given a value of type T* that may not point to a complete object,
/// construct an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
                                                /* forPointeeType= */ true);
  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
}


llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

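/// Classify a canonical type by how CodeGen evaluates values of that type:
/// as a scalar, as a complex pair, or as an aggregate in memory. Atomic types
/// are classified by their underlying value type.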
TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::ExtInt:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

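/// Emit the unified return block, folding it into the current block when
/// possible, and return the debug location of the single simple 'return'
/// statement if one was found.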
llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be used
      // later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

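// Append BB to the current function if anything refers to it; otherwise
// delete the unused block.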
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

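/// Finish emission of the current function: emit cleanups and the function
/// epilog, finalize debug info, and apply the remaining function-level
/// attributes such as "min-legal-vector-width".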
void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code.  If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement.  Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters.  Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    Optional<ApplyDebugLocation> AL;
    if (CGDebugInfo *DI = getDebugInfo()) {
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);
      else
        // We may not have a valid end location. Try to apply it anyway, and
        // fall back to an artificial location if needed.
        AL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
    }

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if any,
  // rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should be
    // no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (const auto &R : DeferredReplacements) {
    if (llvm::Value *Old = R.first) {
      Old->replaceAllUsesWith(R.second);
      cast<llvm::Instruction>(Old)->eraseFromParent();
    }
  }
  DeferredReplacements.clear();

  // Eliminate the CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do it for all
  // functions as it may result in a slight increase in the number of
  // instructions if compiled with no optimizations. We do it for coroutines
  // because the lifetime of the CleanupDestSlot alloca makes correct coroutine
  // frame building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinSize());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinSize());

  // Add the required-vector-width attribute. This contains the max width from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by this
  //    function.
  CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));

  // Add vscale attribute if appropriate.
  if (getLangOpts().ArmSveVectorBits) {
    unsigned VScale = getLangOpts().ArmSveVectorBits / 128;
    CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(getLLVMContext(),
                                                             VScale, VScale));
  }

  // If we generated an unreachable return block, delete it now.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
    Builder.ClearInsertionPoint();
    ReturnBlock.getBlock()->eraseFromParent();
  }
  if (ReturnValue.isValid()) {
    auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
    if (RetAlloca && RetAlloca->use_empty()) {
      RetAlloca->eraseFromParent();
      ReturnValue = Address::invalid();
    }
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// ShouldXRayInstrumentFunction - Return true if the current function should
/// be instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

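/// AlwaysEmitXRayTypedEvents - Return true if we should emit IR for calls to
/// the __xray_typedevent(...) builtin, when doing XRay instrumentation.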
bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}

llvm::Constant *
CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
                                            llvm::Constant *Addr) {
  // Addresses stored in prologue data can't require run-time fixups and must
  // be PC-relative. Run-time fixups are undesirable because they necessitate
  // writable text segments, which are unsafe. And absolute addresses are
  // undesirable because they break PIE mode.

  // Add a layer of indirection through a private global. Taking its address
  // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
  auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
                                      /*isConstant=*/true,
                                      llvm::GlobalValue::PrivateLinkage, Addr);

  // Create a PC-relative address.
  auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
  auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
  auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
  return (IntPtrTy == Int32Ty)
             ? PCRelAsInt
             : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
}

llvm::Value *
CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
                                          llvm::Value *EncodedAddr) {
  // Reconstruct the address of the global.
  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");

  // Load the original pointer through the global.
  return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
                            "decoded_addr");
}

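// Emit the OpenCL kernel metadata nodes (vec_type_hint, work_group_size_hint,
// reqd_work_group_size, intel_reqd_sub_group_size) for functions carrying the
// OpenCL kernel attribute.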
void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn)
{
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  CGM.GenOpenCLArgMetadata(Fn, FD, this);

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}

/// Check if the return value of this function requires sanitization.
bool CodeGenFunction::requiresReturnValueCheck() const {
  return requiresReturnValueNullabilityCheck() ||
         (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
          CurCodeDecl->getAttr<ReturnsNonNullAttr>());
}

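// Return true if D looks like an STL allocator's allocate() member function:
// a method named "allocate" taking a size_t, optionally followed by a
// 'const void *' hint parameter.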
static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}

/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

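/// Begin emission of a function body: initialize per-function state (CurFn,
/// CurFnInfo, sanitizer and XRay attributes), create the entry block and
/// alloca insertion point, and emit the subprogram debug descriptor.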
StartFunction(GlobalDecl GD,QualType RetTy,llvm::Function * Fn,const CGFunctionInfo & FnInfo,const FunctionArgList & Args,SourceLocation Loc,SourceLocation StartLoc)700*e038c9c4Sjoerg void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
7017330f729Sjoerg                                     llvm::Function *Fn,
7027330f729Sjoerg                                     const CGFunctionInfo &FnInfo,
7037330f729Sjoerg                                     const FunctionArgList &Args,
7047330f729Sjoerg                                     SourceLocation Loc,
7057330f729Sjoerg                                     SourceLocation StartLoc) {
7067330f729Sjoerg   assert(!CurFn &&
7077330f729Sjoerg          "Do not use a CodeGenFunction object for more than one function");
7087330f729Sjoerg 
7097330f729Sjoerg   const Decl *D = GD.getDecl();
7107330f729Sjoerg 
7117330f729Sjoerg   DidCallStackSave = false;
7127330f729Sjoerg   CurCodeDecl = D;
7137330f729Sjoerg   if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
7147330f729Sjoerg     if (FD->usesSEHTry())
7157330f729Sjoerg       CurSEHParent = FD;
7167330f729Sjoerg   CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
7177330f729Sjoerg   FnRetTy = RetTy;
7187330f729Sjoerg   CurFn = Fn;
7197330f729Sjoerg   CurFnInfo = &FnInfo;
7207330f729Sjoerg   assert(CurFn->isDeclaration() && "Function already has body?");
7217330f729Sjoerg 
722*e038c9c4Sjoerg   // If this function is ignored for any of the enabled sanitizers,
7237330f729Sjoerg   // disable the sanitizer for the function.
7247330f729Sjoerg   do {
7257330f729Sjoerg #define SANITIZER(NAME, ID)                                                    \
7267330f729Sjoerg   if (SanOpts.empty())                                                         \
7277330f729Sjoerg     break;                                                                     \
7287330f729Sjoerg   if (SanOpts.has(SanitizerKind::ID))                                          \
729*e038c9c4Sjoerg     if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc))                    \
7307330f729Sjoerg       SanOpts.set(SanitizerKind::ID, false);
7317330f729Sjoerg 
7327330f729Sjoerg #include "clang/Basic/Sanitizers.def"
7337330f729Sjoerg #undef SANITIZER
7347330f729Sjoerg   } while (0);
7357330f729Sjoerg 
7367330f729Sjoerg   if (D) {
7377330f729Sjoerg     // Apply the no_sanitize* attributes to SanOpts.
7387330f729Sjoerg     for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
7397330f729Sjoerg       SanitizerMask mask = Attr->getMask();
7407330f729Sjoerg       SanOpts.Mask &= ~mask;
7417330f729Sjoerg       if (mask & SanitizerKind::Address)
7427330f729Sjoerg         SanOpts.set(SanitizerKind::KernelAddress, false);
7437330f729Sjoerg       if (mask & SanitizerKind::KernelAddress)
7447330f729Sjoerg         SanOpts.set(SanitizerKind::Address, false);
7457330f729Sjoerg       if (mask & SanitizerKind::HWAddress)
7467330f729Sjoerg         SanOpts.set(SanitizerKind::KernelHWAddress, false);
7477330f729Sjoerg       if (mask & SanitizerKind::KernelHWAddress)
7487330f729Sjoerg         SanOpts.set(SanitizerKind::HWAddress, false);
7497330f729Sjoerg     }
7507330f729Sjoerg   }
7517330f729Sjoerg 
7527330f729Sjoerg   // Apply sanitizer attributes to the function.
7537330f729Sjoerg   if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
7547330f729Sjoerg     Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
7557330f729Sjoerg   if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
7567330f729Sjoerg     Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
7577330f729Sjoerg   if (SanOpts.has(SanitizerKind::MemTag))
7587330f729Sjoerg     Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
7597330f729Sjoerg   if (SanOpts.has(SanitizerKind::Thread))
7607330f729Sjoerg     Fn->addFnAttr(llvm::Attribute::SanitizeThread);
7617330f729Sjoerg   if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
7627330f729Sjoerg     Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
7637330f729Sjoerg   if (SanOpts.has(SanitizerKind::SafeStack))
7647330f729Sjoerg     Fn->addFnAttr(llvm::Attribute::SafeStack);
7657330f729Sjoerg   if (SanOpts.has(SanitizerKind::ShadowCallStack))
7667330f729Sjoerg     Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
7677330f729Sjoerg 
7687330f729Sjoerg   // Apply fuzzing attribute to the function.
7697330f729Sjoerg   if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
7707330f729Sjoerg     Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
7717330f729Sjoerg 
7727330f729Sjoerg   // Ignore TSan memory acesses from within ObjC/ObjC++ dealloc, initialize,
7737330f729Sjoerg   // .cxx_destruct, __destroy_helper_block_ and all of their calees at run time.
7747330f729Sjoerg   if (SanOpts.has(SanitizerKind::Thread)) {
7757330f729Sjoerg     if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
7767330f729Sjoerg       IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
7777330f729Sjoerg       if (OMD->getMethodFamily() == OMF_dealloc ||
7787330f729Sjoerg           OMD->getMethodFamily() == OMF_initialize ||
7797330f729Sjoerg           (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
7807330f729Sjoerg         markAsIgnoreThreadCheckingAtRuntime(Fn);
7817330f729Sjoerg       }
7827330f729Sjoerg     }
7837330f729Sjoerg   }
7847330f729Sjoerg 
7857330f729Sjoerg   // Ignore unrelated casts in STL allocate() since the allocator must cast
7867330f729Sjoerg   // from void* to T* before object initialization completes. Don't match on the
7877330f729Sjoerg   // namespace because not all allocators are in std::
7887330f729Sjoerg   if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
7897330f729Sjoerg     if (matchesStlAllocatorFn(D, getContext()))
7907330f729Sjoerg       SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
7917330f729Sjoerg   }
7927330f729Sjoerg 
7937330f729Sjoerg   // Ignore null checks in coroutine functions since the coroutines passes
7947330f729Sjoerg   // are not aware of how to move the extra UBSan instructions across the split
7957330f729Sjoerg   // coroutine boundaries.
7967330f729Sjoerg   if (D && SanOpts.has(SanitizerKind::Null))
7977330f729Sjoerg     if (const auto *FD = dyn_cast<FunctionDecl>(D))
7987330f729Sjoerg       if (FD->getBody() &&
7997330f729Sjoerg           FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
8007330f729Sjoerg         SanOpts.Mask &= ~SanitizerKind::Null;
8017330f729Sjoerg 
8027330f729Sjoerg   // Apply xray attributes to the function (as a string, for now)
803*e038c9c4Sjoerg   bool AlwaysXRayAttr = false;
804*e038c9c4Sjoerg   if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
8057330f729Sjoerg     if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
806*e038c9c4Sjoerg             XRayInstrKind::FunctionEntry) ||
807*e038c9c4Sjoerg         CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
808*e038c9c4Sjoerg             XRayInstrKind::FunctionExit)) {
809*e038c9c4Sjoerg       if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
8107330f729Sjoerg         Fn->addFnAttr("function-instrument", "xray-always");
811*e038c9c4Sjoerg         AlwaysXRayAttr = true;
812*e038c9c4Sjoerg       }
8137330f729Sjoerg       if (XRayAttr->neverXRayInstrument())
8147330f729Sjoerg         Fn->addFnAttr("function-instrument", "xray-never");
8157330f729Sjoerg       if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
8167330f729Sjoerg         if (ShouldXRayInstrumentFunction())
8177330f729Sjoerg           Fn->addFnAttr("xray-log-args",
8187330f729Sjoerg                         llvm::utostr(LogArgs->getArgumentCount()));
8197330f729Sjoerg     }
8207330f729Sjoerg   } else {
8217330f729Sjoerg     if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
8227330f729Sjoerg       Fn->addFnAttr(
8237330f729Sjoerg           "xray-instruction-threshold",
8247330f729Sjoerg           llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
8257330f729Sjoerg   }
826*e038c9c4Sjoerg 
827*e038c9c4Sjoerg   if (ShouldXRayInstrumentFunction()) {
828*e038c9c4Sjoerg     if (CGM.getCodeGenOpts().XRayIgnoreLoops)
829*e038c9c4Sjoerg       Fn->addFnAttr("xray-ignore-loops");
830*e038c9c4Sjoerg 
831*e038c9c4Sjoerg     if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
832*e038c9c4Sjoerg             XRayInstrKind::FunctionExit))
833*e038c9c4Sjoerg       Fn->addFnAttr("xray-skip-exit");
834*e038c9c4Sjoerg 
835*e038c9c4Sjoerg     if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
836*e038c9c4Sjoerg             XRayInstrKind::FunctionEntry))
837*e038c9c4Sjoerg       Fn->addFnAttr("xray-skip-entry");
838*e038c9c4Sjoerg 
839*e038c9c4Sjoerg     auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
840*e038c9c4Sjoerg     if (FuncGroups > 1) {
841*e038c9c4Sjoerg       auto FuncName = llvm::makeArrayRef<uint8_t>(
842*e038c9c4Sjoerg           CurFn->getName().bytes_begin(), CurFn->getName().bytes_end());
843*e038c9c4Sjoerg       auto Group = crc32(FuncName) % FuncGroups;
844*e038c9c4Sjoerg       if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
845*e038c9c4Sjoerg           !AlwaysXRayAttr)
846*e038c9c4Sjoerg         Fn->addFnAttr("function-instrument", "xray-never");
847*e038c9c4Sjoerg     }
848*e038c9c4Sjoerg   }
849*e038c9c4Sjoerg 
850*e038c9c4Sjoerg   if (CGM.getCodeGenOpts().getProfileInstr() != CodeGenOptions::ProfileNone)
851*e038c9c4Sjoerg     if (CGM.isProfileInstrExcluded(Fn, Loc))
852*e038c9c4Sjoerg       Fn->addFnAttr(llvm::Attribute::NoProfile);
853*e038c9c4Sjoerg 
854*e038c9c4Sjoerg   unsigned Count, Offset;
855*e038c9c4Sjoerg   if (const auto *Attr =
856*e038c9c4Sjoerg           D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
857*e038c9c4Sjoerg     Count = Attr->getCount();
858*e038c9c4Sjoerg     Offset = Attr->getOffset();
859*e038c9c4Sjoerg   } else {
860*e038c9c4Sjoerg     Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
861*e038c9c4Sjoerg     Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
862*e038c9c4Sjoerg   }
863*e038c9c4Sjoerg   if (Count && Offset <= Count) {
864*e038c9c4Sjoerg     Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
865*e038c9c4Sjoerg     if (Offset)
866*e038c9c4Sjoerg       Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
8677330f729Sjoerg   }
8687330f729Sjoerg 
8697330f729Sjoerg   // Add no-jump-tables value.
870*e038c9c4Sjoerg   if (CGM.getCodeGenOpts().NoUseJumpTables)
871*e038c9c4Sjoerg     Fn->addFnAttr("no-jump-tables", "true");
872*e038c9c4Sjoerg 
873*e038c9c4Sjoerg   // Add no-inline-line-tables value.
874*e038c9c4Sjoerg   if (CGM.getCodeGenOpts().NoInlineLineTables)
875*e038c9c4Sjoerg     Fn->addFnAttr("no-inline-line-tables");
8767330f729Sjoerg 
8777330f729Sjoerg   // Add profile-sample-accurate value.
8787330f729Sjoerg   if (CGM.getCodeGenOpts().ProfileSampleAccurate)
8797330f729Sjoerg     Fn->addFnAttr("profile-sample-accurate");
8807330f729Sjoerg 
881*e038c9c4Sjoerg   if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
882*e038c9c4Sjoerg     Fn->addFnAttr("use-sample-profile");
883*e038c9c4Sjoerg 
8847330f729Sjoerg   if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
8857330f729Sjoerg     Fn->addFnAttr("cfi-canonical-jump-table");
8867330f729Sjoerg 
8877330f729Sjoerg   if (getLangOpts().OpenCL) {
8887330f729Sjoerg     // Add metadata for a kernel function.
8897330f729Sjoerg     if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
8907330f729Sjoerg       EmitOpenCLKernelMetadata(FD, Fn);
8917330f729Sjoerg   }
8927330f729Sjoerg 
8937330f729Sjoerg   // If we are checking function types, emit a function type signature as
8947330f729Sjoerg   // prologue data.
8957330f729Sjoerg   if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
8967330f729Sjoerg     if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
8977330f729Sjoerg       if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
8987330f729Sjoerg         // Remove any (C++17) exception specifications, to allow calling e.g. a
8997330f729Sjoerg         // noexcept function through a non-noexcept pointer.
9007330f729Sjoerg         auto ProtoTy =
9017330f729Sjoerg           getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
9027330f729Sjoerg                                                         EST_None);
9037330f729Sjoerg         llvm::Constant *FTRTTIConst =
9047330f729Sjoerg             CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
9057330f729Sjoerg         llvm::Constant *FTRTTIConstEncoded =
9067330f729Sjoerg             EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
9077330f729Sjoerg         llvm::Constant *PrologueStructElems[] = {PrologueSig,
9087330f729Sjoerg                                                  FTRTTIConstEncoded};
9097330f729Sjoerg         llvm::Constant *PrologueStructConst =
9107330f729Sjoerg             llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
9117330f729Sjoerg         Fn->setPrologueData(PrologueStructConst);
9127330f729Sjoerg       }
9137330f729Sjoerg     }
9147330f729Sjoerg   }
9157330f729Sjoerg 
9167330f729Sjoerg   // If we're checking nullability, we need to know whether we can check the
9177330f729Sjoerg   // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
9187330f729Sjoerg   if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
9197330f729Sjoerg     auto Nullability = FnRetTy->getNullability(getContext());
9207330f729Sjoerg     if (Nullability && *Nullability == NullabilityKind::NonNull) {
9217330f729Sjoerg       if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
9227330f729Sjoerg             CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
9237330f729Sjoerg         RetValNullabilityPrecondition =
9247330f729Sjoerg             llvm::ConstantInt::getTrue(getLLVMContext());
9257330f729Sjoerg     }
9267330f729Sjoerg   }
9277330f729Sjoerg 
9287330f729Sjoerg   // If we're in C++ mode and the function name is "main", it is guaranteed
9297330f729Sjoerg   // to be norecurse by the standard (3.6.1.3 "The function main shall not be
9307330f729Sjoerg   // used within a program").
931*e038c9c4Sjoerg   //
932*e038c9c4Sjoerg   // OpenCL C 2.0 v2.2-11 s6.9.i:
933*e038c9c4Sjoerg   //     Recursion is not supported.
934*e038c9c4Sjoerg   //
935*e038c9c4Sjoerg   // SYCL v1.2.1 s3.10:
936*e038c9c4Sjoerg   //     kernels cannot include RTTI information, exception classes,
937*e038c9c4Sjoerg   //     recursive code, virtual functions or make use of C++ libraries that
938*e038c9c4Sjoerg   //     are not compiled for the device.
939*e038c9c4Sjoerg   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
940*e038c9c4Sjoerg     if ((getLangOpts().CPlusPlus && FD->isMain()) || getLangOpts().OpenCL ||
941*e038c9c4Sjoerg         getLangOpts().SYCLIsDevice ||
942*e038c9c4Sjoerg         (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>()))
9437330f729Sjoerg       Fn->addFnAttr(llvm::Attribute::NoRecurse);
944*e038c9c4Sjoerg   }
945*e038c9c4Sjoerg 
946*e038c9c4Sjoerg   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
947*e038c9c4Sjoerg     Builder.setIsFPConstrained(FD->hasAttr<StrictFPAttr>());
948*e038c9c4Sjoerg     if (FD->hasAttr<StrictFPAttr>())
949*e038c9c4Sjoerg       Fn->addFnAttr(llvm::Attribute::StrictFP);
950*e038c9c4Sjoerg   }
9517330f729Sjoerg 
9527330f729Sjoerg   // If a custom alignment is used, force realigning to this alignment on
9537330f729Sjoerg   // any main function, which will certainly need it.
9547330f729Sjoerg   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
9557330f729Sjoerg     if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
9567330f729Sjoerg         CGM.getCodeGenOpts().StackAlignment)
9577330f729Sjoerg       Fn->addFnAttr("stackrealign");
9587330f729Sjoerg 
9597330f729Sjoerg   llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
9607330f729Sjoerg 
9617330f729Sjoerg   // Create a marker to make it easy to insert allocas into the entry block
9627330f729Sjoerg   // later.  Don't create this with the builder, because we don't want it
9637330f729Sjoerg   // folded.
9647330f729Sjoerg   llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
9657330f729Sjoerg   AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);
9667330f729Sjoerg 
9677330f729Sjoerg   ReturnBlock = getJumpDestInCurrentScope("return");
9687330f729Sjoerg 
9697330f729Sjoerg   Builder.SetInsertPoint(EntryBB);
9707330f729Sjoerg 
9717330f729Sjoerg   // If we're checking the return value, allocate space for a pointer to a
9727330f729Sjoerg   // precise source location of the checked return statement.
9737330f729Sjoerg   if (requiresReturnValueCheck()) {
9747330f729Sjoerg     ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
9757330f729Sjoerg     InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
9767330f729Sjoerg   }
9777330f729Sjoerg 
9787330f729Sjoerg   // Emit subprogram debug descriptor.
9797330f729Sjoerg   if (CGDebugInfo *DI = getDebugInfo()) {
9807330f729Sjoerg     // Reconstruct the type from the argument list so that implicit parameters,
9817330f729Sjoerg     // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
9827330f729Sjoerg     // convention.
9837330f729Sjoerg     CallingConv CC = CallingConv::CC_C;
9847330f729Sjoerg     if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
9857330f729Sjoerg       if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
9867330f729Sjoerg         CC = SrcFnTy->getCallConv();
9877330f729Sjoerg     SmallVector<QualType, 16> ArgTypes;
9887330f729Sjoerg     for (const VarDecl *VD : Args)
9897330f729Sjoerg       ArgTypes.push_back(VD->getType());
9907330f729Sjoerg     QualType FnType = getContext().getFunctionType(
9917330f729Sjoerg         RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
992*e038c9c4Sjoerg     DI->emitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk);
9937330f729Sjoerg   }
9947330f729Sjoerg 
9957330f729Sjoerg   if (ShouldInstrumentFunction()) {
9967330f729Sjoerg     if (CGM.getCodeGenOpts().InstrumentFunctions)
9977330f729Sjoerg       CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
9987330f729Sjoerg     if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
9997330f729Sjoerg       CurFn->addFnAttr("instrument-function-entry-inlined",
10007330f729Sjoerg                        "__cyg_profile_func_enter");
10017330f729Sjoerg     if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
10027330f729Sjoerg       CurFn->addFnAttr("instrument-function-entry-inlined",
10037330f729Sjoerg                        "__cyg_profile_func_enter_bare");
10047330f729Sjoerg   }
10057330f729Sjoerg 
10067330f729Sjoerg   // Since emitting the mcount call here impacts optimizations such as
10077330f729Sjoerg   // function inlining, we just add an attribute to insert an mcount call in
10087330f729Sjoerg   // the backend. The attribute "instrument-function-entry-inlined" is set to
10097330f729Sjoerg   // the mcount function name, which is architecture dependent.
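  // For illustration (attribute names taken from the code below; the mcount
  // symbol itself is target dependent): plain -pg yields something like
  //   "instrument-function-entry-inlined"="mcount"
  // while -pg with -mfentry yields
  //   "fentry-call"="true"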
10107330f729Sjoerg   if (CGM.getCodeGenOpts().InstrumentForProfiling) {
10117330f729Sjoerg     // Calls to fentry/mcount should not be generated if the function has
10127330f729Sjoerg     // the no_instrument_function attribute.
10137330f729Sjoerg     if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
10147330f729Sjoerg       if (CGM.getCodeGenOpts().CallFEntry)
10157330f729Sjoerg         Fn->addFnAttr("fentry-call", "true");
10167330f729Sjoerg       else {
10177330f729Sjoerg         Fn->addFnAttr("instrument-function-entry-inlined",
10187330f729Sjoerg                       getTarget().getMCountName());
10197330f729Sjoerg       }
1020*e038c9c4Sjoerg       if (CGM.getCodeGenOpts().MNopMCount) {
1021*e038c9c4Sjoerg         if (!CGM.getCodeGenOpts().CallFEntry)
1022*e038c9c4Sjoerg           CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1023*e038c9c4Sjoerg             << "-mnop-mcount" << "-mfentry";
1024*e038c9c4Sjoerg         Fn->addFnAttr("mnop-mcount");
10257330f729Sjoerg       }
1026*e038c9c4Sjoerg 
1027*e038c9c4Sjoerg       if (CGM.getCodeGenOpts().RecordMCount) {
1028*e038c9c4Sjoerg         if (!CGM.getCodeGenOpts().CallFEntry)
1029*e038c9c4Sjoerg           CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1030*e038c9c4Sjoerg             << "-mrecord-mcount" << "-mfentry";
1031*e038c9c4Sjoerg         Fn->addFnAttr("mrecord-mcount");
1032*e038c9c4Sjoerg       }
1033*e038c9c4Sjoerg     }
1034*e038c9c4Sjoerg   }
1035*e038c9c4Sjoerg 
1036*e038c9c4Sjoerg   if (CGM.getCodeGenOpts().PackedStack) {
1037*e038c9c4Sjoerg     if (getContext().getTargetInfo().getTriple().getArch() !=
1038*e038c9c4Sjoerg         llvm::Triple::systemz)
1039*e038c9c4Sjoerg       CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
1040*e038c9c4Sjoerg         << "-mpacked-stack";
1041*e038c9c4Sjoerg     Fn->addFnAttr("packed-stack");
10427330f729Sjoerg   }
10437330f729Sjoerg 
10447330f729Sjoerg   if (RetTy->isVoidType()) {
10457330f729Sjoerg     // Void type; nothing to return.
10467330f729Sjoerg     ReturnValue = Address::invalid();
10477330f729Sjoerg 
10487330f729Sjoerg     // Count the implicit return.
10497330f729Sjoerg     if (!endsWithReturn(D))
10507330f729Sjoerg       ++NumReturnExprs;
10517330f729Sjoerg   } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
10527330f729Sjoerg     // Indirect return; emit returned value directly into sret slot.
10537330f729Sjoerg     // This reduces code size, and affects correctness in C++.
10547330f729Sjoerg     auto AI = CurFn->arg_begin();
10557330f729Sjoerg     if (CurFnInfo->getReturnInfo().isSRetAfterThis())
10567330f729Sjoerg       ++AI;
10577330f729Sjoerg     ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
10587330f729Sjoerg     if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
10597330f729Sjoerg       ReturnValuePointer =
10607330f729Sjoerg           CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
10617330f729Sjoerg       Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
10627330f729Sjoerg                               ReturnValue.getPointer(), Int8PtrTy),
10637330f729Sjoerg                           ReturnValuePointer);
10647330f729Sjoerg     }
10657330f729Sjoerg   } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
10667330f729Sjoerg              !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
10677330f729Sjoerg     // Load the sret pointer from the argument struct and return into that.
10687330f729Sjoerg     unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
10697330f729Sjoerg     llvm::Function::arg_iterator EI = CurFn->arg_end();
10707330f729Sjoerg     --EI;
10717330f729Sjoerg     llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
1072*e038c9c4Sjoerg     llvm::Type *Ty =
1073*e038c9c4Sjoerg         cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
10747330f729Sjoerg     ReturnValuePointer = Address(Addr, getPointerAlign());
1075*e038c9c4Sjoerg     Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
1076*e038c9c4Sjoerg     ReturnValue = Address(Addr, CGM.getNaturalTypeAlignment(RetTy));
10777330f729Sjoerg   } else {
10787330f729Sjoerg     ReturnValue = CreateIRTemp(RetTy, "retval");
10797330f729Sjoerg 
10807330f729Sjoerg     // Tell the epilog emitter to autorelease the result.  We do this
10817330f729Sjoerg     // now so that various specialized functions can suppress it
10827330f729Sjoerg     // during their IR-generation.
10837330f729Sjoerg     if (getLangOpts().ObjCAutoRefCount &&
10847330f729Sjoerg         !CurFnInfo->isReturnsRetained() &&
10857330f729Sjoerg         RetTy->isObjCRetainableType())
10867330f729Sjoerg       AutoreleaseResult = true;
10877330f729Sjoerg   }
10887330f729Sjoerg 
10897330f729Sjoerg   EmitStartEHSpec(CurCodeDecl);
10907330f729Sjoerg 
10917330f729Sjoerg   PrologueCleanupDepth = EHStack.stable_begin();
10927330f729Sjoerg 
10937330f729Sjoerg   // Emit OpenMP specific initialization of the device functions.
10947330f729Sjoerg   if (getLangOpts().OpenMP && CurCodeDecl)
10957330f729Sjoerg     CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
10967330f729Sjoerg 
10977330f729Sjoerg   EmitFunctionProlog(*CurFnInfo, CurFn, Args);
10987330f729Sjoerg 
10997330f729Sjoerg   if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
11007330f729Sjoerg     CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
11017330f729Sjoerg     const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
11027330f729Sjoerg     if (MD->getParent()->isLambda() &&
11037330f729Sjoerg         MD->getOverloadedOperator() == OO_Call) {
11047330f729Sjoerg       // We're in a lambda; figure out the captures.
11057330f729Sjoerg       MD->getParent()->getCaptureFields(LambdaCaptureFields,
11067330f729Sjoerg                                         LambdaThisCaptureField);
11077330f729Sjoerg       if (LambdaThisCaptureField) {
11087330f729Sjoerg         // If the lambda captures the object referred to by '*this' - either by
11097330f729Sjoerg         // value or by reference, make sure CXXThisValue points to the correct
11107330f729Sjoerg         // object.
11117330f729Sjoerg 
11127330f729Sjoerg         // Get the lvalue for the field (which is a copy of the enclosing object
11137330f729Sjoerg         // or contains the address of the enclosing object).
11147330f729Sjoerg         LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
11157330f729Sjoerg         if (!LambdaThisCaptureField->getType()->isPointerType()) {
11167330f729Sjoerg           // If the enclosing object was captured by value, just use its address.
1117*e038c9c4Sjoerg           CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
11187330f729Sjoerg         } else {
11197330f729Sjoerg           // Load the lvalue pointed to by the field, since '*this' was captured
11207330f729Sjoerg           // by reference.
11217330f729Sjoerg           CXXThisValue =
11227330f729Sjoerg               EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
11237330f729Sjoerg         }
11247330f729Sjoerg       }
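      // Illustrative C++ source for the two cases above:
      //   [*this]() { return m; }  // enclosing object captured by value:
      //                            // the field is a copy, use its address
      //   [this]()  { return m; }  // captured via pointer:
      //                            // load the stored 'this' pointer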
11257330f729Sjoerg       for (auto *FD : MD->getParent()->fields()) {
11267330f729Sjoerg         if (FD->hasCapturedVLAType()) {
11277330f729Sjoerg           auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
11287330f729Sjoerg                                            SourceLocation()).getScalarVal();
11297330f729Sjoerg           auto VAT = FD->getCapturedVLAType();
11307330f729Sjoerg           VLASizeMap[VAT->getSizeExpr()] = ExprArg;
11317330f729Sjoerg         }
11327330f729Sjoerg       }
11337330f729Sjoerg     } else {
11347330f729Sjoerg       // Not in a lambda; just use 'this' from the method.
11357330f729Sjoerg       // FIXME: Should we generate a new load for each use of 'this'?  The
11367330f729Sjoerg       // fast register allocator would be happier...
11377330f729Sjoerg       CXXThisValue = CXXABIThisValue;
11387330f729Sjoerg     }
11397330f729Sjoerg 
11407330f729Sjoerg     // Check the 'this' pointer once per function, if it's available.
11417330f729Sjoerg     if (CXXABIThisValue) {
11427330f729Sjoerg       SanitizerSet SkippedChecks;
11437330f729Sjoerg       SkippedChecks.set(SanitizerKind::ObjectSize, true);
11447330f729Sjoerg       QualType ThisTy = MD->getThisType();
11457330f729Sjoerg 
11467330f729Sjoerg       // If this is the call operator of a lambda with no capture-default, it
11477330f729Sjoerg       // may have a static invoker function, which may call this operator with
11487330f729Sjoerg       // a null 'this' pointer.
11497330f729Sjoerg       if (isLambdaCallOperator(MD) &&
11507330f729Sjoerg           MD->getParent()->getLambdaCaptureDefault() == LCD_None)
11517330f729Sjoerg         SkippedChecks.set(SanitizerKind::Null, true);
11527330f729Sjoerg 
1153*e038c9c4Sjoerg       EmitTypeCheck(
1154*e038c9c4Sjoerg           isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall : TCK_MemberCall,
1155*e038c9c4Sjoerg           Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment, SkippedChecks);
11567330f729Sjoerg     }
11577330f729Sjoerg   }
11587330f729Sjoerg 
11597330f729Sjoerg   // If any of the arguments have a variably modified type, make sure to
11607330f729Sjoerg   // emit the type size.
11617330f729Sjoerg   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
11627330f729Sjoerg        i != e; ++i) {
11637330f729Sjoerg     const VarDecl *VD = *i;
11647330f729Sjoerg 
11657330f729Sjoerg     // Dig out the type as written from ParmVarDecls; it's unclear whether
11667330f729Sjoerg     // the standard (C99 6.9.1p10) requires this, but we're following the
11677330f729Sjoerg     // precedent set by gcc.
11687330f729Sjoerg     QualType Ty;
11697330f729Sjoerg     if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
11707330f729Sjoerg       Ty = PVD->getOriginalType();
11717330f729Sjoerg     else
11727330f729Sjoerg       Ty = VD->getType();
11737330f729Sjoerg 
11747330f729Sjoerg     if (Ty->isVariablyModifiedType())
11757330f729Sjoerg       EmitVariablyModifiedType(Ty);
11767330f729Sjoerg   }
11777330f729Sjoerg   // Emit a location at the end of the prologue.
11787330f729Sjoerg   if (CGDebugInfo *DI = getDebugInfo())
11797330f729Sjoerg     DI->EmitLocation(Builder, StartLoc);
11807330f729Sjoerg 
11817330f729Sjoerg   // TODO: Do we need to handle this in two places like we do with
11827330f729Sjoerg   // target-features/target-cpu?
11837330f729Sjoerg   if (CurFuncDecl)
11847330f729Sjoerg     if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
11857330f729Sjoerg       LargestVectorWidth = VecWidth->getVectorWidth();
11867330f729Sjoerg }
11877330f729Sjoerg 
11887330f729Sjoerg void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
11897330f729Sjoerg   incrementProfileCounter(Body);
11907330f729Sjoerg   if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
11917330f729Sjoerg     EmitCompoundStmtWithoutScope(*S);
11927330f729Sjoerg   else
11937330f729Sjoerg     EmitStmt(Body);
1194*e038c9c4Sjoerg 
1195*e038c9c4Sjoerg   // This is checked after emitting the function body so we know if there
1196*e038c9c4Sjoerg   // are any permitted infinite loops.
1197*e038c9c4Sjoerg   if (checkIfFunctionMustProgress())
1198*e038c9c4Sjoerg     CurFn->addFnAttr(llvm::Attribute::MustProgress);
11997330f729Sjoerg }
12007330f729Sjoerg 
12017330f729Sjoerg /// When instrumenting to collect profile data, the counts for some blocks
12027330f729Sjoerg /// such as switch cases must not include the fall-through counts, so
12037330f729Sjoerg /// emit a branch around the instrumentation code. When not instrumenting,
12047330f729Sjoerg /// this just calls EmitBlock().
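/// For illustration, in
///   switch (x) { case 0: f(); case 1: g(); }
/// execution that falls through from 'case 0' into 'case 1' branches around
/// the counter increment for 'case 1', so that counter reflects only direct
/// jumps to the case label.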
12057330f729Sjoerg void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
12067330f729Sjoerg                                                const Stmt *S) {
12077330f729Sjoerg   llvm::BasicBlock *SkipCountBB = nullptr;
12087330f729Sjoerg   if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
12097330f729Sjoerg     // When instrumenting for profiling, the fallthrough to certain
12107330f729Sjoerg     // statements needs to skip over the instrumentation code so that we
12117330f729Sjoerg     // get an accurate count.
12127330f729Sjoerg     SkipCountBB = createBasicBlock("skipcount");
12137330f729Sjoerg     EmitBranch(SkipCountBB);
12147330f729Sjoerg   }
12157330f729Sjoerg   EmitBlock(BB);
12167330f729Sjoerg   uint64_t CurrentCount = getCurrentProfileCount();
12177330f729Sjoerg   incrementProfileCounter(S);
12187330f729Sjoerg   setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
12197330f729Sjoerg   if (SkipCountBB)
12207330f729Sjoerg     EmitBlock(SkipCountBB);
12217330f729Sjoerg }
12227330f729Sjoerg 
12237330f729Sjoerg /// Tries to mark the given function nounwind based on the
12247330f729Sjoerg /// non-existence of any throwing calls within it.  We believe this is
12257330f729Sjoerg /// lightweight enough to do at -O0.
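/// For illustration, a function whose body contains only arithmetic and calls
/// to callees already marked 'nounwind' has no instruction for which
/// mayThrow() returns true, so it is marked nounwind here.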
12267330f729Sjoerg static void TryMarkNoThrow(llvm::Function *F) {
12277330f729Sjoerg   // LLVM treats 'nounwind' on a function as part of the type, so we
12287330f729Sjoerg   // can't do this on functions that can be overwritten.
12297330f729Sjoerg   if (F->isInterposable()) return;
12307330f729Sjoerg 
12317330f729Sjoerg   for (llvm::BasicBlock &BB : *F)
12327330f729Sjoerg     for (llvm::Instruction &I : BB)
12337330f729Sjoerg       if (I.mayThrow())
12347330f729Sjoerg         return;
12357330f729Sjoerg 
12367330f729Sjoerg   F->setDoesNotThrow();
12377330f729Sjoerg }
12387330f729Sjoerg 
12397330f729Sjoerg QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
12407330f729Sjoerg                                                FunctionArgList &Args) {
12417330f729Sjoerg   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
12427330f729Sjoerg   QualType ResTy = FD->getReturnType();
12437330f729Sjoerg 
12447330f729Sjoerg   const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
12457330f729Sjoerg   if (MD && MD->isInstance()) {
12467330f729Sjoerg     if (CGM.getCXXABI().HasThisReturn(GD))
12477330f729Sjoerg       ResTy = MD->getThisType();
12487330f729Sjoerg     else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
12497330f729Sjoerg       ResTy = CGM.getContext().VoidPtrTy;
12507330f729Sjoerg     CGM.getCXXABI().buildThisParam(*this, Args);
12517330f729Sjoerg   }
12527330f729Sjoerg 
12537330f729Sjoerg   // The base version of an inheriting constructor whose constructed base is a
12547330f729Sjoerg   // virtual base is not passed any arguments (because it doesn't actually call
12557330f729Sjoerg   // the inherited constructor).
12567330f729Sjoerg   bool PassedParams = true;
12577330f729Sjoerg   if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
12587330f729Sjoerg     if (auto Inherited = CD->getInheritedConstructor())
12597330f729Sjoerg       PassedParams =
12607330f729Sjoerg           getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
12617330f729Sjoerg 
12627330f729Sjoerg   if (PassedParams) {
12637330f729Sjoerg     for (auto *Param : FD->parameters()) {
12647330f729Sjoerg       Args.push_back(Param);
12657330f729Sjoerg       if (!Param->hasAttr<PassObjectSizeAttr>())
12667330f729Sjoerg         continue;
12677330f729Sjoerg 
12687330f729Sjoerg       auto *Implicit = ImplicitParamDecl::Create(
12697330f729Sjoerg           getContext(), Param->getDeclContext(), Param->getLocation(),
12707330f729Sjoerg           /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
12717330f729Sjoerg       SizeArguments[Param] = Implicit;
12727330f729Sjoerg       Args.push_back(Implicit);
12737330f729Sjoerg     }
12747330f729Sjoerg   }
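  // For illustration (hypothetical declaration): given
  //   void fill(void *buf __attribute__((pass_object_size(0))));
  // the loop above pushes 'buf' and then an implicit size_t parameter right
  // after it, recording the mapping in SizeArguments.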
12757330f729Sjoerg 
12767330f729Sjoerg   if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
12777330f729Sjoerg     CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
12787330f729Sjoerg 
12797330f729Sjoerg   return ResTy;
12807330f729Sjoerg }
12817330f729Sjoerg 
12827330f729Sjoerg void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
12837330f729Sjoerg                                    const CGFunctionInfo &FnInfo) {
12847330f729Sjoerg   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
12857330f729Sjoerg   CurGD = GD;
12867330f729Sjoerg 
12877330f729Sjoerg   FunctionArgList Args;
12887330f729Sjoerg   QualType ResTy = BuildFunctionArgList(GD, Args);
12897330f729Sjoerg 
12907330f729Sjoerg   // Check if we should generate debug info for this function.
12917330f729Sjoerg   if (FD->hasAttr<NoDebugAttr>())
12927330f729Sjoerg     DebugInfo = nullptr; // disable debug info indefinitely for this function
12937330f729Sjoerg 
12947330f729Sjoerg   // The function might not have a body if we're generating thunks for a
12957330f729Sjoerg   // function declaration.
12967330f729Sjoerg   SourceRange BodyRange;
12977330f729Sjoerg   if (Stmt *Body = FD->getBody())
12987330f729Sjoerg     BodyRange = Body->getSourceRange();
12997330f729Sjoerg   else
13007330f729Sjoerg     BodyRange = FD->getLocation();
13017330f729Sjoerg   CurEHLocation = BodyRange.getEnd();
13027330f729Sjoerg 
13037330f729Sjoerg   // Use the location of the start of the function to determine where
13047330f729Sjoerg   // the function definition is located. By default use the location
13057330f729Sjoerg   // of the declaration as the location for the subprogram. A function
13067330f729Sjoerg   // may lack a declaration in the source code if it is created by code
13077330f729Sjoerg   // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
13087330f729Sjoerg   SourceLocation Loc = FD->getLocation();
13097330f729Sjoerg 
13107330f729Sjoerg   // If this is a function specialization then use the pattern body
13117330f729Sjoerg   // as the location for the function.
13127330f729Sjoerg   if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
13137330f729Sjoerg     if (SpecDecl->hasBody(SpecDecl))
13147330f729Sjoerg       Loc = SpecDecl->getLocation();
13157330f729Sjoerg 
13167330f729Sjoerg   Stmt *Body = FD->getBody();
13177330f729Sjoerg 
1318*e038c9c4Sjoerg   if (Body) {
1319*e038c9c4Sjoerg     // Coroutines always emit lifetime markers.
1320*e038c9c4Sjoerg     if (isa<CoroutineBodyStmt>(Body))
1321*e038c9c4Sjoerg       ShouldEmitLifetimeMarkers = true;
1322*e038c9c4Sjoerg 
1323*e038c9c4Sjoerg     // Initialize helper which will detect jumps which can cause invalid
1324*e038c9c4Sjoerg     // lifetime markers.
1325*e038c9c4Sjoerg     if (ShouldEmitLifetimeMarkers)
13267330f729Sjoerg       Bypasses.Init(Body);
1327*e038c9c4Sjoerg   }
13287330f729Sjoerg 
13297330f729Sjoerg   // Emit the standard function prologue.
13307330f729Sjoerg   StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
13317330f729Sjoerg 
1332*e038c9c4Sjoerg   // Save parameters for coroutine function.
1333*e038c9c4Sjoerg   if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
1334*e038c9c4Sjoerg     for (const auto *ParamDecl : FD->parameters())
1335*e038c9c4Sjoerg       FnArgs.push_back(ParamDecl);
1336*e038c9c4Sjoerg 
13377330f729Sjoerg   // Generate the body of the function.
13387330f729Sjoerg   PGO.assignRegionCounters(GD, CurFn);
13397330f729Sjoerg   if (isa<CXXDestructorDecl>(FD))
13407330f729Sjoerg     EmitDestructorBody(Args);
13417330f729Sjoerg   else if (isa<CXXConstructorDecl>(FD))
13427330f729Sjoerg     EmitConstructorBody(Args);
13437330f729Sjoerg   else if (getLangOpts().CUDA &&
13447330f729Sjoerg            !getLangOpts().CUDAIsDevice &&
13457330f729Sjoerg            FD->hasAttr<CUDAGlobalAttr>())
13467330f729Sjoerg     CGM.getCUDARuntime().emitDeviceStub(*this, Args);
13477330f729Sjoerg   else if (isa<CXXMethodDecl>(FD) &&
13487330f729Sjoerg            cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
13497330f729Sjoerg     // The lambda static invoker function is special, because it forwards or
13507330f729Sjoerg     // clones the body of the function call operator (but is actually static).
13517330f729Sjoerg     EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
13527330f729Sjoerg   } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
13537330f729Sjoerg              (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
13547330f729Sjoerg               cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
13557330f729Sjoerg     // Implicit copy-assignment gets the same special treatment as implicit
13567330f729Sjoerg     // copy-constructors.
13577330f729Sjoerg     emitImplicitAssignmentOperatorBody(Args);
13587330f729Sjoerg   } else if (Body) {
13597330f729Sjoerg     EmitFunctionBody(Body);
13607330f729Sjoerg   } else
13617330f729Sjoerg     llvm_unreachable("no definition for emitted function");
13627330f729Sjoerg 
13637330f729Sjoerg   // C++11 [stmt.return]p2:
13647330f729Sjoerg   //   Flowing off the end of a function [...] results in undefined behavior in
13657330f729Sjoerg   //   a value-returning function.
13667330f729Sjoerg   // C11 6.9.1p12:
13677330f729Sjoerg   //   If the '}' that terminates a function is reached, and the value of the
13687330f729Sjoerg   //   function call is used by the caller, the behavior is undefined.
13697330f729Sjoerg   if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
13707330f729Sjoerg       !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
13717330f729Sjoerg     bool ShouldEmitUnreachable =
13727330f729Sjoerg         CGM.getCodeGenOpts().StrictReturn ||
1373*e038c9c4Sjoerg         !CGM.MayDropFunctionReturn(FD->getASTContext(), FD->getReturnType());
13747330f729Sjoerg     if (SanOpts.has(SanitizerKind::Return)) {
13757330f729Sjoerg       SanitizerScope SanScope(this);
13767330f729Sjoerg       llvm::Value *IsFalse = Builder.getFalse();
13777330f729Sjoerg       EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
13787330f729Sjoerg                 SanitizerHandler::MissingReturn,
13797330f729Sjoerg                 EmitCheckSourceLocation(FD->getLocation()), None);
13807330f729Sjoerg     } else if (ShouldEmitUnreachable) {
13817330f729Sjoerg       if (CGM.getCodeGenOpts().OptimizationLevel == 0)
13827330f729Sjoerg         EmitTrapCall(llvm::Intrinsic::trap);
13837330f729Sjoerg     }
13847330f729Sjoerg     if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
13857330f729Sjoerg       Builder.CreateUnreachable();
13867330f729Sjoerg       Builder.ClearInsertionPoint();
13877330f729Sjoerg     }
13887330f729Sjoerg   }
13897330f729Sjoerg 
13907330f729Sjoerg   // Emit the standard function epilogue.
13917330f729Sjoerg   FinishFunction(BodyRange.getEnd());
13927330f729Sjoerg 
13937330f729Sjoerg   // If we haven't marked the function nothrow through other means, do
13947330f729Sjoerg   // a quick pass now to see if we can.
13957330f729Sjoerg   if (!CurFn->doesNotThrow())
13967330f729Sjoerg     TryMarkNoThrow(CurFn);
13977330f729Sjoerg }
13987330f729Sjoerg 
13997330f729Sjoerg /// ContainsLabel - Return true if the statement contains a label in it.  If
14007330f729Sjoerg /// this statement is not executed normally, the fact that it contains no
14017330f729Sjoerg /// label means that we can just remove the code.
14027330f729Sjoerg bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
14037330f729Sjoerg   // Null statement, not a label!
14047330f729Sjoerg   if (!S) return false;
14057330f729Sjoerg 
14067330f729Sjoerg   // If this is a label, we have to emit the code, consider something like:
14077330f729Sjoerg   // if (0) {  ...  foo:  bar(); }  goto foo;
14087330f729Sjoerg   //
14097330f729Sjoerg   // TODO: If anyone cared, we could track __label__'s, since we know that you
14107330f729Sjoerg   // can't jump to one from outside their declared region.
14117330f729Sjoerg   if (isa<LabelStmt>(S))
14127330f729Sjoerg     return true;
14137330f729Sjoerg 
14147330f729Sjoerg   // If this is a case/default statement, and we haven't seen a switch, we have
14157330f729Sjoerg   // to emit the code.
14167330f729Sjoerg   if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
14177330f729Sjoerg     return true;
14187330f729Sjoerg 
14197330f729Sjoerg   // If this is a switch statement, we want to ignore cases below it.
14207330f729Sjoerg   if (isa<SwitchStmt>(S))
14217330f729Sjoerg     IgnoreCaseStmts = true;
14227330f729Sjoerg 
14237330f729Sjoerg   // Scan subexpressions for verboten labels.
14247330f729Sjoerg   for (const Stmt *SubStmt : S->children())
14257330f729Sjoerg     if (ContainsLabel(SubStmt, IgnoreCaseStmts))
14267330f729Sjoerg       return true;
14277330f729Sjoerg 
14287330f729Sjoerg   return false;
14297330f729Sjoerg }
14307330f729Sjoerg 
14317330f729Sjoerg /// containsBreak - Return true if the statement contains a break out of it.
14327330f729Sjoerg /// If the statement (recursively) contains a switch or loop with a break
14337330f729Sjoerg /// inside of it, this is fine.
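/// For illustration, containsBreak("if (c) break;") is true, while
/// containsBreak("while (c) { break; }") is false because that break targets
/// the nested loop's own break scope.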
14347330f729Sjoerg bool CodeGenFunction::containsBreak(const Stmt *S) {
14357330f729Sjoerg   // Null statement, not a label!
14367330f729Sjoerg   if (!S) return false;
14377330f729Sjoerg 
14387330f729Sjoerg   // If this is a switch or loop that defines its own break scope, then we can
14397330f729Sjoerg   // include it and anything inside of it.
14407330f729Sjoerg   if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
14417330f729Sjoerg       isa<ForStmt>(S))
14427330f729Sjoerg     return false;
14437330f729Sjoerg 
14447330f729Sjoerg   if (isa<BreakStmt>(S))
14457330f729Sjoerg     return true;
14467330f729Sjoerg 
14477330f729Sjoerg   // Scan subexpressions for verboten breaks.
14487330f729Sjoerg   for (const Stmt *SubStmt : S->children())
14497330f729Sjoerg     if (containsBreak(SubStmt))
14507330f729Sjoerg       return true;
14517330f729Sjoerg 
14527330f729Sjoerg   return false;
14537330f729Sjoerg }
14547330f729Sjoerg 
14557330f729Sjoerg bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
14567330f729Sjoerg   if (!S) return false;
14577330f729Sjoerg 
14587330f729Sjoerg   // Some statement kinds add a scope and thus never add a decl to the current
14597330f729Sjoerg   // scope. Note, this list is longer than the list of statements that might
14607330f729Sjoerg   // have an unscoped decl nested within them, but this way is conservatively
14617330f729Sjoerg   // correct even if more statement kinds are added.
14627330f729Sjoerg   if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
14637330f729Sjoerg       isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
14647330f729Sjoerg       isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
14657330f729Sjoerg       isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
14667330f729Sjoerg     return false;
14677330f729Sjoerg 
14687330f729Sjoerg   if (isa<DeclStmt>(S))
14697330f729Sjoerg     return true;
14707330f729Sjoerg 
14717330f729Sjoerg   for (const Stmt *SubStmt : S->children())
14727330f729Sjoerg     if (mightAddDeclToScope(SubStmt))
14737330f729Sjoerg       return true;
14747330f729Sjoerg 
14757330f729Sjoerg   return false;
14767330f729Sjoerg }
14777330f729Sjoerg 
14787330f729Sjoerg /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
14797330f729Sjoerg /// to a constant, or if it does but contains a label, return false.  If it
14807330f729Sjoerg /// constant folds return true and set the boolean result in Result.
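/// For illustration, a condition like "sizeof(int) == 4" folds to a constant
/// and sets ResultBool accordingly, while "x == 4" does not fold and the
/// function returns false.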
14817330f729Sjoerg bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
14827330f729Sjoerg                                                    bool &ResultBool,
14837330f729Sjoerg                                                    bool AllowLabels) {
14847330f729Sjoerg   llvm::APSInt ResultInt;
14857330f729Sjoerg   if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
14867330f729Sjoerg     return false;
14877330f729Sjoerg 
14887330f729Sjoerg   ResultBool = ResultInt.getBoolValue();
14897330f729Sjoerg   return true;
14907330f729Sjoerg }
14917330f729Sjoerg 
14927330f729Sjoerg /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
14937330f729Sjoerg /// to a constant, or if it does but contains a label, return false.  If it
14947330f729Sjoerg /// constant folds return true and set the folded value.
14957330f729Sjoerg bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
14967330f729Sjoerg                                                    llvm::APSInt &ResultInt,
14977330f729Sjoerg                                                    bool AllowLabels) {
14987330f729Sjoerg   // FIXME: Rename and handle conversion of other evaluatable things
14997330f729Sjoerg   // to bool.
15007330f729Sjoerg   Expr::EvalResult Result;
15017330f729Sjoerg   if (!Cond->EvaluateAsInt(Result, getContext()))
15027330f729Sjoerg     return false;  // Not foldable, not integer or not fully evaluatable.
15037330f729Sjoerg 
15047330f729Sjoerg   llvm::APSInt Int = Result.Val.getInt();
15057330f729Sjoerg   if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
15067330f729Sjoerg     return false;  // Contains a label.
15077330f729Sjoerg 
15087330f729Sjoerg   ResultInt = Int;
15097330f729Sjoerg   return true;
15107330f729Sjoerg }
15117330f729Sjoerg 
1512*e038c9c4Sjoerg /// Determine whether the given condition is an instrumentable condition
1513*e038c9c4Sjoerg /// (i.e. no "&&" or "||").
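/// For illustration, "x > 0" and "!(x > 0)" are instrumentable, while
/// "a && b" and "a || b" are not (their operands are instrumented
/// individually via EmitBranchToCounterBlock instead).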
1514*e038c9c4Sjoerg bool CodeGenFunction::isInstrumentedCondition(const Expr *C) {
1515*e038c9c4Sjoerg   // Bypass simplistic logical-NOT operator before determining whether the
1516*e038c9c4Sjoerg   // condition contains any other logical operator.
1517*e038c9c4Sjoerg   if (const UnaryOperator *UnOp = dyn_cast<UnaryOperator>(C->IgnoreParens()))
1518*e038c9c4Sjoerg     if (UnOp->getOpcode() == UO_LNot)
1519*e038c9c4Sjoerg       C = UnOp->getSubExpr();
15207330f729Sjoerg 
1521*e038c9c4Sjoerg   const BinaryOperator *BOp = dyn_cast<BinaryOperator>(C->IgnoreParens());
1522*e038c9c4Sjoerg   return (!BOp || !BOp->isLogicalOp());
1523*e038c9c4Sjoerg }
1524*e038c9c4Sjoerg 
1525*e038c9c4Sjoerg /// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
1526*e038c9c4Sjoerg /// increments a profile counter based on the semantics of the given logical
1527*e038c9c4Sjoerg /// operator opcode.  This is used to instrument branch condition coverage for
1528*e038c9c4Sjoerg /// logical operators.
1529*e038c9c4Sjoerg void CodeGenFunction::EmitBranchToCounterBlock(
1530*e038c9c4Sjoerg     const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock,
1531*e038c9c4Sjoerg     llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */,
1532*e038c9c4Sjoerg     Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) {
1533*e038c9c4Sjoerg   // If not instrumenting, just emit a branch.
1534*e038c9c4Sjoerg   bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr();
1535*e038c9c4Sjoerg   if (!InstrumentRegions || !isInstrumentedCondition(Cond))
1536*e038c9c4Sjoerg     return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
1537*e038c9c4Sjoerg 
1538*e038c9c4Sjoerg   llvm::BasicBlock *ThenBlock = nullptr;
1539*e038c9c4Sjoerg   llvm::BasicBlock *ElseBlock = nullptr;
1540*e038c9c4Sjoerg   llvm::BasicBlock *NextBlock = nullptr;
1541*e038c9c4Sjoerg 
1542*e038c9c4Sjoerg   // Create the block we'll use to increment the appropriate counter.
1543*e038c9c4Sjoerg   llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt");
1544*e038c9c4Sjoerg 
1545*e038c9c4Sjoerg   // Set block pointers according to Logical-AND (BO_LAnd) semantics. This
1546*e038c9c4Sjoerg   // means we need to evaluate the condition and increment the counter on TRUE:
1547*e038c9c4Sjoerg   //
1548*e038c9c4Sjoerg   // if (Cond)
1549*e038c9c4Sjoerg   //   goto CounterIncrBlock;
1550*e038c9c4Sjoerg   // else
1551*e038c9c4Sjoerg   //   goto FalseBlock;
1552*e038c9c4Sjoerg   //
1553*e038c9c4Sjoerg   // CounterIncrBlock:
1554*e038c9c4Sjoerg   //   Counter++;
1555*e038c9c4Sjoerg   //   goto TrueBlock;
1556*e038c9c4Sjoerg 
1557*e038c9c4Sjoerg   if (LOp == BO_LAnd) {
1558*e038c9c4Sjoerg     ThenBlock = CounterIncrBlock;
1559*e038c9c4Sjoerg     ElseBlock = FalseBlock;
1560*e038c9c4Sjoerg     NextBlock = TrueBlock;
1561*e038c9c4Sjoerg   }
1562*e038c9c4Sjoerg 
1563*e038c9c4Sjoerg   // Set block pointers according to Logical-OR (BO_LOr) semantics. This means
1564*e038c9c4Sjoerg   // we need to evaluate the condition and increment the counter on FALSE:
1565*e038c9c4Sjoerg   //
1566*e038c9c4Sjoerg   // if (Cond)
1567*e038c9c4Sjoerg   //   goto TrueBlock;
1568*e038c9c4Sjoerg   // else
1569*e038c9c4Sjoerg   //   goto CounterIncrBlock;
1570*e038c9c4Sjoerg   //
1571*e038c9c4Sjoerg   // CounterIncrBlock:
1572*e038c9c4Sjoerg   //   Counter++;
1573*e038c9c4Sjoerg   //   goto FalseBlock;
1574*e038c9c4Sjoerg 
1575*e038c9c4Sjoerg   else if (LOp == BO_LOr) {
1576*e038c9c4Sjoerg     ThenBlock = TrueBlock;
1577*e038c9c4Sjoerg     ElseBlock = CounterIncrBlock;
1578*e038c9c4Sjoerg     NextBlock = FalseBlock;
1579*e038c9c4Sjoerg   } else {
1580*e038c9c4Sjoerg     llvm_unreachable("Expected Opcode must be that of a Logical Operator");
1581*e038c9c4Sjoerg   }
1582*e038c9c4Sjoerg 
1583*e038c9c4Sjoerg   // Emit Branch based on condition.
1584*e038c9c4Sjoerg   EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount, LH);
1585*e038c9c4Sjoerg 
1586*e038c9c4Sjoerg   // Emit the block containing the counter increment(s).
1587*e038c9c4Sjoerg   EmitBlock(CounterIncrBlock);
1588*e038c9c4Sjoerg 
1589*e038c9c4Sjoerg   // Increment corresponding counter; if index not provided, use Cond as index.
1590*e038c9c4Sjoerg   incrementProfileCounter(CntrIdx ? CntrIdx : Cond);
1591*e038c9c4Sjoerg 
1592*e038c9c4Sjoerg   // Go to the next block.
1593*e038c9c4Sjoerg   EmitBranch(NextBlock);
1594*e038c9c4Sjoerg }
15957330f729Sjoerg 
15967330f729Sjoerg /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
15977330f729Sjoerg /// statement) to the specified blocks.  Based on the condition, this might try
15987330f729Sjoerg /// to simplify the codegen of the conditional based on the branch.
1599*e038c9c4Sjoerg /// \param LH The value of the likelihood attribute on the True branch.
16007330f729Sjoerg void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
16017330f729Sjoerg                                            llvm::BasicBlock *TrueBlock,
16027330f729Sjoerg                                            llvm::BasicBlock *FalseBlock,
1603*e038c9c4Sjoerg                                            uint64_t TrueCount,
1604*e038c9c4Sjoerg                                            Stmt::Likelihood LH) {
16057330f729Sjoerg   Cond = Cond->IgnoreParens();
16067330f729Sjoerg 
16077330f729Sjoerg   if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
16087330f729Sjoerg 
16097330f729Sjoerg     // Handle X && Y in a condition.
16107330f729Sjoerg     if (CondBOp->getOpcode() == BO_LAnd) {
16117330f729Sjoerg       // If we have "1 && X", simplify the code.  "0 && X" would have constant
16127330f729Sjoerg       // folded if the case was simple enough.
16137330f729Sjoerg       bool ConstantBool = false;
16147330f729Sjoerg       if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
16157330f729Sjoerg           ConstantBool) {
16167330f729Sjoerg         // br(1 && X) -> br(X).
16177330f729Sjoerg         incrementProfileCounter(CondBOp);
1618*e038c9c4Sjoerg         return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1619*e038c9c4Sjoerg                                         FalseBlock, TrueCount, LH);
16207330f729Sjoerg       }
16217330f729Sjoerg 
16227330f729Sjoerg       // If we have "X && 1", simplify the code to use an uncond branch.
16237330f729Sjoerg       // "X && 0" would have been constant folded to 0.
16247330f729Sjoerg       if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
16257330f729Sjoerg           ConstantBool) {
16267330f729Sjoerg         // br(X && 1) -> br(X).
1627*e038c9c4Sjoerg         return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
1628*e038c9c4Sjoerg                                         FalseBlock, TrueCount, LH, CondBOp);
16297330f729Sjoerg       }
16307330f729Sjoerg 
16317330f729Sjoerg       // Emit the LHS as a conditional.  If the LHS conditional is false, we
16327330f729Sjoerg       // want to jump to the FalseBlock.
16337330f729Sjoerg       llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
16347330f729Sjoerg       // The counter tells us how often we evaluate RHS, and all of TrueCount
16357330f729Sjoerg       // can be propagated to that branch.
16367330f729Sjoerg       uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
16377330f729Sjoerg 
16387330f729Sjoerg       ConditionalEvaluation eval(*this);
16397330f729Sjoerg       {
16407330f729Sjoerg         ApplyDebugLocation DL(*this, Cond);
1641*e038c9c4Sjoerg         // Propagate the likelihood attribute like __builtin_expect
1642*e038c9c4Sjoerg         // __builtin_expect(X && Y, 1) -> X and Y are likely
1643*e038c9c4Sjoerg         // __builtin_expect(X && Y, 0) -> only Y is unlikely
1644*e038c9c4Sjoerg         EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount,
1645*e038c9c4Sjoerg                              LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
16467330f729Sjoerg         EmitBlock(LHSTrue);
16477330f729Sjoerg       }
16487330f729Sjoerg 
16497330f729Sjoerg       incrementProfileCounter(CondBOp);
16507330f729Sjoerg       setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
16517330f729Sjoerg 
16527330f729Sjoerg       // Any temporaries created here are conditional.
16537330f729Sjoerg       eval.begin(*this);
1654*e038c9c4Sjoerg       EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1655*e038c9c4Sjoerg                                FalseBlock, TrueCount, LH);
16567330f729Sjoerg       eval.end(*this);
16577330f729Sjoerg 
16587330f729Sjoerg       return;
16597330f729Sjoerg     }
16607330f729Sjoerg 
16617330f729Sjoerg     if (CondBOp->getOpcode() == BO_LOr) {
16627330f729Sjoerg       // If we have "0 || X", simplify the code.  "1 || X" would have constant
16637330f729Sjoerg       // folded if the case was simple enough.
16647330f729Sjoerg       bool ConstantBool = false;
16657330f729Sjoerg       if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
16667330f729Sjoerg           !ConstantBool) {
16677330f729Sjoerg         // br(0 || X) -> br(X).
16687330f729Sjoerg         incrementProfileCounter(CondBOp);
1669*e038c9c4Sjoerg         return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
1670*e038c9c4Sjoerg                                         FalseBlock, TrueCount, LH);
16717330f729Sjoerg       }
16727330f729Sjoerg 
16737330f729Sjoerg       // If we have "X || 0", simplify the code to use an uncond branch.
16747330f729Sjoerg       // "X || 1" would have been constant folded to 1.
16757330f729Sjoerg       if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
16767330f729Sjoerg           !ConstantBool) {
16777330f729Sjoerg         // br(X || 0) -> br(X).
1678*e038c9c4Sjoerg         return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
1679*e038c9c4Sjoerg                                         FalseBlock, TrueCount, LH, CondBOp);
16807330f729Sjoerg       }
16817330f729Sjoerg 
16827330f729Sjoerg       // Emit the LHS as a conditional.  If the LHS conditional is true, we
16837330f729Sjoerg       // want to jump to the TrueBlock.
16847330f729Sjoerg       llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
16857330f729Sjoerg       // We have the count for entry to the RHS and for the whole expression
16867330f729Sjoerg       // being true, so we can divvy up the true count between the short circuit and
16877330f729Sjoerg       // the RHS.
16887330f729Sjoerg       uint64_t LHSCount =
16897330f729Sjoerg           getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
16907330f729Sjoerg       uint64_t RHSCount = TrueCount - LHSCount;
16917330f729Sjoerg 
16927330f729Sjoerg       ConditionalEvaluation eval(*this);
16937330f729Sjoerg       {
1694*e038c9c4Sjoerg         // Propagate the likelihood attribute like __builtin_expect
1695*e038c9c4Sjoerg         // __builtin_expect(X || Y, 1) -> only Y is likely
1696*e038c9c4Sjoerg         // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
16977330f729Sjoerg         ApplyDebugLocation DL(*this, Cond);
1698*e038c9c4Sjoerg         EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount,
1699*e038c9c4Sjoerg                              LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
17007330f729Sjoerg         EmitBlock(LHSFalse);
17017330f729Sjoerg       }
17027330f729Sjoerg 
17037330f729Sjoerg       incrementProfileCounter(CondBOp);
17047330f729Sjoerg       setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
17057330f729Sjoerg 
17067330f729Sjoerg       // Any temporaries created here are conditional.
17077330f729Sjoerg       eval.begin(*this);
1708*e038c9c4Sjoerg       EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock, FalseBlock,
1709*e038c9c4Sjoerg                                RHSCount, LH);
17107330f729Sjoerg 
17117330f729Sjoerg       eval.end(*this);
17127330f729Sjoerg 
17137330f729Sjoerg       return;
17147330f729Sjoerg     }
17157330f729Sjoerg   }
17167330f729Sjoerg 
17177330f729Sjoerg   if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
17187330f729Sjoerg     // br(!x, t, f) -> br(x, f, t)
17197330f729Sjoerg     if (CondUOp->getOpcode() == UO_LNot) {
17207330f729Sjoerg       // Negate the count.
17217330f729Sjoerg       uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1722*e038c9c4Sjoerg       // The values of the enum are chosen to make this negation possible.
1723*e038c9c4Sjoerg       LH = static_cast<Stmt::Likelihood>(-LH);
17247330f729Sjoerg       // Negate the condition and swap the destination blocks.
17257330f729Sjoerg       return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1726*e038c9c4Sjoerg                                   FalseCount, LH);
17277330f729Sjoerg     }
17287330f729Sjoerg   }
17297330f729Sjoerg 
17307330f729Sjoerg   if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
17317330f729Sjoerg     // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
17327330f729Sjoerg     llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
17337330f729Sjoerg     llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
17347330f729Sjoerg 
1735*e038c9c4Sjoerg     // The ConditionalOperator itself has no likelihood information for its
1736*e038c9c4Sjoerg     // true and false branches. This matches the behavior of __builtin_expect.
17377330f729Sjoerg     ConditionalEvaluation cond(*this);
17387330f729Sjoerg     EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1739*e038c9c4Sjoerg                          getProfileCount(CondOp), Stmt::LH_None);
17407330f729Sjoerg 
17417330f729Sjoerg     // When computing PGO branch weights, we only know the overall count for
17427330f729Sjoerg     // the true block. This code is essentially doing tail duplication of the
17437330f729Sjoerg     // naive code-gen, introducing new edges for which counts are not
17447330f729Sjoerg     // available. Divide the counts proportionally between the LHS and RHS of
17457330f729Sjoerg     // the conditional operator.
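    // Worked example (illustrative numbers): if this conditional operator was
    // evaluated 100 times (getCurrentProfileCount), its true arm was taken 40
    // times (getProfileCount(CondOp)), and TrueCount is 60, then the LHS
    // branch receives 60 * 40/100 = 24 of the true count and the RHS the
    // remaining 36.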
17467330f729Sjoerg     uint64_t LHSScaledTrueCount = 0;
17477330f729Sjoerg     if (TrueCount) {
17487330f729Sjoerg       double LHSRatio =
17497330f729Sjoerg           getProfileCount(CondOp) / (double)getCurrentProfileCount();
17507330f729Sjoerg       LHSScaledTrueCount = TrueCount * LHSRatio;
17517330f729Sjoerg     }
17527330f729Sjoerg 
17537330f729Sjoerg     cond.begin(*this);
17547330f729Sjoerg     EmitBlock(LHSBlock);
17557330f729Sjoerg     incrementProfileCounter(CondOp);
17567330f729Sjoerg     {
17577330f729Sjoerg       ApplyDebugLocation DL(*this, Cond);
17587330f729Sjoerg       EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1759*e038c9c4Sjoerg                            LHSScaledTrueCount, LH);
17607330f729Sjoerg     }
17617330f729Sjoerg     cond.end(*this);
17627330f729Sjoerg 
17637330f729Sjoerg     cond.begin(*this);
17647330f729Sjoerg     EmitBlock(RHSBlock);
17657330f729Sjoerg     EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1766*e038c9c4Sjoerg                          TrueCount - LHSScaledTrueCount, LH);
17677330f729Sjoerg     cond.end(*this);
17687330f729Sjoerg 
17697330f729Sjoerg     return;
17707330f729Sjoerg   }
17717330f729Sjoerg 
17727330f729Sjoerg   if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
17737330f729Sjoerg     // Conditional operator handling can give us a throw expression as a
17747330f729Sjoerg     // condition for a case like:
17757330f729Sjoerg     //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
17767330f729Sjoerg     // Fold this to:
17777330f729Sjoerg     //   br(c, throw x, br(y, t, f))
17787330f729Sjoerg     EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
17797330f729Sjoerg     return;
17807330f729Sjoerg   }
17817330f729Sjoerg 
1782*e038c9c4Sjoerg   // Emit the code with the fully general case.
1783*e038c9c4Sjoerg   llvm::Value *CondV;
1784*e038c9c4Sjoerg   {
1785*e038c9c4Sjoerg     ApplyDebugLocation DL(*this, Cond);
1786*e038c9c4Sjoerg     CondV = EvaluateExprAsBool(Cond);
1787*e038c9c4Sjoerg   }
1788*e038c9c4Sjoerg 
1789*e038c9c4Sjoerg   llvm::MDNode *Weights = nullptr;
1790*e038c9c4Sjoerg   llvm::MDNode *Unpredictable = nullptr;
1791*e038c9c4Sjoerg 
17927330f729Sjoerg   // If the branch has a condition wrapped by __builtin_unpredictable,
17937330f729Sjoerg   // create metadata that specifies that the branch is unpredictable.
17947330f729Sjoerg   // Don't bother if not optimizing because that metadata would not be used.
17957330f729Sjoerg   auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
17967330f729Sjoerg   if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
17977330f729Sjoerg     auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
17987330f729Sjoerg     if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
17997330f729Sjoerg       llvm::MDBuilder MDHelper(getLLVMContext());
18007330f729Sjoerg       Unpredictable = MDHelper.createUnpredictable();
18017330f729Sjoerg     }
18027330f729Sjoerg   }
18037330f729Sjoerg 
1804*e038c9c4Sjoerg   // If there is likelihood knowledge for the condition, lower it.
1805*e038c9c4Sjoerg   // Note that this won't emit anything if we are not optimizing.
1806*e038c9c4Sjoerg   llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(CondV, LH);
1807*e038c9c4Sjoerg   if (CondV != NewCondV)
1808*e038c9c4Sjoerg     CondV = NewCondV;
1809*e038c9c4Sjoerg   else {
1810*e038c9c4Sjoerg     // Otherwise, lower profile counts. Note that we do this even at -O0.
18117330f729Sjoerg     uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1812*e038c9c4Sjoerg     Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
18137330f729Sjoerg   }
1814*e038c9c4Sjoerg 
18157330f729Sjoerg   Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
18167330f729Sjoerg }
18177330f729Sjoerg 
18187330f729Sjoerg /// ErrorUnsupported - Print out an error that codegen doesn't support the
18197330f729Sjoerg /// specified stmt yet.
18207330f729Sjoerg void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
18217330f729Sjoerg   CGM.ErrorUnsupported(S, Type);
18227330f729Sjoerg }
18237330f729Sjoerg 
18247330f729Sjoerg /// emitNonZeroVLAInit - Emit the "zero" initialization of a
18257330f729Sjoerg /// variable-length array whose elements have a non-zero bit-pattern.
18267330f729Sjoerg ///
18277330f729Sjoerg /// \param baseType the inner-most element type of the array
18287330f729Sjoerg /// \param src - a char* pointing to the bit-pattern for a single
18297330f729Sjoerg /// base element of the array
18307330f729Sjoerg /// \param sizeInChars - the total size of the VLA, in chars
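/// For illustration, zero-initializing a VLA such as "int S::* vla[n]" needs
/// this path on the Itanium C++ ABI, where the null pointer-to-data-member is
/// the all-ones pattern rather than zero, so each element is memcpy'd from a
/// prototype value instead of being memset to zero.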
18317330f729Sjoerg static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
18327330f729Sjoerg                                Address dest, Address src,
18337330f729Sjoerg                                llvm::Value *sizeInChars) {
18347330f729Sjoerg   CGBuilderTy &Builder = CGF.Builder;
18357330f729Sjoerg 
18367330f729Sjoerg   CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
18377330f729Sjoerg   llvm::Value *baseSizeInChars
18387330f729Sjoerg     = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
18397330f729Sjoerg 
18407330f729Sjoerg   Address begin =
18417330f729Sjoerg     Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
1842*e038c9c4Sjoerg   llvm::Value *end = Builder.CreateInBoundsGEP(
1843*e038c9c4Sjoerg       begin.getElementType(), begin.getPointer(), sizeInChars, "vla.end");
18447330f729Sjoerg 
18457330f729Sjoerg   llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
18467330f729Sjoerg   llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
18477330f729Sjoerg   llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
18487330f729Sjoerg 
18497330f729Sjoerg   // Make a loop over the VLA.  C99 guarantees that the VLA element
18507330f729Sjoerg   // count must be nonzero.
18517330f729Sjoerg   CGF.EmitBlock(loopBB);
18527330f729Sjoerg 
18537330f729Sjoerg   llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
18547330f729Sjoerg   cur->addIncoming(begin.getPointer(), originBB);
18557330f729Sjoerg 
18567330f729Sjoerg   CharUnits curAlign =
18577330f729Sjoerg     dest.getAlignment().alignmentOfArrayElement(baseSize);
18587330f729Sjoerg 
18597330f729Sjoerg   // memcpy the individual element bit-pattern.
18607330f729Sjoerg   Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
18617330f729Sjoerg                        /*volatile*/ false);
18627330f729Sjoerg 
18637330f729Sjoerg   // Go to the next element.
18647330f729Sjoerg   llvm::Value *next =
18657330f729Sjoerg     Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
18667330f729Sjoerg 
18677330f729Sjoerg   // Leave if that's the end of the VLA.
18687330f729Sjoerg   llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
18697330f729Sjoerg   Builder.CreateCondBr(done, contBB, loopBB);
18707330f729Sjoerg   cur->addIncoming(next, loopBB);
18717330f729Sjoerg 
18727330f729Sjoerg   CGF.EmitBlock(contBB);
18737330f729Sjoerg }
18747330f729Sjoerg 
18757330f729Sjoerg void
18767330f729Sjoerg CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
18777330f729Sjoerg   // Ignore empty classes in C++.
18787330f729Sjoerg   if (getLangOpts().CPlusPlus) {
18797330f729Sjoerg     if (const RecordType *RT = Ty->getAs<RecordType>()) {
18807330f729Sjoerg       if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
18817330f729Sjoerg         return;
18827330f729Sjoerg     }
18837330f729Sjoerg   }
18847330f729Sjoerg 
18857330f729Sjoerg   // Cast the dest ptr to the appropriate i8 pointer type.
18867330f729Sjoerg   if (DestPtr.getElementType() != Int8Ty)
18877330f729Sjoerg     DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
18887330f729Sjoerg 
18897330f729Sjoerg   // Get size and alignment info for this aggregate.
18907330f729Sjoerg   CharUnits size = getContext().getTypeSizeInChars(Ty);
18917330f729Sjoerg 
18927330f729Sjoerg   llvm::Value *SizeVal;
18937330f729Sjoerg   const VariableArrayType *vla;
18947330f729Sjoerg 
18957330f729Sjoerg   // Don't bother emitting a zero-byte memset.
18967330f729Sjoerg   if (size.isZero()) {
18977330f729Sjoerg     // But note that getTypeInfo returns 0 for a VLA.
18987330f729Sjoerg     if (const VariableArrayType *vlaType =
18997330f729Sjoerg           dyn_cast_or_null<VariableArrayType>(
19007330f729Sjoerg                                           getContext().getAsArrayType(Ty))) {
19017330f729Sjoerg       auto VlaSize = getVLASize(vlaType);
19027330f729Sjoerg       SizeVal = VlaSize.NumElts;
19037330f729Sjoerg       CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
19047330f729Sjoerg       if (!eltSize.isOne())
19057330f729Sjoerg         SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
19067330f729Sjoerg       vla = vlaType;
19077330f729Sjoerg     } else {
19087330f729Sjoerg       return;
19097330f729Sjoerg     }
19107330f729Sjoerg   } else {
19117330f729Sjoerg     SizeVal = CGM.getSize(size);
19127330f729Sjoerg     vla = nullptr;
19137330f729Sjoerg   }
19147330f729Sjoerg 
19157330f729Sjoerg   // If the type contains a pointer to data member we can't memset it to zero.
19167330f729Sjoerg   // Instead, create a null constant and copy it to the destination.
19177330f729Sjoerg   // TODO: there are other patterns besides zero that we can usefully memset,
19187330f729Sjoerg   // like -1, which happens to be the pattern used by member-pointers.
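  // Illustrative example (assuming the Itanium C++ ABI, where a null pointer
  // to data member is all-ones rather than all-zeros):
  //
  //   struct S { int S::*mp; };
  //   S s{};   // "zero"-initializing 's' must store -1 into s.mp
  //
  // Here isZeroInitializable(S) is false, so we take the memcpy path below.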
19197330f729Sjoerg   if (!CGM.getTypes().isZeroInitializable(Ty)) {
19207330f729Sjoerg     // For a VLA, emit a single element, then splat that over the VLA.
19217330f729Sjoerg     if (vla) Ty = getContext().getBaseElementType(vla);
19227330f729Sjoerg 
19237330f729Sjoerg     llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
19247330f729Sjoerg 
19257330f729Sjoerg     llvm::GlobalVariable *NullVariable =
19267330f729Sjoerg       new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
19277330f729Sjoerg                                /*isConstant=*/true,
19287330f729Sjoerg                                llvm::GlobalVariable::PrivateLinkage,
19297330f729Sjoerg                                NullConstant, Twine());
19307330f729Sjoerg     CharUnits NullAlign = DestPtr.getAlignment();
19317330f729Sjoerg     NullVariable->setAlignment(NullAlign.getAsAlign());
19327330f729Sjoerg     Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
19337330f729Sjoerg                    NullAlign);
19347330f729Sjoerg 
19357330f729Sjoerg     if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
19367330f729Sjoerg 
19377330f729Sjoerg     // Get and call the appropriate llvm.memcpy overload.
19387330f729Sjoerg     Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
19397330f729Sjoerg     return;
19407330f729Sjoerg   }
19417330f729Sjoerg 
19427330f729Sjoerg   // Otherwise, just memset the whole thing to zero.  This is legal
19437330f729Sjoerg   // because in LLVM, all default initializers (other than the ones we just
19447330f729Sjoerg   // handled above) are guaranteed to have a bit pattern of all zeros.
19457330f729Sjoerg   Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
19467330f729Sjoerg }
19477330f729Sjoerg 
19487330f729Sjoerg llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
19497330f729Sjoerg   // Make sure that there is a block for the indirect goto.
19507330f729Sjoerg   if (!IndirectBranch)
19517330f729Sjoerg     GetIndirectGotoBlock();
19527330f729Sjoerg 
19537330f729Sjoerg   llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
19547330f729Sjoerg 
19557330f729Sjoerg   // Make sure the indirect branch includes all of the address-taken blocks.
19567330f729Sjoerg   IndirectBranch->addDestination(BB);
19577330f729Sjoerg   return llvm::BlockAddress::get(CurFn, BB);
19587330f729Sjoerg }
19597330f729Sjoerg 
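// Sketch of the source construct that GetAddrOfLabel and the indirect-goto
// block support (GNU computed goto); the names below are illustrative:
//
//   void dispatch(int i) {
//     static void *tbl[] = { &&L0, &&L1 };   // label addresses: GetAddrOfLabel
//     goto *tbl[i];                          // lowered via the shared
//   L0: return;                              // "indirectgoto" block
//   L1: return;
//   }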
19607330f729Sjoerg llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
19617330f729Sjoerg   // If we already made the indirect branch for indirect goto, return its block.
19627330f729Sjoerg   if (IndirectBranch) return IndirectBranch->getParent();
19637330f729Sjoerg 
19647330f729Sjoerg   CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
19657330f729Sjoerg 
19667330f729Sjoerg   // Create the PHI node that indirect gotos will add entries to.
19677330f729Sjoerg   llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
19687330f729Sjoerg                                               "indirect.goto.dest");
19697330f729Sjoerg 
19707330f729Sjoerg   // Create the indirect branch instruction.
19717330f729Sjoerg   IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
19727330f729Sjoerg   return IndirectBranch->getParent();
19737330f729Sjoerg }
19747330f729Sjoerg 
19757330f729Sjoerg /// Computes the length of an array in elements, as well as the base
19767330f729Sjoerg /// element type and a properly-typed first element pointer.
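/// For example (a sketch): for a local variable declared 'int a[n][3][4]',
/// this returns n*12 as the element count, sets baseType to 'int', and
/// adjusts 'addr' to point at the first 'int'.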
19777330f729Sjoerg llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
19787330f729Sjoerg                                               QualType &baseType,
19797330f729Sjoerg                                               Address &addr) {
19807330f729Sjoerg   const ArrayType *arrayType = origArrayType;
19817330f729Sjoerg 
19827330f729Sjoerg   // If it's a VLA, we have to load the stored size.  Note that
19837330f729Sjoerg   // this is the size of the VLA in bytes, not its size in elements.
19847330f729Sjoerg   llvm::Value *numVLAElements = nullptr;
19857330f729Sjoerg   if (isa<VariableArrayType>(arrayType)) {
19867330f729Sjoerg     numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
19877330f729Sjoerg 
19887330f729Sjoerg     // Walk into all VLAs.  This doesn't require changes to addr,
19897330f729Sjoerg     // which has type T* where T is the first non-VLA element type.
19907330f729Sjoerg     do {
19917330f729Sjoerg       QualType elementType = arrayType->getElementType();
19927330f729Sjoerg       arrayType = getContext().getAsArrayType(elementType);
19937330f729Sjoerg 
19947330f729Sjoerg       // If we only have VLA components, 'addr' requires no adjustment.
19957330f729Sjoerg       if (!arrayType) {
19967330f729Sjoerg         baseType = elementType;
19977330f729Sjoerg         return numVLAElements;
19987330f729Sjoerg       }
19997330f729Sjoerg     } while (isa<VariableArrayType>(arrayType));
20007330f729Sjoerg 
20017330f729Sjoerg     // We get out here only if we find a constant array type
20027330f729Sjoerg     // inside the VLA.
20037330f729Sjoerg   }
20047330f729Sjoerg 
20057330f729Sjoerg   // We have some number of constant-length arrays, so addr should
20067330f729Sjoerg   // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
20077330f729Sjoerg   // down to the first element of addr.
20087330f729Sjoerg   SmallVector<llvm::Value*, 8> gepIndices;
20097330f729Sjoerg 
20107330f729Sjoerg   // GEP down to the array type.
20117330f729Sjoerg   llvm::ConstantInt *zero = Builder.getInt32(0);
20127330f729Sjoerg   gepIndices.push_back(zero);
20137330f729Sjoerg 
20147330f729Sjoerg   uint64_t countFromCLAs = 1;
20157330f729Sjoerg   QualType eltType;
20167330f729Sjoerg 
20177330f729Sjoerg   llvm::ArrayType *llvmArrayType =
20187330f729Sjoerg     dyn_cast<llvm::ArrayType>(addr.getElementType());
20197330f729Sjoerg   while (llvmArrayType) {
20207330f729Sjoerg     assert(isa<ConstantArrayType>(arrayType));
20217330f729Sjoerg     assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
20227330f729Sjoerg              == llvmArrayType->getNumElements());
20237330f729Sjoerg 
20247330f729Sjoerg     gepIndices.push_back(zero);
20257330f729Sjoerg     countFromCLAs *= llvmArrayType->getNumElements();
20267330f729Sjoerg     eltType = arrayType->getElementType();
20277330f729Sjoerg 
20287330f729Sjoerg     llvmArrayType =
20297330f729Sjoerg       dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
20307330f729Sjoerg     arrayType = getContext().getAsArrayType(arrayType->getElementType());
20317330f729Sjoerg     assert((!llvmArrayType || arrayType) &&
20327330f729Sjoerg            "LLVM and Clang types are out-of-synch");
20337330f729Sjoerg   }
20347330f729Sjoerg 
20357330f729Sjoerg   if (arrayType) {
20367330f729Sjoerg     // From this point onwards, the Clang array type has been emitted
20377330f729Sjoerg     // as some other type (probably a packed struct). Compute the array
20387330f729Sjoerg     // size, and just emit the 'begin' expression as a bitcast.
20397330f729Sjoerg     while (arrayType) {
20407330f729Sjoerg       countFromCLAs *=
20417330f729Sjoerg           cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
20427330f729Sjoerg       eltType = arrayType->getElementType();
20437330f729Sjoerg       arrayType = getContext().getAsArrayType(eltType);
20447330f729Sjoerg     }
20457330f729Sjoerg 
20467330f729Sjoerg     llvm::Type *baseType = ConvertType(eltType);
20477330f729Sjoerg     addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
20487330f729Sjoerg   } else {
20497330f729Sjoerg     // Create the actual GEP.
2050*e038c9c4Sjoerg     addr = Address(Builder.CreateInBoundsGEP(
2051*e038c9c4Sjoerg         addr.getElementType(), addr.getPointer(), gepIndices, "array.begin"),
20527330f729Sjoerg         addr.getAlignment());
20537330f729Sjoerg   }
20547330f729Sjoerg 
20557330f729Sjoerg   baseType = eltType;
20567330f729Sjoerg 
20577330f729Sjoerg   llvm::Value *numElements
20587330f729Sjoerg     = llvm::ConstantInt::get(SizeTy, countFromCLAs);
20597330f729Sjoerg 
20607330f729Sjoerg   // If we had any VLA dimensions, factor them in.
20617330f729Sjoerg   if (numVLAElements)
20627330f729Sjoerg     numElements = Builder.CreateNUWMul(numVLAElements, numElements);
20637330f729Sjoerg 
20647330f729Sjoerg   return numElements;
20657330f729Sjoerg }
20667330f729Sjoerg 
20677330f729Sjoerg CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
20687330f729Sjoerg   const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
20697330f729Sjoerg   assert(vla && "type was not a variable array type!");
20707330f729Sjoerg   return getVLASize(vla);
20717330f729Sjoerg }
20727330f729Sjoerg 
20737330f729Sjoerg CodeGenFunction::VlaSizePair
20747330f729Sjoerg CodeGenFunction::getVLASize(const VariableArrayType *type) {
20757330f729Sjoerg   // The number of elements so far; always size_t.
20767330f729Sjoerg   llvm::Value *numElements = nullptr;
20777330f729Sjoerg 
20787330f729Sjoerg   QualType elementType;
20797330f729Sjoerg   do {
20807330f729Sjoerg     elementType = type->getElementType();
20817330f729Sjoerg     llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
20827330f729Sjoerg     assert(vlaSize && "no size for VLA!");
20837330f729Sjoerg     assert(vlaSize->getType() == SizeTy);
20847330f729Sjoerg 
20857330f729Sjoerg     if (!numElements) {
20867330f729Sjoerg       numElements = vlaSize;
20877330f729Sjoerg     } else {
20887330f729Sjoerg       // It's undefined behavior if this wraps around, so mark it that way.
20897330f729Sjoerg       // FIXME: Teach -fsanitize=undefined to trap this.
20907330f729Sjoerg       numElements = Builder.CreateNUWMul(numElements, vlaSize);
20917330f729Sjoerg     }
20927330f729Sjoerg   } while ((type = getContext().getAsVariableArrayType(elementType)));
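  // E.g. (sketch): for 'int a[n][m]' this yields numElements == n*m (as an
  // NUW multiply) and elementType == 'int'.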
20937330f729Sjoerg 
20947330f729Sjoerg   return { numElements, elementType };
20957330f729Sjoerg }
20967330f729Sjoerg 
20977330f729Sjoerg CodeGenFunction::VlaSizePair
20987330f729Sjoerg CodeGenFunction::getVLAElements1D(QualType type) {
20997330f729Sjoerg   const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
21007330f729Sjoerg   assert(vla && "type was not a variable array type!");
21017330f729Sjoerg   return getVLAElements1D(vla);
21027330f729Sjoerg }
21037330f729Sjoerg 
21047330f729Sjoerg CodeGenFunction::VlaSizePair
21057330f729Sjoerg CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
21067330f729Sjoerg   llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
21077330f729Sjoerg   assert(VlaSize && "no size for VLA!");
21087330f729Sjoerg   assert(VlaSize->getType() == SizeTy);
21097330f729Sjoerg   return { VlaSize, Vla->getElementType() };
21107330f729Sjoerg }
21117330f729Sjoerg 
21127330f729Sjoerg void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
21137330f729Sjoerg   assert(type->isVariablyModifiedType() &&
21147330f729Sjoerg          "Must pass variably modified type to EmitVLASizes!");
21157330f729Sjoerg 
21167330f729Sjoerg   EnsureInsertPoint();
21177330f729Sjoerg 
21187330f729Sjoerg   // We're going to walk down into the type and look for VLA
21197330f729Sjoerg   // expressions.
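  // For example (sketch): for 'int (*p)[n][m]' we step through Pointer ->
  // VariableArray -> VariableArray, evaluating 'n' and 'm' once each and
  // caching the results in VLASizeMap.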
21207330f729Sjoerg   do {
21217330f729Sjoerg     assert(type->isVariablyModifiedType());
21227330f729Sjoerg 
21237330f729Sjoerg     const Type *ty = type.getTypePtr();
21247330f729Sjoerg     switch (ty->getTypeClass()) {
21257330f729Sjoerg 
21267330f729Sjoerg #define TYPE(Class, Base)
21277330f729Sjoerg #define ABSTRACT_TYPE(Class, Base)
21287330f729Sjoerg #define NON_CANONICAL_TYPE(Class, Base)
21297330f729Sjoerg #define DEPENDENT_TYPE(Class, Base) case Type::Class:
21307330f729Sjoerg #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
21317330f729Sjoerg #include "clang/AST/TypeNodes.inc"
21327330f729Sjoerg       llvm_unreachable("unexpected dependent type!");
21337330f729Sjoerg 
21347330f729Sjoerg     // These types are never variably-modified.
21357330f729Sjoerg     case Type::Builtin:
21367330f729Sjoerg     case Type::Complex:
21377330f729Sjoerg     case Type::Vector:
21387330f729Sjoerg     case Type::ExtVector:
2139*e038c9c4Sjoerg     case Type::ConstantMatrix:
21407330f729Sjoerg     case Type::Record:
21417330f729Sjoerg     case Type::Enum:
21427330f729Sjoerg     case Type::Elaborated:
21437330f729Sjoerg     case Type::TemplateSpecialization:
21447330f729Sjoerg     case Type::ObjCTypeParam:
21457330f729Sjoerg     case Type::ObjCObject:
21467330f729Sjoerg     case Type::ObjCInterface:
21477330f729Sjoerg     case Type::ObjCObjectPointer:
2148*e038c9c4Sjoerg     case Type::ExtInt:
21497330f729Sjoerg       llvm_unreachable("type class is never variably-modified!");
21507330f729Sjoerg 
21517330f729Sjoerg     case Type::Adjusted:
21527330f729Sjoerg       type = cast<AdjustedType>(ty)->getAdjustedType();
21537330f729Sjoerg       break;
21547330f729Sjoerg 
21557330f729Sjoerg     case Type::Decayed:
21567330f729Sjoerg       type = cast<DecayedType>(ty)->getPointeeType();
21577330f729Sjoerg       break;
21587330f729Sjoerg 
21597330f729Sjoerg     case Type::Pointer:
21607330f729Sjoerg       type = cast<PointerType>(ty)->getPointeeType();
21617330f729Sjoerg       break;
21627330f729Sjoerg 
21637330f729Sjoerg     case Type::BlockPointer:
21647330f729Sjoerg       type = cast<BlockPointerType>(ty)->getPointeeType();
21657330f729Sjoerg       break;
21667330f729Sjoerg 
21677330f729Sjoerg     case Type::LValueReference:
21687330f729Sjoerg     case Type::RValueReference:
21697330f729Sjoerg       type = cast<ReferenceType>(ty)->getPointeeType();
21707330f729Sjoerg       break;
21717330f729Sjoerg 
21727330f729Sjoerg     case Type::MemberPointer:
21737330f729Sjoerg       type = cast<MemberPointerType>(ty)->getPointeeType();
21747330f729Sjoerg       break;
21757330f729Sjoerg 
21767330f729Sjoerg     case Type::ConstantArray:
21777330f729Sjoerg     case Type::IncompleteArray:
21787330f729Sjoerg       // Losing element qualification here is fine.
21797330f729Sjoerg       type = cast<ArrayType>(ty)->getElementType();
21807330f729Sjoerg       break;
21817330f729Sjoerg 
21827330f729Sjoerg     case Type::VariableArray: {
21837330f729Sjoerg       // Losing element qualification here is fine.
21847330f729Sjoerg       const VariableArrayType *vat = cast<VariableArrayType>(ty);
21857330f729Sjoerg 
21867330f729Sjoerg       // Unknown size indication requires no size computation.
21877330f729Sjoerg       // Otherwise, evaluate and record it.
21887330f729Sjoerg       if (const Expr *size = vat->getSizeExpr()) {
21897330f729Sjoerg         // It's possible that we might have emitted this already,
21907330f729Sjoerg         // e.g. with a typedef and a pointer to it.
21917330f729Sjoerg         llvm::Value *&entry = VLASizeMap[size];
21927330f729Sjoerg         if (!entry) {
21937330f729Sjoerg           llvm::Value *Size = EmitScalarExpr(size);
21947330f729Sjoerg 
21957330f729Sjoerg           // C11 6.7.6.2p5:
21967330f729Sjoerg           //   If the size is an expression that is not an integer constant
21977330f729Sjoerg           //   expression [...] each time it is evaluated it shall have a value
21987330f729Sjoerg           //   greater than zero.
21997330f729Sjoerg           if (SanOpts.has(SanitizerKind::VLABound) &&
22007330f729Sjoerg               size->getType()->isSignedIntegerType()) {
22017330f729Sjoerg             SanitizerScope SanScope(this);
22027330f729Sjoerg             llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
22037330f729Sjoerg             llvm::Constant *StaticArgs[] = {
22047330f729Sjoerg                 EmitCheckSourceLocation(size->getBeginLoc()),
22057330f729Sjoerg                 EmitCheckTypeDescriptor(size->getType())};
22067330f729Sjoerg             EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
22077330f729Sjoerg                                      SanitizerKind::VLABound),
22087330f729Sjoerg                       SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
22097330f729Sjoerg           }
22107330f729Sjoerg 
22117330f729Sjoerg           // Always zexting here would be wrong if it weren't
22127330f729Sjoerg           // undefined behavior to have a negative bound.
22137330f729Sjoerg           entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
22147330f729Sjoerg         }
22157330f729Sjoerg       }
22167330f729Sjoerg       type = vat->getElementType();
22177330f729Sjoerg       break;
22187330f729Sjoerg     }
22197330f729Sjoerg 
22207330f729Sjoerg     case Type::FunctionProto:
22217330f729Sjoerg     case Type::FunctionNoProto:
22227330f729Sjoerg       type = cast<FunctionType>(ty)->getReturnType();
22237330f729Sjoerg       break;
22247330f729Sjoerg 
22257330f729Sjoerg     case Type::Paren:
22267330f729Sjoerg     case Type::TypeOf:
22277330f729Sjoerg     case Type::UnaryTransform:
22287330f729Sjoerg     case Type::Attributed:
22297330f729Sjoerg     case Type::SubstTemplateTypeParm:
22307330f729Sjoerg     case Type::MacroQualified:
22317330f729Sjoerg       // Keep walking after single level desugaring.
22327330f729Sjoerg       type = type.getSingleStepDesugaredType(getContext());
22337330f729Sjoerg       break;
22347330f729Sjoerg 
22357330f729Sjoerg     case Type::Typedef:
22367330f729Sjoerg     case Type::Decltype:
22377330f729Sjoerg     case Type::Auto:
22387330f729Sjoerg     case Type::DeducedTemplateSpecialization:
22397330f729Sjoerg       // Stop walking: nothing to do.
22407330f729Sjoerg       return;
22417330f729Sjoerg 
22427330f729Sjoerg     case Type::TypeOfExpr:
22437330f729Sjoerg       // Stop walking: emit typeof expression.
22447330f729Sjoerg       EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
22457330f729Sjoerg       return;
22467330f729Sjoerg 
22477330f729Sjoerg     case Type::Atomic:
22487330f729Sjoerg       type = cast<AtomicType>(ty)->getValueType();
22497330f729Sjoerg       break;
22507330f729Sjoerg 
22517330f729Sjoerg     case Type::Pipe:
22527330f729Sjoerg       type = cast<PipeType>(ty)->getElementType();
22537330f729Sjoerg       break;
22547330f729Sjoerg     }
22557330f729Sjoerg   } while (type->isVariablyModifiedType());
22567330f729Sjoerg }
22577330f729Sjoerg 
22587330f729Sjoerg Address CodeGenFunction::EmitVAListRef(const Expr* E) {
22597330f729Sjoerg   if (getContext().getBuiltinVaListType()->isArrayType())
22607330f729Sjoerg     return EmitPointerWithAlignment(E);
2261*e038c9c4Sjoerg   return EmitLValue(E).getAddress(*this);
22627330f729Sjoerg }
22637330f729Sjoerg 
22647330f729Sjoerg Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2265*e038c9c4Sjoerg   return EmitLValue(E).getAddress(*this);
22667330f729Sjoerg }
22677330f729Sjoerg 
22687330f729Sjoerg void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
22697330f729Sjoerg                                               const APValue &Init) {
22707330f729Sjoerg   assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
22717330f729Sjoerg   if (CGDebugInfo *Dbg = getDebugInfo())
2272*e038c9c4Sjoerg     if (CGM.getCodeGenOpts().hasReducedDebugInfo())
22737330f729Sjoerg       Dbg->EmitGlobalVariable(E->getDecl(), Init);
22747330f729Sjoerg }
22757330f729Sjoerg 
22767330f729Sjoerg CodeGenFunction::PeepholeProtection
22777330f729Sjoerg CodeGenFunction::protectFromPeepholes(RValue rvalue) {
22787330f729Sjoerg   // At the moment, the only aggressive peephole we do in IR gen
22797330f729Sjoerg   // is trunc(zext) folding, but if we add more, we can easily
22807330f729Sjoerg   // extend this protection.
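  // (Sketch: if 'value' is 'zext i1 %b to i32', a later trunc of it back to
  // i1 could otherwise be folded straight to %b; the extra bitcast below
  // hides the zext until unprotectFromPeepholes() removes it again.)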
22817330f729Sjoerg 
22827330f729Sjoerg   if (!rvalue.isScalar()) return PeepholeProtection();
22837330f729Sjoerg   llvm::Value *value = rvalue.getScalarVal();
22847330f729Sjoerg   if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
22857330f729Sjoerg 
22867330f729Sjoerg   // Just make an extra bitcast.
22877330f729Sjoerg   assert(HaveInsertPoint());
22887330f729Sjoerg   llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
22897330f729Sjoerg                                                   Builder.GetInsertBlock());
22907330f729Sjoerg 
22917330f729Sjoerg   PeepholeProtection protection;
22927330f729Sjoerg   protection.Inst = inst;
22937330f729Sjoerg   return protection;
22947330f729Sjoerg }
22957330f729Sjoerg 
22967330f729Sjoerg void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
22977330f729Sjoerg   if (!protection.Inst) return;
22987330f729Sjoerg 
22997330f729Sjoerg   // In theory, we could try to duplicate the peepholes now, but whatever.
23007330f729Sjoerg   protection.Inst->eraseFromParent();
23017330f729Sjoerg }
23027330f729Sjoerg 
2303*e038c9c4Sjoerg void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
23047330f729Sjoerg                                               QualType Ty, SourceLocation Loc,
23057330f729Sjoerg                                               SourceLocation AssumptionLoc,
23067330f729Sjoerg                                               llvm::Value *Alignment,
23077330f729Sjoerg                                               llvm::Value *OffsetValue) {
2308*e038c9c4Sjoerg   if (Alignment->getType() != IntPtrTy)
2309*e038c9c4Sjoerg     Alignment =
2310*e038c9c4Sjoerg         Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
2311*e038c9c4Sjoerg   if (OffsetValue && OffsetValue->getType() != IntPtrTy)
2312*e038c9c4Sjoerg     OffsetValue =
2313*e038c9c4Sjoerg         Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
2314*e038c9c4Sjoerg   llvm::Value *TheCheck = nullptr;
23157330f729Sjoerg   if (SanOpts.has(SanitizerKind::Alignment)) {
2316*e038c9c4Sjoerg     llvm::Value *PtrIntValue =
2317*e038c9c4Sjoerg         Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
2318*e038c9c4Sjoerg 
2319*e038c9c4Sjoerg     if (OffsetValue) {
2320*e038c9c4Sjoerg       bool IsOffsetZero = false;
2321*e038c9c4Sjoerg       if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
2322*e038c9c4Sjoerg         IsOffsetZero = CI->isZero();
2323*e038c9c4Sjoerg 
2324*e038c9c4Sjoerg       if (!IsOffsetZero)
2325*e038c9c4Sjoerg         PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
23267330f729Sjoerg     }
23277330f729Sjoerg 
2328*e038c9c4Sjoerg     llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
2329*e038c9c4Sjoerg     llvm::Value *Mask =
2330*e038c9c4Sjoerg         Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
2331*e038c9c4Sjoerg     llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
2332*e038c9c4Sjoerg     TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
2333*e038c9c4Sjoerg   }
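  // In other words (sketch): with the alignment sanitizer enabled, for
  // Alignment == 16 and an offset 'o', TheCheck computes
  // ((ptrtoint(PtrValue) - o) & 15) == 0; it is handed to
  // emitAlignmentAssumptionCheck() together with the llvm.assume emitted below.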
2334*e038c9c4Sjoerg   llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2335*e038c9c4Sjoerg       CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
2336*e038c9c4Sjoerg 
2337*e038c9c4Sjoerg   if (!SanOpts.has(SanitizerKind::Alignment))
2338*e038c9c4Sjoerg     return;
2339*e038c9c4Sjoerg   emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2340*e038c9c4Sjoerg                                OffsetValue, TheCheck, Assumption);
2341*e038c9c4Sjoerg }
2342*e038c9c4Sjoerg 
2343*e038c9c4Sjoerg void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
23447330f729Sjoerg                                               const Expr *E,
23457330f729Sjoerg                                               SourceLocation AssumptionLoc,
23467330f729Sjoerg                                               llvm::Value *Alignment,
23477330f729Sjoerg                                               llvm::Value *OffsetValue) {
23487330f729Sjoerg   if (auto *CE = dyn_cast<CastExpr>(E))
23497330f729Sjoerg     E = CE->getSubExprAsWritten();
23507330f729Sjoerg   QualType Ty = E->getType();
23517330f729Sjoerg   SourceLocation Loc = E->getExprLoc();
23527330f729Sjoerg 
2353*e038c9c4Sjoerg   emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
23547330f729Sjoerg                           OffsetValue);
23557330f729Sjoerg }
23567330f729Sjoerg 
23577330f729Sjoerg llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
23587330f729Sjoerg                                                  llvm::Value *AnnotatedVal,
23597330f729Sjoerg                                                  StringRef AnnotationStr,
2360*e038c9c4Sjoerg                                                  SourceLocation Location,
2361*e038c9c4Sjoerg                                                  const AnnotateAttr *Attr) {
2362*e038c9c4Sjoerg   SmallVector<llvm::Value *, 5> Args = {
23637330f729Sjoerg       AnnotatedVal,
23647330f729Sjoerg       Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
23657330f729Sjoerg       Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
2366*e038c9c4Sjoerg       CGM.EmitAnnotationLineNo(Location),
23677330f729Sjoerg   };
2368*e038c9c4Sjoerg   if (Attr)
2369*e038c9c4Sjoerg     Args.push_back(CGM.EmitAnnotationArgs(Attr));
23707330f729Sjoerg   return Builder.CreateCall(AnnotationFn, Args);
23717330f729Sjoerg }
23727330f729Sjoerg 
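// Usage sketch: for a declaration such as
//
//   __attribute__((annotate("my_tag"))) int x;
//
// this emits one llvm.var.annotation call per annotate attribute, passing the
// variable's address (bitcast to i8*) and the annotation string.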
23737330f729Sjoerg void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
23747330f729Sjoerg   assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
23757330f729Sjoerg   // FIXME We create a new bitcast for every annotation because that's what
23767330f729Sjoerg   // llvm-gcc was doing.
23777330f729Sjoerg   for (const auto *I : D->specific_attrs<AnnotateAttr>())
23787330f729Sjoerg     EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
23797330f729Sjoerg                        Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
2380*e038c9c4Sjoerg                        I->getAnnotation(), D->getLocation(), I);
23817330f729Sjoerg }
23827330f729Sjoerg 
23837330f729Sjoerg Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
23847330f729Sjoerg                                               Address Addr) {
23857330f729Sjoerg   assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
23867330f729Sjoerg   llvm::Value *V = Addr.getPointer();
23877330f729Sjoerg   llvm::Type *VTy = V->getType();
23887330f729Sjoerg   llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
23897330f729Sjoerg                                     CGM.Int8PtrTy);
23907330f729Sjoerg 
23917330f729Sjoerg   for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
23927330f729Sjoerg     // FIXME Always emit the cast inst so we can differentiate between
23937330f729Sjoerg     // annotation on the first field of a struct and annotation on the struct
23947330f729Sjoerg     // itself.
23957330f729Sjoerg     if (VTy != CGM.Int8PtrTy)
23967330f729Sjoerg       V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
2397*e038c9c4Sjoerg     V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
23987330f729Sjoerg     V = Builder.CreateBitCast(V, VTy);
23997330f729Sjoerg   }
24007330f729Sjoerg 
24017330f729Sjoerg   return Address(V, Addr.getAlignment());
24027330f729Sjoerg }
24037330f729Sjoerg 
24047330f729Sjoerg CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
24057330f729Sjoerg 
24067330f729Sjoerg CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
24077330f729Sjoerg     : CGF(CGF) {
24087330f729Sjoerg   assert(!CGF->IsSanitizerScope);
24097330f729Sjoerg   CGF->IsSanitizerScope = true;
24107330f729Sjoerg }
24117330f729Sjoerg 
24127330f729Sjoerg CodeGenFunction::SanitizerScope::~SanitizerScope() {
24137330f729Sjoerg   CGF->IsSanitizerScope = false;
24147330f729Sjoerg }
24157330f729Sjoerg 
24167330f729Sjoerg void CodeGenFunction::InsertHelper(llvm::Instruction *I,
24177330f729Sjoerg                                    const llvm::Twine &Name,
24187330f729Sjoerg                                    llvm::BasicBlock *BB,
24197330f729Sjoerg                                    llvm::BasicBlock::iterator InsertPt) const {
24207330f729Sjoerg   LoopStack.InsertHelper(I);
24217330f729Sjoerg   if (IsSanitizerScope)
24227330f729Sjoerg     CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
24237330f729Sjoerg }
24247330f729Sjoerg 
24257330f729Sjoerg void CGBuilderInserter::InsertHelper(
24267330f729Sjoerg     llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
24277330f729Sjoerg     llvm::BasicBlock::iterator InsertPt) const {
24287330f729Sjoerg   llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
24297330f729Sjoerg   if (CGF)
24307330f729Sjoerg     CGF->InsertHelper(I, Name, BB, InsertPt);
24317330f729Sjoerg }
24327330f729Sjoerg 
24337330f729Sjoerg // Emits an error if we don't have a valid set of target features for the
24347330f729Sjoerg // called function.
24357330f729Sjoerg void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
24367330f729Sjoerg                                           const FunctionDecl *TargetDecl) {
24377330f729Sjoerg   return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
24387330f729Sjoerg }
24397330f729Sjoerg 
24407330f729Sjoerg // Emits an error if we don't have a valid set of target features for the
24417330f729Sjoerg // called function.
24427330f729Sjoerg void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
24437330f729Sjoerg                                           const FunctionDecl *TargetDecl) {
24447330f729Sjoerg   // Early exit if this is an indirect call.
24457330f729Sjoerg   if (!TargetDecl)
24467330f729Sjoerg     return;
24477330f729Sjoerg 
24487330f729Sjoerg   // Get the current enclosing function if it exists. If it doesn't
24497330f729Sjoerg   // we can't check the target features anyhow.
24507330f729Sjoerg   const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
24517330f729Sjoerg   if (!FD)
24527330f729Sjoerg     return;
24537330f729Sjoerg 
24547330f729Sjoerg   // Grab the required features for the call. For a builtin these are listed
24557330f729Sjoerg   // in the .td file with the default CPU; for an always_inline function they
24567330f729Sjoerg   // are any listed CPU and any listed features.
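  // For example (sketch): calling an always_inline function declared with
  // __attribute__((target("avx2"))) from a caller built without AVX2 enabled
  // is diagnosed below with err_function_needs_feature.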
24577330f729Sjoerg   unsigned BuiltinID = TargetDecl->getBuiltinID();
24587330f729Sjoerg   std::string MissingFeature;
2459*e038c9c4Sjoerg   llvm::StringMap<bool> CallerFeatureMap;
2460*e038c9c4Sjoerg   CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
24617330f729Sjoerg   if (BuiltinID) {
2462*e038c9c4Sjoerg     StringRef FeatureList(
2463*e038c9c4Sjoerg         CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
24647330f729Sjoerg     // Return if the builtin doesn't have any required features.
2465*e038c9c4Sjoerg     if (FeatureList.empty())
24667330f729Sjoerg       return;
2467*e038c9c4Sjoerg     assert(FeatureList.find(' ') == StringRef::npos &&
2468*e038c9c4Sjoerg            "Space in feature list");
2469*e038c9c4Sjoerg     TargetFeatures TF(CallerFeatureMap);
2470*e038c9c4Sjoerg     if (!TF.hasRequiredFeatures(FeatureList))
24717330f729Sjoerg       CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2472*e038c9c4Sjoerg           << TargetDecl->getDeclName() << FeatureList;
2473*e038c9c4Sjoerg   } else if (!TargetDecl->isMultiVersion() &&
2474*e038c9c4Sjoerg              TargetDecl->hasAttr<TargetAttr>()) {
24757330f729Sjoerg     // Get the required features for the callee.
24767330f729Sjoerg 
24777330f729Sjoerg     const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2478*e038c9c4Sjoerg     ParsedTargetAttr ParsedAttr =
2479*e038c9c4Sjoerg         CGM.getContext().filterFunctionTargetAttrs(TD);
24807330f729Sjoerg 
24817330f729Sjoerg     SmallVector<StringRef, 1> ReqFeatures;
24827330f729Sjoerg     llvm::StringMap<bool> CalleeFeatureMap;
2483*e038c9c4Sjoerg     CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
24847330f729Sjoerg 
24857330f729Sjoerg     for (const auto &F : ParsedAttr.Features) {
24867330f729Sjoerg       if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
24877330f729Sjoerg         ReqFeatures.push_back(StringRef(F).substr(1));
24887330f729Sjoerg     }
24897330f729Sjoerg 
24907330f729Sjoerg     for (const auto &F : CalleeFeatureMap) {
24917330f729Sjoerg       // Only positive features are "required".
24927330f729Sjoerg       if (F.getValue())
24937330f729Sjoerg         ReqFeatures.push_back(F.getKey());
24947330f729Sjoerg     }
2495*e038c9c4Sjoerg     if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) {
2496*e038c9c4Sjoerg       if (!CallerFeatureMap.lookup(Feature)) {
2497*e038c9c4Sjoerg         MissingFeature = Feature.str();
2498*e038c9c4Sjoerg         return false;
2499*e038c9c4Sjoerg       }
2500*e038c9c4Sjoerg       return true;
2501*e038c9c4Sjoerg     }))
25027330f729Sjoerg       CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
25037330f729Sjoerg           << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
25047330f729Sjoerg   }
25057330f729Sjoerg }
25067330f729Sjoerg 
25077330f729Sjoerg void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
25087330f729Sjoerg   if (!CGM.getCodeGenOpts().SanitizeStats)
25097330f729Sjoerg     return;
25107330f729Sjoerg 
25117330f729Sjoerg   llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
25127330f729Sjoerg   IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
25137330f729Sjoerg   CGM.getSanStats().create(IRB, SSK);
25147330f729Sjoerg }
25157330f729Sjoerg 
25167330f729Sjoerg llvm::Value *
25177330f729Sjoerg CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
25187330f729Sjoerg   llvm::Value *Condition = nullptr;
25197330f729Sjoerg 
25207330f729Sjoerg   if (!RO.Conditions.Architecture.empty())
25217330f729Sjoerg     Condition = EmitX86CpuIs(RO.Conditions.Architecture);
25227330f729Sjoerg 
25237330f729Sjoerg   if (!RO.Conditions.Features.empty()) {
25247330f729Sjoerg     llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
25257330f729Sjoerg     Condition =
25267330f729Sjoerg         Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
25277330f729Sjoerg   }
25287330f729Sjoerg   return Condition;
25297330f729Sjoerg }
25307330f729Sjoerg 
25317330f729Sjoerg static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
25327330f729Sjoerg                                              llvm::Function *Resolver,
25337330f729Sjoerg                                              CGBuilderTy &Builder,
25347330f729Sjoerg                                              llvm::Function *FuncToReturn,
25357330f729Sjoerg                                              bool SupportsIFunc) {
25367330f729Sjoerg   if (SupportsIFunc) {
25377330f729Sjoerg     Builder.CreateRet(FuncToReturn);
25387330f729Sjoerg     return;
25397330f729Sjoerg   }
25407330f729Sjoerg 
25417330f729Sjoerg   llvm::SmallVector<llvm::Value *, 10> Args;
25427330f729Sjoerg   llvm::for_each(Resolver->args(),
25437330f729Sjoerg                  [&](llvm::Argument &Arg) { Args.push_back(&Arg); });
25447330f729Sjoerg 
25457330f729Sjoerg   llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
25467330f729Sjoerg   Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
25477330f729Sjoerg 
25487330f729Sjoerg   if (Resolver->getReturnType()->isVoidTy())
25497330f729Sjoerg     Builder.CreateRetVoid();
25507330f729Sjoerg   else
25517330f729Sjoerg     Builder.CreateRet(Result);
25527330f729Sjoerg }
25537330f729Sjoerg 
25547330f729Sjoerg void CodeGenFunction::EmitMultiVersionResolver(
25557330f729Sjoerg     llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2556*e038c9c4Sjoerg   assert(getContext().getTargetInfo().getTriple().isX86() &&
25577330f729Sjoerg          "Only implemented for x86 targets");
25587330f729Sjoerg 
25597330f729Sjoerg   bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
25607330f729Sjoerg 
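  // Shape of the emitted resolver (an illustrative sketch), e.g. for
  //   __attribute__((target_clones("avx2", "default"))) void foo(void);
  //
  //   resolver_entry:   initialize the CPU model, then test for "avx2"
  //   resolver_return:  return (or musttail-call) the avx2 variant
  //   resolver_else:    fall through to the default variant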
25617330f729Sjoerg   // The resolver function's entry basic block.
25627330f729Sjoerg   llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
25637330f729Sjoerg   Builder.SetInsertPoint(CurBlock);
25647330f729Sjoerg   EmitX86CpuInit();
25657330f729Sjoerg 
25667330f729Sjoerg   for (const MultiVersionResolverOption &RO : Options) {
25677330f729Sjoerg     Builder.SetInsertPoint(CurBlock);
25687330f729Sjoerg     llvm::Value *Condition = FormResolverCondition(RO);
25697330f729Sjoerg 
25707330f729Sjoerg     // The 'default' or 'generic' case.
25717330f729Sjoerg     if (!Condition) {
25727330f729Sjoerg       assert(&RO == Options.end() - 1 &&
25737330f729Sjoerg              "Default or Generic case must be last");
25747330f729Sjoerg       CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
25757330f729Sjoerg                                        SupportsIFunc);
25767330f729Sjoerg       return;
25777330f729Sjoerg     }
25787330f729Sjoerg 
25797330f729Sjoerg     llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
25807330f729Sjoerg     CGBuilderTy RetBuilder(*this, RetBlock);
25817330f729Sjoerg     CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
25827330f729Sjoerg                                      SupportsIFunc);
25837330f729Sjoerg     CurBlock = createBasicBlock("resolver_else", Resolver);
25847330f729Sjoerg     Builder.CreateCondBr(Condition, RetBlock, CurBlock);
25857330f729Sjoerg   }
25867330f729Sjoerg 
25877330f729Sjoerg   // If no generic/default, emit an unreachable.
25887330f729Sjoerg   Builder.SetInsertPoint(CurBlock);
25897330f729Sjoerg   llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
25907330f729Sjoerg   TrapCall->setDoesNotReturn();
25917330f729Sjoerg   TrapCall->setDoesNotThrow();
25927330f729Sjoerg   Builder.CreateUnreachable();
25937330f729Sjoerg   Builder.ClearInsertionPoint();
25947330f729Sjoerg }
25957330f729Sjoerg 
25967330f729Sjoerg // Loc - where the diagnostic will point, where in the source code this
25977330f729Sjoerg //  alignment has failed.
25987330f729Sjoerg // SecondaryLoc - if present (will be present if sufficiently different from
25997330f729Sjoerg //  Loc), the diagnostic will additionally point a "Note:" to this location.
26007330f729Sjoerg //  It should be the location where the __attribute__((assume_aligned))
26017330f729Sjoerg //  was written, for example.
2602*e038c9c4Sjoerg void CodeGenFunction::emitAlignmentAssumptionCheck(
26037330f729Sjoerg     llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
26047330f729Sjoerg     SourceLocation SecondaryLoc, llvm::Value *Alignment,
26057330f729Sjoerg     llvm::Value *OffsetValue, llvm::Value *TheCheck,
26067330f729Sjoerg     llvm::Instruction *Assumption) {
26077330f729Sjoerg   assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2608*e038c9c4Sjoerg          cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
26097330f729Sjoerg              llvm::Intrinsic::getDeclaration(
26107330f729Sjoerg                  Builder.GetInsertBlock()->getParent()->getParent(),
26117330f729Sjoerg                  llvm::Intrinsic::assume) &&
26127330f729Sjoerg          "Assumption should be a call to llvm.assume().");
26137330f729Sjoerg   assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
26147330f729Sjoerg          "Assumption should be the last instruction of the basic block, "
26157330f729Sjoerg          "since the basic block is still being generated.");
26167330f729Sjoerg 
26177330f729Sjoerg   if (!SanOpts.has(SanitizerKind::Alignment))
26187330f729Sjoerg     return;
26197330f729Sjoerg 
26207330f729Sjoerg   // Don't check pointers to volatile data. The behavior here is implementation-
26217330f729Sjoerg   // defined.
26227330f729Sjoerg   if (Ty->getPointeeType().isVolatileQualified())
26237330f729Sjoerg     return;
26247330f729Sjoerg 
26257330f729Sjoerg   // We need to temporarily remove the assumption so we can insert the
26267330f729Sjoerg   // sanitizer check before it; otherwise the check will be dropped by optimizations.
26277330f729Sjoerg   Assumption->removeFromParent();
26287330f729Sjoerg 
26297330f729Sjoerg   {
26307330f729Sjoerg     SanitizerScope SanScope(this);
26317330f729Sjoerg 
26327330f729Sjoerg     if (!OffsetValue)
26337330f729Sjoerg       OffsetValue = Builder.getInt1(0); // no offset.
26347330f729Sjoerg 
26357330f729Sjoerg     llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
26367330f729Sjoerg                                     EmitCheckSourceLocation(SecondaryLoc),
26377330f729Sjoerg                                     EmitCheckTypeDescriptor(Ty)};
26387330f729Sjoerg     llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
26397330f729Sjoerg                                   EmitCheckValue(Alignment),
26407330f729Sjoerg                                   EmitCheckValue(OffsetValue)};
26417330f729Sjoerg     EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
26427330f729Sjoerg               SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
26437330f729Sjoerg   }
26447330f729Sjoerg 
26457330f729Sjoerg   // We are now in the (new, empty) "cont" basic block.
26467330f729Sjoerg   // Reintroduce the assumption.
26477330f729Sjoerg   Builder.Insert(Assumption);
26487330f729Sjoerg   // FIXME: Assumption still has its original basic block as its Parent.
26497330f729Sjoerg }
26507330f729Sjoerg 
26517330f729Sjoerg llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
26527330f729Sjoerg   if (CGDebugInfo *DI = getDebugInfo())
26537330f729Sjoerg     return DI->SourceLocToDebugLoc(Location);
26547330f729Sjoerg 
26557330f729Sjoerg   return llvm::DebugLoc();
26567330f729Sjoerg }
2657*e038c9c4Sjoerg 
2658*e038c9c4Sjoerg llvm::Value *
2659*e038c9c4Sjoerg CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
2660*e038c9c4Sjoerg                                                       Stmt::Likelihood LH) {
2661*e038c9c4Sjoerg   switch (LH) {
2662*e038c9c4Sjoerg   case Stmt::LH_None:
2663*e038c9c4Sjoerg     return Cond;
2664*e038c9c4Sjoerg   case Stmt::LH_Likely:
2665*e038c9c4Sjoerg   case Stmt::LH_Unlikely:
2666*e038c9c4Sjoerg     // Don't generate llvm.expect on -O0 as the backend won't use it for
2667*e038c9c4Sjoerg     // anything.
2668*e038c9c4Sjoerg     if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2669*e038c9c4Sjoerg       return Cond;
2670*e038c9c4Sjoerg     llvm::Type *CondTy = Cond->getType();
2671*e038c9c4Sjoerg     assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean");
2672*e038c9c4Sjoerg     llvm::Function *FnExpect =
2673*e038c9c4Sjoerg         CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy);
2674*e038c9c4Sjoerg     llvm::Value *ExpectedValueOfCond =
2675*e038c9c4Sjoerg         llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely);
2676*e038c9c4Sjoerg     return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond},
2677*e038c9c4Sjoerg                               Cond->getName() + ".expval");
2678*e038c9c4Sjoerg   }
2679*e038c9c4Sjoerg   llvm_unreachable("Unknown Likelihood");
2680*e038c9c4Sjoerg }
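// Example (sketch): for 'if (x) [[likely]] { ... }' at -O1 or above, the
// branch condition becomes roughly
//
//   %x.expval = call i1 @llvm.expect.i1(i1 %x, i1 true)
//
// which the LowerExpectIntrinsic pass later turns into branch weights.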
2681