//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGHLSLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CRC.h"
#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Sanitizers may use markers.
  if (CGOpts.SanitizeAddressUseAfterScope ||
      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
      LangOpts.Sanitize.has(SanitizerKind::Memory))
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
      DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm),
      ShouldEmitLifetimeMarkers(
          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();
  EHStack.setCGF(this);

  SetFastMathFlags(CurFPFeatures);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);

  // If we have an OpenMPIRBuilder we want to finalize functions (incl.
  // outlining etc) at some point. Doing it once the function codegen is done
  // seems to be a reasonable spot. We do it here, as opposed to the deletion
  // time of the CodeGenModule, because we have to ensure the IR has not yet
  // been "emitted" to the outside, thus, modifications are still sensible.
  if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
    CGM.getOpenMPRuntime().getOMPBuilder().finalize(CurFn);
}

// Map the LangOption for exception behavior into
// the corresponding enum in the IR.
llvm::fp::ExceptionBehavior
clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {

  switch (Kind) {
  case LangOptions::FPE_Ignore: return llvm::fp::ebIgnore;
  case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
  case LangOptions::FPE_Strict: return llvm::fp::ebStrict;
  default:
    llvm_unreachable("Unsupported FP Exception Behavior");
  }
}

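// Translate the given FPOptions into the corresponding LLVM fast-math flags
// and install them as the IR builder's defaults.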
void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
  llvm::FastMathFlags FMF;
  FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
  FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
  FMF.setNoInfs(FPFeatures.getNoHonorInfs());
  FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
  FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
  FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
  Builder.setFastMathFlags(FMF);
}

CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  const Expr *E)
    : CGF(CGF) {
  ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
}

CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  FPOptions FPFeatures)
    : CGF(CGF) {
  ConstructorHelper(FPFeatures);
}

void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
  OldFPFeatures = CGF.CurFPFeatures;
  CGF.CurFPFeatures = FPFeatures;

  OldExcept = CGF.Builder.getDefaultConstrainedExcept();
  OldRounding = CGF.Builder.getDefaultConstrainedRounding();

  if (OldFPFeatures == FPFeatures)
    return;

  FMFGuard.emplace(CGF.Builder);

  llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
  CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
  auto NewExceptionBehavior =
      ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
          FPFeatures.getExceptionMode()));
  CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);

  CGF.SetFastMathFlags(FPFeatures);

  assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
          isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
          isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
          (NewExceptionBehavior == llvm::fp::ebIgnore &&
           NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
         "FPConstrained should be enabled on entire function");

  auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
    auto OldValue =
        CGF.CurFn->getFnAttribute(Name).getValueAsBool();
    auto NewValue = OldValue & Value;
    if (OldValue != NewValue)
      CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
  };
  mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
  mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
  mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
  mergeFnAttrValue(
      "unsafe-fp-math",
      FPFeatures.getAllowFPReassociate() && FPFeatures.getAllowReciprocal() &&
          FPFeatures.getAllowApproxFunc() && FPFeatures.getNoSignedZero() &&
          FPFeatures.allowFPContractAcrossStatement());
}

CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
  CGF.CurFPFeatures = OldFPFeatures;
  CGF.Builder.setDefaultConstrainedExcept(OldExcept);
  CGF.Builder.setDefaultConstrainedRounding(OldRounding);
}

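/// Given a pointer to a complete object of type T, construct an l-value using
/// T's natural alignment.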
LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  Address Addr(V, ConvertTypeForMem(T), Alignment);
  return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
}

/// Given a value of type T* that may not be to a complete object,
/// construct an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
                                                /* forPointeeType= */ true);
  Address Addr(V, ConvertTypeForMem(T), Align);
  return MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
}


llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

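/// Classify the given type by how IR generation evaluates values of that
/// type: as scalars, as complex pairs, or as aggregates in memory.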
TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be used
      // later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

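// Append the basic block to the current function if it is actually used;
// otherwise just delete it.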
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty()) {
    CGF.CurFn->insert(CGF.CurFn->end(), BB);
    return;
  }
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code. If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;

  std::optional<ApplyDebugLocation> OAL;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    if (CGDebugInfo *DI = getDebugInfo()) {
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);
      else
        // We may not have a valid end location. Try to apply it anyway, and
        // fall back to an artificial location if needed.
        OAL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
    }

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if any
  // rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should be
    // no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // If PostAllocaInsertPt was lazily created, remove it now; like
  // AllocaInsertPt, it exists only as a convenience for us.
  if (PostAllocaInsertPt) {
    llvm::Instruction *PostPtr = PostAllocaInsertPt;
    PostAllocaInsertPt = nullptr;
    PostPtr->eraseFromParent();
  }

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (const auto &R : DeferredReplacements) {
    if (llvm::Value *Old = R.first) {
      Old->replaceAllUsesWith(R.second);
      cast<llvm::Instruction>(Old)->eraseFromParent();
    }
  }
  DeferredReplacements.clear();

  // Eliminate the CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do it for all
  // functions as it may result in a slight increase in the number of
  // instructions if compiled with no optimizations. We do it for coroutines
  // because the lifetime of the CleanupDestSlot alloca makes correct coroutine
  // frame building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinValue());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinValue());

  if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
    LargestVectorWidth = CurFnInfo->getMaxVectorWidth();

  // Add the required-vector-width attribute. This contains the max width from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by
  //    this function.
  if (getContext().getTargetInfo().getTriple().isX86())
    CurFn->addFnAttr("min-legal-vector-width",
                     llvm::utostr(LargestVectorWidth));

  // Add vscale_range attribute if appropriate.
  std::optional<std::pair<unsigned, unsigned>> VScaleRange =
      getContext().getTargetInfo().getVScaleRange(getLangOpts());
  if (VScaleRange) {
    CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
        getLLVMContext(), VScaleRange->first, VScaleRange->second));
  }

  // If we generated an unreachable return block, delete it now.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
    Builder.ClearInsertionPoint();
    ReturnBlock.getBlock()->eraseFromParent();
  }
  if (ReturnValue.isValid()) {
    auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
    if (RetAlloca && RetAlloca->use_empty()) {
      RetAlloca->eraseFromParent();
      ReturnValue = Address::invalid();
    }
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

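/// ShouldSkipSanitizerInstrumentation - Return true if the current function
/// should not be instrumented by any sanitizer, i.e. its declaration carries
/// the DisableSanitizerInstrumentationAttr.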
bool CodeGenFunction::ShouldSkipSanitizerInstrumentation() {
  if (!CurFuncDecl)
    return false;
  return CurFuncDecl->hasAttr<DisableSanitizerInstrumentationAttr>();
}

/// ShouldXRayInstrument - Return true if the current function should be
/// instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

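/// AlwaysEmitXRayTypedEvents - Return true if we should emit IR for calls to
/// the __xray_typedevent(...) builtin calls, when doing XRay instrumentation.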
bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}

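/// Given a function and an address that was encoded in its prologue as an
/// offset relative to that function, reconstruct the address of the global
/// and load the original pointer stored there.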
llvm::Value *
CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
                                          llvm::Value *EncodedAddr) {
  // Reconstruct the address of the global.
  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");

  // Load the original pointer through the global.
  return Builder.CreateLoad(Address(GOTAddr, Int8PtrTy, getPointerAlign()),
                            "decoded_addr");
}

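/// Emit kernel metadata for an OpenCL or CUDA global function: the kernel
/// argument metadata, plus (for OpenCL only) any vec_type_hint, work-group
/// size, and sub-group size hints from the declaration.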
void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
                                         llvm::Function *Fn) {
  if (!FD->hasAttr<OpenCLKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  CGM.GenKernelArgMetadata(Fn, FD, this);

  if (!getLangOpts().OpenCL)
    return;

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}

/// Check if the return value of this function requires sanitization.
bool CodeGenFunction::requiresReturnValueCheck() const {
  return requiresReturnValueNullabilityCheck() ||
         (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
          CurCodeDecl->getAttr<ReturnsNonNullAttr>());
}

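/// Return true if \p D looks like a C++ standard library allocator's
/// allocate() member: a method named "allocate" that takes a size_t and,
/// optionally, a 'const void *' hint.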
static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}

/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (FD && FD->usesSEHTry())
    CurSEHParent = GD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function is ignored for any of the enabled sanitizers,
  // disable the sanitizer for the function.
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                          \
    if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc))                    \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (false);

  if (D) {
    const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
    bool NoSanitizeCoverage = false;

    for (auto *Attr : D->specific_attrs<NoSanitizeAttr>()) {
      // Apply the no_sanitize* attributes to SanOpts.
      SanitizerMask mask = Attr->getMask();
      SanOpts.Mask &= ~mask;
      if (mask & SanitizerKind::Address)
        SanOpts.set(SanitizerKind::KernelAddress, false);
      if (mask & SanitizerKind::KernelAddress)
        SanOpts.set(SanitizerKind::Address, false);
      if (mask & SanitizerKind::HWAddress)
        SanOpts.set(SanitizerKind::KernelHWAddress, false);
      if (mask & SanitizerKind::KernelHWAddress)
        SanOpts.set(SanitizerKind::HWAddress, false);

      // SanitizeCoverage is not handled by SanOpts.
      if (Attr->hasCoverage())
        NoSanitizeCoverage = true;
    }

    if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
      Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);

    if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
      Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);
  }

  if (ShouldSkipSanitizerInstrumentation()) {
    CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
  } else {
    // Apply sanitizer attributes to the function.
    if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
    if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
                         SanitizerKind::KernelHWAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
    if (SanOpts.has(SanitizerKind::MemtagStack))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
    if (SanOpts.has(SanitizerKind::Thread))
      Fn->addFnAttr(llvm::Attribute::SanitizeThread);
    if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  }
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  // Apply fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on the
  // namespace because not all allocators are in std::
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Ignore null checks in coroutine functions since the coroutines passes
  // are not aware of how to move the extra UBSan instructions across the split
  // coroutine boundaries.
  if (D && SanOpts.has(SanitizerKind::Null))
    if (FD && FD->getBody() &&
        FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
      SanOpts.Mask &= ~SanitizerKind::Null;

  // Apply xray attributes to the function (as a string, for now)
  bool AlwaysXRayAttr = false;
  if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
    if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry) ||
        CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit)) {
      if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
        Fn->addFnAttr("function-instrument", "xray-always");
        AlwaysXRayAttr = true;
      }
      if (XRayAttr->neverXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-never");
      if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
        if (ShouldXRayInstrumentFunction())
          Fn->addFnAttr("xray-log-args",
                        llvm::utostr(LogArgs->getArgumentCount()));
    }
  } else {
    if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
      Fn->addFnAttr(
          "xray-instruction-threshold",
          llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
  }

  if (ShouldXRayInstrumentFunction()) {
    if (CGM.getCodeGenOpts().XRayIgnoreLoops)
      Fn->addFnAttr("xray-ignore-loops");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit))
      Fn->addFnAttr("xray-skip-exit");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry))
      Fn->addFnAttr("xray-skip-entry");

    auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
    if (FuncGroups > 1) {
      auto FuncName = llvm::ArrayRef<uint8_t>(CurFn->getName().bytes_begin(),
                                              CurFn->getName().bytes_end());
      auto Group = crc32(FuncName) % FuncGroups;
      if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
          !AlwaysXRayAttr)
        Fn->addFnAttr("function-instrument", "xray-never");
    }
  }

  if (CGM.getCodeGenOpts().getProfileInstr() != CodeGenOptions::ProfileNone) {
    switch (CGM.isFunctionBlockedFromProfileInstr(Fn, Loc)) {
    case ProfileList::Skip:
      Fn->addFnAttr(llvm::Attribute::SkipProfile);
      break;
    case ProfileList::Forbid:
      Fn->addFnAttr(llvm::Attribute::NoProfile);
      break;
    case ProfileList::Allow:
      break;
    }
  }

  unsigned Count, Offset;
  if (const auto *Attr =
          D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
    Count = Attr->getCount();
    Offset = Attr->getOffset();
  } else {
    Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
    Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
  }
  if (Count && Offset <= Count) {
    Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
    if (Offset)
      Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
  }
  // Instruct that functions for COFF/CodeView targets should start with a
  // patchable instruction, but only on x86/x64. Don't forward this to ARM/ARM64
  // backends as they don't need it -- instructions on these architectures are
  // always atomically patchable at runtime.
  if (CGM.getCodeGenOpts().HotPatch &&
      getContext().getTargetInfo().getTriple().isX86() &&
      getContext().getTargetInfo().getTriple().getEnvironment() !=
          llvm::Triple::CODE16)
    Fn->addFnAttr("patchable-function", "prologue-short-redirect");

  // Add no-jump-tables value.
  if (CGM.getCodeGenOpts().NoUseJumpTables)
    Fn->addFnAttr("no-jump-tables", "true");

  // Add no-inline-line-tables value.
  if (CGM.getCodeGenOpts().NoInlineLineTables)
    Fn->addFnAttr("no-inline-line-tables");

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
    Fn->addFnAttr("use-sample-profile");

  if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
    Fn->addFnAttr("cfi-canonical-jump-table");

  if (D && D->hasAttr<NoProfileFunctionAttr>())
    Fn->addFnAttr(llvm::Attribute::NoProfile);

  if (D) {
    // Function attributes take precedence over command line flags.
    if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
      switch (A->getThunkType()) {
      case FunctionReturnThunksAttr::Kind::Keep:
        break;
      case FunctionReturnThunksAttr::Kind::Extern:
        Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
        break;
      }
    } else if (CGM.getCodeGenOpts().FunctionReturnThunks)
      Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
  }

  if (FD && (getLangOpts().OpenCL ||
             (getLangOpts().HIP && getLangOpts().CUDAIsDevice))) {
    // Add metadata for a kernel function.
    EmitKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (FD && getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
    if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
      // Remove any (C++17) exception specifications, to allow calling e.g. a
      // noexcept function through a non-noexcept pointer.
      auto ProtoTy = getContext().getFunctionTypeWithExceptionSpec(
          FD->getType(), EST_None);
      llvm::Constant *FTRTTIConst =
          CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
      llvm::GlobalVariable *FTRTTIProxy =
          CGM.GetOrCreateRTTIProxyGlobalVariable(FTRTTIConst);
      llvm::LLVMContext &Ctx = Fn->getContext();
      llvm::MDBuilder MDB(Ctx);
      Fn->setMetadata(llvm::LLVMContext::MD_func_sanitize,
                      MDB.createRTTIPointerPrologue(PrologueSig, FTRTTIProxy));
      CGM.addCompilerUsedGlobal(FTRTTIProxy);
    }
  }

  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability();
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  //
  // OpenCL C 2.0 v2.2-11 s6.9.i:
  //     Recursion is not supported.
  //
  // SYCL v1.2.1 s3.10:
  //     kernels cannot include RTTI information, exception classes,
  //     recursive code, virtual functions or make use of C++ libraries that
  //     are not compiled for the device.
  if (FD && ((getLangOpts().CPlusPlus && FD->isMain()) ||
             getLangOpts().OpenCL || getLangOpts().SYCLIsDevice ||
             (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
    Fn->addFnAttr(llvm::Attribute::NoRecurse);

  llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
  llvm::fp::ExceptionBehavior FPExceptionBehavior =
      ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
  Builder.setDefaultConstrainedRounding(RM);
  Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
  if ((FD && (FD->UsesFPIntrin() || FD->hasAttr<StrictFPAttr>())) ||
      (!FD && (FPExceptionBehavior != llvm::fp::ebIgnore ||
               RM != llvm::RoundingMode::NearestTiesToEven))) {
    Builder.setIsFPConstrained(true);
    Fn->addFnAttr(llvm::Attribute::StrictFP);
  }

  // If a custom alignment is used, force realigning to this alignment on
  // any main function which certainly will need it.
  if (FD && ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
             CGM.getCodeGenOpts().StackAlignment))
    Fn->addFnAttr("stackrealign");

  // "main" doesn't need to zero out call-used registers.
  if (FD && FD->isMain())
    Fn->removeFnAttr("zero-call-used-regs");

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

1012e5dd7070Spatrick // Create a marker to make it easy to insert allocas into the entry block
1013e5dd7070Spatrick // later. Don't create this with the builder, because we don't want it
1014e5dd7070Spatrick // folded.
1015e5dd7070Spatrick llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
1016e5dd7070Spatrick AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);
1017e5dd7070Spatrick
1018e5dd7070Spatrick ReturnBlock = getJumpDestInCurrentScope("return");
1019e5dd7070Spatrick
1020e5dd7070Spatrick Builder.SetInsertPoint(EntryBB);
1021e5dd7070Spatrick
1022e5dd7070Spatrick // If we're checking the return value, allocate space for a pointer to a
1023e5dd7070Spatrick // precise source location of the checked return statement.
1024e5dd7070Spatrick if (requiresReturnValueCheck()) {
1025e5dd7070Spatrick ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
1026*12c85518Srobert Builder.CreateStore(llvm::ConstantPointerNull::get(Int8PtrTy),
1027*12c85518Srobert ReturnLocation);
1028e5dd7070Spatrick }
1029e5dd7070Spatrick
1030e5dd7070Spatrick // Emit subprogram debug descriptor.
1031e5dd7070Spatrick if (CGDebugInfo *DI = getDebugInfo()) {
1032e5dd7070Spatrick // Reconstruct the type from the argument list so that implicit parameters,
1033e5dd7070Spatrick // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
1034e5dd7070Spatrick // convention.
1035*12c85518Srobert DI->emitFunctionStart(GD, Loc, StartLoc,
1036*12c85518Srobert DI->getFunctionType(FD, RetTy, Args), CurFn,
1037*12c85518Srobert CurFuncIsThunk);
1038e5dd7070Spatrick }
1039e5dd7070Spatrick
1040e5dd7070Spatrick if (ShouldInstrumentFunction()) {
1041e5dd7070Spatrick if (CGM.getCodeGenOpts().InstrumentFunctions)
1042e5dd7070Spatrick CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
1043e5dd7070Spatrick if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
1044e5dd7070Spatrick CurFn->addFnAttr("instrument-function-entry-inlined",
1045e5dd7070Spatrick "__cyg_profile_func_enter");
1046e5dd7070Spatrick if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
1047e5dd7070Spatrick CurFn->addFnAttr("instrument-function-entry-inlined",
1048e5dd7070Spatrick "__cyg_profile_func_enter_bare");
1049e5dd7070Spatrick }
1050e5dd7070Spatrick
1051e5dd7070Spatrick // Since emitting the mcount call here impacts optimizations such as function
1052e5dd7070Spatrick // inlining, we just add an attribute to insert an mcount call in the backend.
1053e5dd7070Spatrick // The "instrument-function-entry-inlined" attribute is set to the mcount
1054e5dd7070Spatrick // function name, which is architecture dependent.
1055e5dd7070Spatrick if (CGM.getCodeGenOpts().InstrumentForProfiling) {
1056e5dd7070Spatrick // Calls to fentry/mcount should not be generated if the function has
1057e5dd7070Spatrick // the no_instrument_function attribute.
1058e5dd7070Spatrick if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
1059e5dd7070Spatrick if (CGM.getCodeGenOpts().CallFEntry)
1060e5dd7070Spatrick Fn->addFnAttr("fentry-call", "true");
1061e5dd7070Spatrick else {
1062e5dd7070Spatrick Fn->addFnAttr("instrument-function-entry-inlined",
1063e5dd7070Spatrick getTarget().getMCountName());
1064e5dd7070Spatrick }
1065e5dd7070Spatrick if (CGM.getCodeGenOpts().MNopMCount) {
1066e5dd7070Spatrick if (!CGM.getCodeGenOpts().CallFEntry)
1067e5dd7070Spatrick CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1068e5dd7070Spatrick << "-mnop-mcount" << "-mfentry";
1069e5dd7070Spatrick Fn->addFnAttr("mnop-mcount");
1070e5dd7070Spatrick }
1071e5dd7070Spatrick
1072e5dd7070Spatrick if (CGM.getCodeGenOpts().RecordMCount) {
1073e5dd7070Spatrick if (!CGM.getCodeGenOpts().CallFEntry)
1074e5dd7070Spatrick CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1075e5dd7070Spatrick << "-mrecord-mcount" << "-mfentry";
1076e5dd7070Spatrick Fn->addFnAttr("mrecord-mcount");
1077e5dd7070Spatrick }
1078e5dd7070Spatrick }
1079e5dd7070Spatrick }
1080e5dd7070Spatrick
1081e5dd7070Spatrick if (CGM.getCodeGenOpts().PackedStack) {
1082e5dd7070Spatrick if (getContext().getTargetInfo().getTriple().getArch() !=
1083e5dd7070Spatrick llvm::Triple::systemz)
1084e5dd7070Spatrick CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
1085e5dd7070Spatrick << "-mpacked-stack";
1086e5dd7070Spatrick Fn->addFnAttr("packed-stack");
1087e5dd7070Spatrick }
1088e5dd7070Spatrick
1089*12c85518Srobert if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX &&
1090*12c85518Srobert !CGM.getDiags().isIgnored(diag::warn_fe_backend_frame_larger_than, Loc))
1091a9ac8606Spatrick Fn->addFnAttr("warn-stack-size",
1092a9ac8606Spatrick std::to_string(CGM.getCodeGenOpts().WarnStackSize));
1093a9ac8606Spatrick
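// Set up the return value slot according to the return ABI: nothing for a
// void return, the sret argument for indirect returns, the sret pointer
// loaded out of the inalloca argument struct, or a plain alloca temporary
// otherwise.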
1094e5dd7070Spatrick if (RetTy->isVoidType()) {
1095e5dd7070Spatrick // Void type; nothing to return.
1096e5dd7070Spatrick ReturnValue = Address::invalid();
1097e5dd7070Spatrick
1098e5dd7070Spatrick // Count the implicit return.
1099e5dd7070Spatrick if (!endsWithReturn(D))
1100e5dd7070Spatrick ++NumReturnExprs;
1101e5dd7070Spatrick } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
1102e5dd7070Spatrick // Indirect return; emit returned value directly into sret slot.
1103e5dd7070Spatrick // This reduces code size, and affects correctness in C++.
1104e5dd7070Spatrick auto AI = CurFn->arg_begin();
1105e5dd7070Spatrick if (CurFnInfo->getReturnInfo().isSRetAfterThis())
1106e5dd7070Spatrick ++AI;
1107*12c85518Srobert ReturnValue = Address(&*AI, ConvertType(RetTy),
1108*12c85518Srobert CurFnInfo->getReturnInfo().getIndirectAlign());
1109e5dd7070Spatrick if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
1110e5dd7070Spatrick ReturnValuePointer =
1111e5dd7070Spatrick CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
1112e5dd7070Spatrick Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
1113e5dd7070Spatrick ReturnValue.getPointer(), Int8PtrTy),
1114e5dd7070Spatrick ReturnValuePointer);
1115e5dd7070Spatrick }
1116e5dd7070Spatrick } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
1117e5dd7070Spatrick !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
1118e5dd7070Spatrick // Load the sret pointer from the argument struct and return into that.
1119e5dd7070Spatrick unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
1120e5dd7070Spatrick llvm::Function::arg_iterator EI = CurFn->arg_end();
1121e5dd7070Spatrick --EI;
1122a9ac8606Spatrick llvm::Value *Addr = Builder.CreateStructGEP(
1123*12c85518Srobert CurFnInfo->getArgStruct(), &*EI, Idx);
1124a9ac8606Spatrick llvm::Type *Ty =
1125a9ac8606Spatrick cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
1126*12c85518Srobert ReturnValuePointer = Address(Addr, Ty, getPointerAlign());
1127a9ac8606Spatrick Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
1128*12c85518Srobert ReturnValue =
1129*12c85518Srobert Address(Addr, ConvertType(RetTy), CGM.getNaturalTypeAlignment(RetTy));
1130e5dd7070Spatrick } else {
1131e5dd7070Spatrick ReturnValue = CreateIRTemp(RetTy, "retval");
1132e5dd7070Spatrick
1133e5dd7070Spatrick // Tell the epilog emitter to autorelease the result. We do this
1134e5dd7070Spatrick // now so that various specialized functions can suppress it
1135e5dd7070Spatrick // during their IR-generation.
1136e5dd7070Spatrick if (getLangOpts().ObjCAutoRefCount &&
1137e5dd7070Spatrick !CurFnInfo->isReturnsRetained() &&
1138e5dd7070Spatrick RetTy->isObjCRetainableType())
1139e5dd7070Spatrick AutoreleaseResult = true;
1140e5dd7070Spatrick }
1141e5dd7070Spatrick
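// Emit the start of the exception specification, then record how deep the
// cleanup stack is at the end of the prologue so the epilogue can pop back
// to it.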
1142e5dd7070Spatrick EmitStartEHSpec(CurCodeDecl);
1143e5dd7070Spatrick
1144e5dd7070Spatrick PrologueCleanupDepth = EHStack.stable_begin();
1145e5dd7070Spatrick
1146e5dd7070Spatrick // Emit OpenMP specific initialization of the device functions.
1147e5dd7070Spatrick if (getLangOpts().OpenMP && CurCodeDecl)
1148e5dd7070Spatrick CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
1149e5dd7070Spatrick
1150*12c85518Srobert // Handle emitting HLSL entry functions.
1151*12c85518Srobert if (D && D->hasAttr<HLSLShaderAttr>())
1152*12c85518Srobert CGM.getHLSLRuntime().emitEntryFunction(FD, Fn);
1153*12c85518Srobert
1154e5dd7070Spatrick EmitFunctionProlog(*CurFnInfo, CurFn, Args);
1155e5dd7070Spatrick
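// For C++ instance methods, let the C++ ABI emit the 'this' setup; for
// lambda call operators, also map the captured variables (and any captured
// VLA bounds) to their fields in the closure object.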
1156*12c85518Srobert if (isa_and_nonnull<CXXMethodDecl>(D) &&
1157*12c85518Srobert cast<CXXMethodDecl>(D)->isInstance()) {
1158e5dd7070Spatrick CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
1159e5dd7070Spatrick const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
1160e5dd7070Spatrick if (MD->getParent()->isLambda() &&
1161e5dd7070Spatrick MD->getOverloadedOperator() == OO_Call) {
1162e5dd7070Spatrick // We're in a lambda; figure out the captures.
1163e5dd7070Spatrick MD->getParent()->getCaptureFields(LambdaCaptureFields,
1164e5dd7070Spatrick LambdaThisCaptureField);
1165e5dd7070Spatrick if (LambdaThisCaptureField) {
1166e5dd7070Spatrick // If the lambda captures the object referred to by '*this' - either by
1167e5dd7070Spatrick // value or by reference, make sure CXXThisValue points to the correct
1168e5dd7070Spatrick // object.
1169e5dd7070Spatrick
1170e5dd7070Spatrick // Get the lvalue for the field (which is a copy of the enclosing object
1171e5dd7070Spatrick // or contains the address of the enclosing object).
1172e5dd7070Spatrick LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
1173e5dd7070Spatrick if (!LambdaThisCaptureField->getType()->isPointerType()) {
1174e5dd7070Spatrick // If the enclosing object was captured by value, just use its address.
1175e5dd7070Spatrick CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
1176e5dd7070Spatrick } else {
1177e5dd7070Spatrick // Load the lvalue pointed to by the field, since '*this' was captured
1178e5dd7070Spatrick // by reference.
1179e5dd7070Spatrick CXXThisValue =
1180e5dd7070Spatrick EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
1181e5dd7070Spatrick }
1182e5dd7070Spatrick }
1183e5dd7070Spatrick for (auto *FD : MD->getParent()->fields()) {
1184e5dd7070Spatrick if (FD->hasCapturedVLAType()) {
1185e5dd7070Spatrick auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
1186e5dd7070Spatrick SourceLocation()).getScalarVal();
1187e5dd7070Spatrick auto VAT = FD->getCapturedVLAType();
1188e5dd7070Spatrick VLASizeMap[VAT->getSizeExpr()] = ExprArg;
1189e5dd7070Spatrick }
1190e5dd7070Spatrick }
1191e5dd7070Spatrick } else {
1192e5dd7070Spatrick // Not in a lambda; just use 'this' from the method.
1193e5dd7070Spatrick // FIXME: Should we generate a new load for each use of 'this'? The
1194e5dd7070Spatrick // fast register allocator would be happier...
1195e5dd7070Spatrick CXXThisValue = CXXABIThisValue;
1196e5dd7070Spatrick }
1197e5dd7070Spatrick
1198e5dd7070Spatrick // Check the 'this' pointer once per function, if it's available.
1199e5dd7070Spatrick if (CXXABIThisValue) {
1200e5dd7070Spatrick SanitizerSet SkippedChecks;
1201e5dd7070Spatrick SkippedChecks.set(SanitizerKind::ObjectSize, true);
1202e5dd7070Spatrick QualType ThisTy = MD->getThisType();
1203e5dd7070Spatrick
1204e5dd7070Spatrick // If this is the call operator of a lambda with no capture-default, it
1205e5dd7070Spatrick // may have a static invoker function, which may call this operator with
1206e5dd7070Spatrick // a null 'this' pointer.
1207e5dd7070Spatrick if (isLambdaCallOperator(MD) &&
1208e5dd7070Spatrick MD->getParent()->getLambdaCaptureDefault() == LCD_None)
1209e5dd7070Spatrick SkippedChecks.set(SanitizerKind::Null, true);
1210e5dd7070Spatrick
1211a9ac8606Spatrick EmitTypeCheck(
1212a9ac8606Spatrick isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall : TCK_MemberCall,
1213a9ac8606Spatrick Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment, SkippedChecks);
1214e5dd7070Spatrick }
1215e5dd7070Spatrick }
1216e5dd7070Spatrick
1217e5dd7070Spatrick // If any of the arguments have a variably modified type, make sure to
1218*12c85518Srobert // emit the type size, but only if the function is not naked. Naked functions
1219*12c85518Srobert // have no prolog to run this evaluation.
1220*12c85518Srobert if (!FD || !FD->hasAttr<NakedAttr>()) {
1221*12c85518Srobert for (const VarDecl *VD : Args) {
1222e5dd7070Spatrick // Dig out the type as written from ParmVarDecls; it's unclear whether
1223e5dd7070Spatrick // the standard (C99 6.9.1p10) requires this, but we're following the
1224e5dd7070Spatrick // precedent set by gcc.
1225e5dd7070Spatrick QualType Ty;
1226e5dd7070Spatrick if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1227e5dd7070Spatrick Ty = PVD->getOriginalType();
1228e5dd7070Spatrick else
1229e5dd7070Spatrick Ty = VD->getType();
1230e5dd7070Spatrick
1231e5dd7070Spatrick if (Ty->isVariablyModifiedType())
1232e5dd7070Spatrick EmitVariablyModifiedType(Ty);
1233e5dd7070Spatrick }
1234*12c85518Srobert }
1235e5dd7070Spatrick // Emit a location at the end of the prologue.
1236e5dd7070Spatrick if (CGDebugInfo *DI = getDebugInfo())
1237e5dd7070Spatrick DI->EmitLocation(Builder, StartLoc);
1238e5dd7070Spatrick // TODO: Do we need to handle this in two places like we do with
1239e5dd7070Spatrick // target-features/target-cpu?
1240e5dd7070Spatrick if (CurFuncDecl)
1241e5dd7070Spatrick if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1242e5dd7070Spatrick LargestVectorWidth = VecWidth->getVectorWidth();
1243e5dd7070Spatrick }
1244e5dd7070Spatrick
1245e5dd7070Spatrick void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
1246e5dd7070Spatrick incrementProfileCounter(Body);
1247e5dd7070Spatrick if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
1248e5dd7070Spatrick EmitCompoundStmtWithoutScope(*S);
1249e5dd7070Spatrick else
1250e5dd7070Spatrick EmitStmt(Body);
1251a9ac8606Spatrick
1252a9ac8606Spatrick // This is checked after emitting the function body so we know if there
1253a9ac8606Spatrick // are any permitted infinite loops.
1254a9ac8606Spatrick if (checkIfFunctionMustProgress())
1255a9ac8606Spatrick CurFn->addFnAttr(llvm::Attribute::MustProgress);
1256e5dd7070Spatrick }
1257e5dd7070Spatrick
1258e5dd7070Spatrick /// When instrumenting to collect profile data, the counts for some blocks
1259e5dd7070Spatrick /// such as switch cases need to not include the fall-through counts, so
1260e5dd7070Spatrick /// emit a branch around the instrumentation code. When not instrumenting,
1261e5dd7070Spatrick /// this just calls EmitBlock().
1262e5dd7070Spatrick void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
1263e5dd7070Spatrick const Stmt *S) {
1264e5dd7070Spatrick llvm::BasicBlock *SkipCountBB = nullptr;
1265e5dd7070Spatrick if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
1266e5dd7070Spatrick // When instrumenting for profiling, the fallthrough to certain
1267e5dd7070Spatrick // statements needs to skip over the instrumentation code so that we
1268e5dd7070Spatrick // get an accurate count.
1269e5dd7070Spatrick SkipCountBB = createBasicBlock("skipcount");
1270e5dd7070Spatrick EmitBranch(SkipCountBB);
1271e5dd7070Spatrick }
1272e5dd7070Spatrick EmitBlock(BB);
1273e5dd7070Spatrick uint64_t CurrentCount = getCurrentProfileCount();
1274e5dd7070Spatrick incrementProfileCounter(S);
1275e5dd7070Spatrick setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
1276e5dd7070Spatrick if (SkipCountBB)
1277e5dd7070Spatrick EmitBlock(SkipCountBB);
1278e5dd7070Spatrick }
1279e5dd7070Spatrick
1280e5dd7070Spatrick /// Tries to mark the given function nounwind based on the
1281e5dd7070Spatrick /// non-existence of any throwing calls within it. We believe this is
1282e5dd7070Spatrick /// lightweight enough to do at -O0.
1283e5dd7070Spatrick static void TryMarkNoThrow(llvm::Function *F) {
1284e5dd7070Spatrick // LLVM treats 'nounwind' on a function as part of the type, so we
1285e5dd7070Spatrick // can't do this on functions that can be overwritten.
1286e5dd7070Spatrick if (F->isInterposable()) return;
1287e5dd7070Spatrick
1288e5dd7070Spatrick for (llvm::BasicBlock &BB : *F)
1289e5dd7070Spatrick for (llvm::Instruction &I : BB)
1290e5dd7070Spatrick if (I.mayThrow())
1291e5dd7070Spatrick return;
1292e5dd7070Spatrick
1293e5dd7070Spatrick F->setDoesNotThrow();
1294e5dd7070Spatrick }
1295e5dd7070Spatrick
1296e5dd7070Spatrick QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
1297e5dd7070Spatrick FunctionArgList &Args) {
1298e5dd7070Spatrick const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1299e5dd7070Spatrick QualType ResTy = FD->getReturnType();
1300e5dd7070Spatrick
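// For instance methods, adjust the result type if the C++ ABI returns 'this'
// or the most-derived pointer from this kind of method, and add the implicit
// 'this' parameter first.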
1301e5dd7070Spatrick const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
1302e5dd7070Spatrick if (MD && MD->isInstance()) {
1303e5dd7070Spatrick if (CGM.getCXXABI().HasThisReturn(GD))
1304e5dd7070Spatrick ResTy = MD->getThisType();
1305e5dd7070Spatrick else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1306e5dd7070Spatrick ResTy = CGM.getContext().VoidPtrTy;
1307e5dd7070Spatrick CGM.getCXXABI().buildThisParam(*this, Args);
1308e5dd7070Spatrick }
1309e5dd7070Spatrick
1310e5dd7070Spatrick // The base version of an inheriting constructor whose constructed base is a
1311e5dd7070Spatrick // virtual base is not passed any arguments (because it doesn't actually call
1312e5dd7070Spatrick // the inherited constructor).
1313e5dd7070Spatrick bool PassedParams = true;
1314e5dd7070Spatrick if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
1315e5dd7070Spatrick if (auto Inherited = CD->getInheritedConstructor())
1316e5dd7070Spatrick PassedParams =
1317e5dd7070Spatrick getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
1318e5dd7070Spatrick
1319e5dd7070Spatrick if (PassedParams) {
1320e5dd7070Spatrick for (auto *Param : FD->parameters()) {
1321e5dd7070Spatrick Args.push_back(Param);
1322e5dd7070Spatrick if (!Param->hasAttr<PassObjectSizeAttr>())
1323e5dd7070Spatrick continue;
1324e5dd7070Spatrick
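// A parameter with the pass_object_size attribute is followed by an implicit
// size_t argument carrying the object size; remember the pairing in
// SizeArguments so it can be retrieved when the parameter is emitted.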
1325e5dd7070Spatrick auto *Implicit = ImplicitParamDecl::Create(
1326e5dd7070Spatrick getContext(), Param->getDeclContext(), Param->getLocation(),
1327e5dd7070Spatrick /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
1328e5dd7070Spatrick SizeArguments[Param] = Implicit;
1329e5dd7070Spatrick Args.push_back(Implicit);
1330e5dd7070Spatrick }
1331e5dd7070Spatrick }
1332e5dd7070Spatrick
1333e5dd7070Spatrick if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
1334e5dd7070Spatrick CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
1335e5dd7070Spatrick
1336e5dd7070Spatrick return ResTy;
1337e5dd7070Spatrick }
1338e5dd7070Spatrick
1339e5dd7070Spatrick void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1340e5dd7070Spatrick const CGFunctionInfo &FnInfo) {
1341*12c85518Srobert assert(Fn && "generating code for null Function");
1342e5dd7070Spatrick const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1343e5dd7070Spatrick CurGD = GD;
1344e5dd7070Spatrick
1345e5dd7070Spatrick FunctionArgList Args;
1346e5dd7070Spatrick QualType ResTy = BuildFunctionArgList(GD, Args);
1347e5dd7070Spatrick
1348*12c85518Srobert if (FD->isInlineBuiltinDeclaration()) {
1349*12c85518Srobert // When generating code for a builtin with an inline declaration, use a
1350*12c85518Srobert // mangled name to hold the actual body, while keeping an external
1351*12c85518Srobert // definition in case the function pointer is referenced somewhere.
1352*12c85518Srobert std::string FDInlineName = (Fn->getName() + ".inline").str();
1353*12c85518Srobert llvm::Module *M = Fn->getParent();
1354*12c85518Srobert llvm::Function *Clone = M->getFunction(FDInlineName);
1355*12c85518Srobert if (!Clone) {
1356*12c85518Srobert Clone = llvm::Function::Create(Fn->getFunctionType(),
1357*12c85518Srobert llvm::GlobalValue::InternalLinkage,
1358*12c85518Srobert Fn->getAddressSpace(), FDInlineName, M);
1359*12c85518Srobert Clone->addFnAttr(llvm::Attribute::AlwaysInline);
1360*12c85518Srobert }
1361*12c85518Srobert Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
1362*12c85518Srobert Fn = Clone;
1363*12c85518Srobert } else {
1364*12c85518Srobert // Detect the unusual situation where an inline version is shadowed by a
1365*12c85518Srobert // non-inline version. In that case we should pick the external one
1366*12c85518Srobert // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
1367*12c85518Srobert // to detect that situation before we reach codegen, so do some late
1368*12c85518Srobert // replacement.
1369*12c85518Srobert for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
1370*12c85518Srobert PD = PD->getPreviousDecl()) {
1371*12c85518Srobert if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
1372*12c85518Srobert std::string FDInlineName = (Fn->getName() + ".inline").str();
1373*12c85518Srobert llvm::Module *M = Fn->getParent();
1374*12c85518Srobert if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
1375*12c85518Srobert Clone->replaceAllUsesWith(Fn);
1376*12c85518Srobert Clone->eraseFromParent();
1377*12c85518Srobert }
1378*12c85518Srobert break;
1379*12c85518Srobert }
1380*12c85518Srobert }
1381*12c85518Srobert }
1382*12c85518Srobert
1383e5dd7070Spatrick // Check if we should generate debug info for this function.
1384a9ac8606Spatrick if (FD->hasAttr<NoDebugAttr>()) {
1385a9ac8606Spatrick // Clear non-distinct debug info that was possibly attached to the function
1386a9ac8606Spatrick // due to an earlier declaration without the nodebug attribute
1387a9ac8606Spatrick Fn->setSubprogram(nullptr);
1388a9ac8606Spatrick // Disable debug info indefinitely for this function
1389a9ac8606Spatrick DebugInfo = nullptr;
1390a9ac8606Spatrick }
1391e5dd7070Spatrick
1392e5dd7070Spatrick // The function might not have a body if we're generating thunks for a
1393e5dd7070Spatrick // function declaration.
1394e5dd7070Spatrick SourceRange BodyRange;
1395e5dd7070Spatrick if (Stmt *Body = FD->getBody())
1396e5dd7070Spatrick BodyRange = Body->getSourceRange();
1397e5dd7070Spatrick else
1398e5dd7070Spatrick BodyRange = FD->getLocation();
1399e5dd7070Spatrick CurEHLocation = BodyRange.getEnd();
1400e5dd7070Spatrick
1401e5dd7070Spatrick // Use the location of the start of the function to determine where
1402e5dd7070Spatrick // the function definition is located. By default use the location
1403e5dd7070Spatrick // of the declaration as the location for the subprogram. A function
1404e5dd7070Spatrick // may lack a declaration in the source code if it is created by code
1405e5dd7070Spatrick // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1406e5dd7070Spatrick SourceLocation Loc = FD->getLocation();
1407e5dd7070Spatrick
1408e5dd7070Spatrick // If this is a function specialization then use the pattern body
1409e5dd7070Spatrick // as the location for the function.
1410e5dd7070Spatrick if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1411e5dd7070Spatrick if (SpecDecl->hasBody(SpecDecl))
1412e5dd7070Spatrick Loc = SpecDecl->getLocation();
1413e5dd7070Spatrick
1414e5dd7070Spatrick Stmt *Body = FD->getBody();
1415e5dd7070Spatrick
1416a9ac8606Spatrick if (Body) {
1417a9ac8606Spatrick // Coroutines always emit lifetime markers.
1418a9ac8606Spatrick if (isa<CoroutineBodyStmt>(Body))
1419a9ac8606Spatrick ShouldEmitLifetimeMarkers = true;
1420a9ac8606Spatrick
1421a9ac8606Spatrick // Initialize the helper that detects jumps which can cause invalid
1422a9ac8606Spatrick // lifetime markers.
1423a9ac8606Spatrick if (ShouldEmitLifetimeMarkers)
1424e5dd7070Spatrick Bypasses.Init(Body);
1425a9ac8606Spatrick }
1426e5dd7070Spatrick
1427e5dd7070Spatrick // Emit the standard function prologue.
1428e5dd7070Spatrick StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1429e5dd7070Spatrick
1430a9ac8606Spatrick // Save parameters for coroutine function.
1431a9ac8606Spatrick if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
1432*12c85518Srobert llvm::append_range(FnArgs, FD->parameters());
1433a9ac8606Spatrick
1434e5dd7070Spatrick // Generate the body of the function.
1435e5dd7070Spatrick PGO.assignRegionCounters(GD, CurFn);
1436e5dd7070Spatrick if (isa<CXXDestructorDecl>(FD))
1437e5dd7070Spatrick EmitDestructorBody(Args);
1438e5dd7070Spatrick else if (isa<CXXConstructorDecl>(FD))
1439e5dd7070Spatrick EmitConstructorBody(Args);
1440e5dd7070Spatrick else if (getLangOpts().CUDA &&
1441e5dd7070Spatrick !getLangOpts().CUDAIsDevice &&
1442e5dd7070Spatrick FD->hasAttr<CUDAGlobalAttr>())
1443e5dd7070Spatrick CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1444e5dd7070Spatrick else if (isa<CXXMethodDecl>(FD) &&
1445e5dd7070Spatrick cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1446e5dd7070Spatrick // The lambda static invoker function is special, because it forwards or
1447e5dd7070Spatrick // clones the body of the function call operator (but is actually static).
1448e5dd7070Spatrick EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1449e5dd7070Spatrick } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1450e5dd7070Spatrick (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1451e5dd7070Spatrick cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1452e5dd7070Spatrick // Implicit copy-assignment gets the same special treatment as implicit
1453e5dd7070Spatrick // copy-constructors.
1454e5dd7070Spatrick emitImplicitAssignmentOperatorBody(Args);
1455e5dd7070Spatrick } else if (Body) {
1456e5dd7070Spatrick EmitFunctionBody(Body);
1457e5dd7070Spatrick } else
1458e5dd7070Spatrick llvm_unreachable("no definition for emitted function");
1459e5dd7070Spatrick
1460e5dd7070Spatrick // C++11 [stmt.return]p2:
1461e5dd7070Spatrick // Flowing off the end of a function [...] results in undefined behavior in
1462e5dd7070Spatrick // a value-returning function.
1463e5dd7070Spatrick // C11 6.9.1p12:
1464e5dd7070Spatrick // If the '}' that terminates a function is reached, and the value of the
1465e5dd7070Spatrick // function call is used by the caller, the behavior is undefined.
1466e5dd7070Spatrick if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1467e5dd7070Spatrick !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1468e5dd7070Spatrick bool ShouldEmitUnreachable =
1469e5dd7070Spatrick CGM.getCodeGenOpts().StrictReturn ||
1470a9ac8606Spatrick !CGM.MayDropFunctionReturn(FD->getASTContext(), FD->getReturnType());
1471e5dd7070Spatrick if (SanOpts.has(SanitizerKind::Return)) {
1472e5dd7070Spatrick SanitizerScope SanScope(this);
1473e5dd7070Spatrick llvm::Value *IsFalse = Builder.getFalse();
1474e5dd7070Spatrick EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
1475e5dd7070Spatrick SanitizerHandler::MissingReturn,
1476*12c85518Srobert EmitCheckSourceLocation(FD->getLocation()), std::nullopt);
1477e5dd7070Spatrick } else if (ShouldEmitUnreachable) {
1478e5dd7070Spatrick if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1479e5dd7070Spatrick EmitTrapCall(llvm::Intrinsic::trap);
1480e5dd7070Spatrick }
1481e5dd7070Spatrick if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1482e5dd7070Spatrick Builder.CreateUnreachable();
1483e5dd7070Spatrick Builder.ClearInsertionPoint();
1484e5dd7070Spatrick }
1485e5dd7070Spatrick }
1486e5dd7070Spatrick
1487e5dd7070Spatrick // Emit the standard function epilogue.
1488e5dd7070Spatrick FinishFunction(BodyRange.getEnd());
1489e5dd7070Spatrick
1490e5dd7070Spatrick // If we haven't marked the function nothrow through other means, do
1491e5dd7070Spatrick // a quick pass now to see if we can.
1492e5dd7070Spatrick if (!CurFn->doesNotThrow())
1493e5dd7070Spatrick TryMarkNoThrow(CurFn);
1494e5dd7070Spatrick }
1495e5dd7070Spatrick
1496e5dd7070Spatrick /// ContainsLabel - Return true if the statement contains a label in it. If
1497e5dd7070Spatrick /// this statement is not executed normally and it does not contain a label,
1498e5dd7070Spatrick /// we can just remove the code.
1499e5dd7070Spatrick bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1500e5dd7070Spatrick // Null statement, not a label!
1501e5dd7070Spatrick if (!S) return false;
1502e5dd7070Spatrick
1503e5dd7070Spatrick // If this is a label, we have to emit the code, consider something like:
1504e5dd7070Spatrick // if (0) { ... foo: bar(); } goto foo;
1505e5dd7070Spatrick //
1506e5dd7070Spatrick // TODO: If anyone cared, we could track __label__'s, since we know that you
1507e5dd7070Spatrick // can't jump to one from outside their declared region.
1508e5dd7070Spatrick if (isa<LabelStmt>(S))
1509e5dd7070Spatrick return true;
1510e5dd7070Spatrick
1511e5dd7070Spatrick // If this is a case/default statement, and we haven't seen a switch, we have
1512e5dd7070Spatrick // to emit the code.
1513e5dd7070Spatrick if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1514e5dd7070Spatrick return true;
1515e5dd7070Spatrick
1516e5dd7070Spatrick // If this is a switch statement, we want to ignore cases below it.
1517e5dd7070Spatrick if (isa<SwitchStmt>(S))
1518e5dd7070Spatrick IgnoreCaseStmts = true;
1519e5dd7070Spatrick
1520e5dd7070Spatrick // Scan subexpressions for verboten labels.
1521e5dd7070Spatrick for (const Stmt *SubStmt : S->children())
1522e5dd7070Spatrick if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1523e5dd7070Spatrick return true;
1524e5dd7070Spatrick
1525e5dd7070Spatrick return false;
1526e5dd7070Spatrick }
1527e5dd7070Spatrick
1528e5dd7070Spatrick /// containsBreak - Return true if the statement contains a break out of it.
1529e5dd7070Spatrick /// If the statement (recursively) contains a switch or loop with a break
1530e5dd7070Spatrick /// inside of it, this is fine.
1531e5dd7070Spatrick bool CodeGenFunction::containsBreak(const Stmt *S) {
1532e5dd7070Spatrick // Null statement, no break!
1533e5dd7070Spatrick if (!S) return false;
1534e5dd7070Spatrick
1535e5dd7070Spatrick // If this is a switch or loop that defines its own break scope, then we can
1536e5dd7070Spatrick // include it and anything inside of it.
1537e5dd7070Spatrick if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1538e5dd7070Spatrick isa<ForStmt>(S))
1539e5dd7070Spatrick return false;
1540e5dd7070Spatrick
1541e5dd7070Spatrick if (isa<BreakStmt>(S))
1542e5dd7070Spatrick return true;
1543e5dd7070Spatrick
1544e5dd7070Spatrick // Scan subexpressions for verboten breaks.
1545e5dd7070Spatrick for (const Stmt *SubStmt : S->children())
1546e5dd7070Spatrick if (containsBreak(SubStmt))
1547e5dd7070Spatrick return true;
1548e5dd7070Spatrick
1549e5dd7070Spatrick return false;
1550e5dd7070Spatrick }
1551e5dd7070Spatrick
1552e5dd7070Spatrick bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1553e5dd7070Spatrick if (!S) return false;
1554e5dd7070Spatrick
1555e5dd7070Spatrick // Some statement kinds add a scope and thus never add a decl to the current
1556e5dd7070Spatrick // scope. Note, this list is longer than the list of statements that might
1557e5dd7070Spatrick // have an unscoped decl nested within them, but this way is conservatively
1558e5dd7070Spatrick // correct even if more statement kinds are added.
1559e5dd7070Spatrick if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1560e5dd7070Spatrick isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1561e5dd7070Spatrick isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1562e5dd7070Spatrick isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1563e5dd7070Spatrick return false;
1564e5dd7070Spatrick
1565e5dd7070Spatrick if (isa<DeclStmt>(S))
1566e5dd7070Spatrick return true;
1567e5dd7070Spatrick
1568e5dd7070Spatrick for (const Stmt *SubStmt : S->children())
1569e5dd7070Spatrick if (mightAddDeclToScope(SubStmt))
1570e5dd7070Spatrick return true;
1571e5dd7070Spatrick
1572e5dd7070Spatrick return false;
1573e5dd7070Spatrick }
1574e5dd7070Spatrick
1575e5dd7070Spatrick /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1576e5dd7070Spatrick /// to a constant, or if it does but contains a label, return false. If it
1577e5dd7070Spatrick /// constant folds return true and set the boolean result in Result.
1578e5dd7070Spatrick bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1579e5dd7070Spatrick bool &ResultBool,
1580e5dd7070Spatrick bool AllowLabels) {
1581e5dd7070Spatrick llvm::APSInt ResultInt;
1582e5dd7070Spatrick if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1583e5dd7070Spatrick return false;
1584e5dd7070Spatrick
1585e5dd7070Spatrick ResultBool = ResultInt.getBoolValue();
1586e5dd7070Spatrick return true;
1587e5dd7070Spatrick }
1588e5dd7070Spatrick
1589e5dd7070Spatrick /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1590e5dd7070Spatrick /// to a constant, or if it does but contains a label, return false. If it
1591e5dd7070Spatrick /// constant folds return true and set the folded value.
1592e5dd7070Spatrick bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1593e5dd7070Spatrick llvm::APSInt &ResultInt,
1594e5dd7070Spatrick bool AllowLabels) {
1595e5dd7070Spatrick // FIXME: Rename and handle conversion of other evaluatable things
1596e5dd7070Spatrick // to bool.
1597e5dd7070Spatrick Expr::EvalResult Result;
1598e5dd7070Spatrick if (!Cond->EvaluateAsInt(Result, getContext()))
1599e5dd7070Spatrick return false; // Not foldable, not integer or not fully evaluatable.
1600e5dd7070Spatrick
1601e5dd7070Spatrick llvm::APSInt Int = Result.Val.getInt();
1602e5dd7070Spatrick if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1603e5dd7070Spatrick return false; // Contains a label.
1604e5dd7070Spatrick
1605e5dd7070Spatrick ResultInt = Int;
1606e5dd7070Spatrick return true;
1607e5dd7070Spatrick }
1608e5dd7070Spatrick
1609a9ac8606Spatrick /// Determine whether the given condition is an instrumentable condition
1610a9ac8606Spatrick /// (i.e. no "&&" or "||").
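/// For example, "x", "!x" and "x == 0" are instrumentable conditions, while
/// "x && y" and "!(x || y)" are not.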
1611a9ac8606Spatrick bool CodeGenFunction::isInstrumentedCondition(const Expr *C) {
1612a9ac8606Spatrick // Bypass simplistic logical-NOT operator before determining whether the
1613a9ac8606Spatrick // condition contains any other logical operator.
1614a9ac8606Spatrick if (const UnaryOperator *UnOp = dyn_cast<UnaryOperator>(C->IgnoreParens()))
1615a9ac8606Spatrick if (UnOp->getOpcode() == UO_LNot)
1616a9ac8606Spatrick C = UnOp->getSubExpr();
1617e5dd7070Spatrick
1618a9ac8606Spatrick const BinaryOperator *BOp = dyn_cast<BinaryOperator>(C->IgnoreParens());
1619a9ac8606Spatrick return (!BOp || !BOp->isLogicalOp());
1620a9ac8606Spatrick }
1621a9ac8606Spatrick
1622a9ac8606Spatrick /// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
1623a9ac8606Spatrick /// increments a profile counter based on the semantics of the given logical
1624a9ac8606Spatrick /// operator opcode. This is used to instrument branch condition coverage for
1625a9ac8606Spatrick /// logical operators.
1626a9ac8606Spatrick void CodeGenFunction::EmitBranchToCounterBlock(
1627a9ac8606Spatrick const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock,
1628a9ac8606Spatrick llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */,
1629a9ac8606Spatrick Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) {
1630a9ac8606Spatrick // If not instrumenting, just emit a branch.
1631a9ac8606Spatrick bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr();
1632a9ac8606Spatrick if (!InstrumentRegions || !isInstrumentedCondition(Cond))
1633a9ac8606Spatrick return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
1634a9ac8606Spatrick
1635*12c85518Srobert llvm::BasicBlock *ThenBlock = nullptr;
1636*12c85518Srobert llvm::BasicBlock *ElseBlock = nullptr;
1637*12c85518Srobert llvm::BasicBlock *NextBlock = nullptr;
1638a9ac8606Spatrick
1639a9ac8606Spatrick // Create the block we'll use to increment the appropriate counter.
1640a9ac8606Spatrick llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt");
1641a9ac8606Spatrick
1642a9ac8606Spatrick // Set block pointers according to Logical-AND (BO_LAnd) semantics. This
1643a9ac8606Spatrick // means we need to evaluate the condition and increment the counter on TRUE:
1644a9ac8606Spatrick //
1645a9ac8606Spatrick // if (Cond)
1646a9ac8606Spatrick // goto CounterIncrBlock;
1647a9ac8606Spatrick // else
1648a9ac8606Spatrick // goto FalseBlock;
1649a9ac8606Spatrick //
1650a9ac8606Spatrick // CounterIncrBlock:
1651a9ac8606Spatrick // Counter++;
1652a9ac8606Spatrick // goto TrueBlock;
1653a9ac8606Spatrick
1654a9ac8606Spatrick if (LOp == BO_LAnd) {
1655a9ac8606Spatrick ThenBlock = CounterIncrBlock;
1656a9ac8606Spatrick ElseBlock = FalseBlock;
1657a9ac8606Spatrick NextBlock = TrueBlock;
1658a9ac8606Spatrick }
1659a9ac8606Spatrick
1660a9ac8606Spatrick // Set block pointers according to Logical-OR (BO_LOr) semantics. This means
1661a9ac8606Spatrick // we need to evaluate the condition and increment the counter on FALSE:
1662a9ac8606Spatrick //
1663a9ac8606Spatrick // if (Cond)
1664a9ac8606Spatrick // goto TrueBlock;
1665a9ac8606Spatrick // else
1666a9ac8606Spatrick // goto CounterIncrBlock;
1667a9ac8606Spatrick //
1668a9ac8606Spatrick // CounterIncrBlock:
1669a9ac8606Spatrick // Counter++;
1670a9ac8606Spatrick // goto FalseBlock;
1671a9ac8606Spatrick
1672a9ac8606Spatrick else if (LOp == BO_LOr) {
1673a9ac8606Spatrick ThenBlock = TrueBlock;
1674a9ac8606Spatrick ElseBlock = CounterIncrBlock;
1675a9ac8606Spatrick NextBlock = FalseBlock;
1676a9ac8606Spatrick } else {
1677a9ac8606Spatrick llvm_unreachable("Expected Opcode must be that of a Logical Operator");
1678a9ac8606Spatrick }
1679a9ac8606Spatrick
1680a9ac8606Spatrick // Emit Branch based on condition.
1681a9ac8606Spatrick EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount, LH);
1682a9ac8606Spatrick
1683a9ac8606Spatrick // Emit the block containing the counter increment(s).
1684a9ac8606Spatrick EmitBlock(CounterIncrBlock);
1685a9ac8606Spatrick
1686a9ac8606Spatrick // Increment corresponding counter; if index not provided, use Cond as index.
1687a9ac8606Spatrick incrementProfileCounter(CntrIdx ? CntrIdx : Cond);
1688a9ac8606Spatrick
1689a9ac8606Spatrick // Go to the next block.
1690a9ac8606Spatrick EmitBranch(NextBlock);
1691a9ac8606Spatrick }
1692e5dd7070Spatrick
1693e5dd7070Spatrick /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1694e5dd7070Spatrick /// statement) to the specified blocks. Based on the condition, this might try
1695e5dd7070Spatrick /// to simplify the codegen of the conditional based on the branch.
1696a9ac8606Spatrick /// \param LH The value of the likelihood attribute on the True branch.
1697e5dd7070Spatrick void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
1698e5dd7070Spatrick llvm::BasicBlock *TrueBlock,
1699e5dd7070Spatrick llvm::BasicBlock *FalseBlock,
1700a9ac8606Spatrick uint64_t TrueCount,
1701a9ac8606Spatrick Stmt::Likelihood LH) {
1702e5dd7070Spatrick Cond = Cond->IgnoreParens();
1703e5dd7070Spatrick
1704e5dd7070Spatrick if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1705e5dd7070Spatrick
1706e5dd7070Spatrick // Handle X && Y in a condition.
1707e5dd7070Spatrick if (CondBOp->getOpcode() == BO_LAnd) {
1708e5dd7070Spatrick // If we have "1 && X", simplify the code. "0 && X" would have constant
1709e5dd7070Spatrick // folded if the case was simple enough.
1710e5dd7070Spatrick bool ConstantBool = false;
1711e5dd7070Spatrick if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1712e5dd7070Spatrick ConstantBool) {
1713e5dd7070Spatrick // br(1 && X) -> br(X).
1714e5dd7070Spatrick incrementProfileCounter(CondBOp);
1715a9ac8606Spatrick return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1716a9ac8606Spatrick FalseBlock, TrueCount, LH);
1717e5dd7070Spatrick }
1718e5dd7070Spatrick
1719e5dd7070Spatrick // If we have "X && 1", simplify the code to use an uncond branch.
1720e5dd7070Spatrick // "X && 0" would have been constant folded to 0.
1721e5dd7070Spatrick if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1722e5dd7070Spatrick ConstantBool) {
1723e5dd7070Spatrick // br(X && 1) -> br(X).
1724a9ac8606Spatrick return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
1725a9ac8606Spatrick FalseBlock, TrueCount, LH, CondBOp);
1726e5dd7070Spatrick }
1727e5dd7070Spatrick
1728e5dd7070Spatrick // Emit the LHS as a conditional. If the LHS conditional is false, we
1729e5dd7070Spatrick // want to jump to the FalseBlock.
1730e5dd7070Spatrick llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1731e5dd7070Spatrick // The counter tells us how often we evaluate RHS, and all of TrueCount
1732e5dd7070Spatrick // can be propagated to that branch.
1733e5dd7070Spatrick uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1734e5dd7070Spatrick
1735e5dd7070Spatrick ConditionalEvaluation eval(*this);
1736e5dd7070Spatrick {
1737e5dd7070Spatrick ApplyDebugLocation DL(*this, Cond);
1738a9ac8606Spatrick // Propagate the likelihood attribute like __builtin_expect
1739a9ac8606Spatrick // __builtin_expect(X && Y, 1) -> X and Y are likely
1740a9ac8606Spatrick // __builtin_expect(X && Y, 0) -> only Y is unlikely
1741a9ac8606Spatrick EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount,
1742a9ac8606Spatrick LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1743e5dd7070Spatrick EmitBlock(LHSTrue);
1744e5dd7070Spatrick }
1745e5dd7070Spatrick
1746e5dd7070Spatrick incrementProfileCounter(CondBOp);
1747e5dd7070Spatrick setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1748e5dd7070Spatrick
1749e5dd7070Spatrick // Any temporaries created here are conditional.
1750e5dd7070Spatrick eval.begin(*this);
1751a9ac8606Spatrick EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1752a9ac8606Spatrick FalseBlock, TrueCount, LH);
1753e5dd7070Spatrick eval.end(*this);
1754e5dd7070Spatrick
1755e5dd7070Spatrick return;
1756e5dd7070Spatrick }
1757e5dd7070Spatrick
1758e5dd7070Spatrick if (CondBOp->getOpcode() == BO_LOr) {
1759e5dd7070Spatrick // If we have "0 || X", simplify the code. "1 || X" would have constant
1760e5dd7070Spatrick // folded if the case was simple enough.
1761e5dd7070Spatrick bool ConstantBool = false;
1762e5dd7070Spatrick if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1763e5dd7070Spatrick !ConstantBool) {
1764e5dd7070Spatrick // br(0 || X) -> br(X).
1765e5dd7070Spatrick incrementProfileCounter(CondBOp);
1766a9ac8606Spatrick return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
1767a9ac8606Spatrick FalseBlock, TrueCount, LH);
1768e5dd7070Spatrick }
1769e5dd7070Spatrick
1770e5dd7070Spatrick // If we have "X || 0", simplify the code to use an uncond branch.
1771e5dd7070Spatrick // "X || 1" would have been constant folded to 1.
1772e5dd7070Spatrick if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1773e5dd7070Spatrick !ConstantBool) {
1774e5dd7070Spatrick // br(X || 0) -> br(X).
1775a9ac8606Spatrick return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
1776a9ac8606Spatrick FalseBlock, TrueCount, LH, CondBOp);
1777e5dd7070Spatrick }
1778e5dd7070Spatrick
1779e5dd7070Spatrick // Emit the LHS as a conditional. If the LHS conditional is true, we
1780e5dd7070Spatrick // want to jump to the TrueBlock.
1781e5dd7070Spatrick llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1782e5dd7070Spatrick // We have the count for entry to the RHS and for the whole expression
1783e5dd7070Spatrick // being true, so we can divvy up the True count between the short circuit
1784e5dd7070Spatrick // and the RHS.
1785e5dd7070Spatrick uint64_t LHSCount =
1786e5dd7070Spatrick getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1787e5dd7070Spatrick uint64_t RHSCount = TrueCount - LHSCount;
1788e5dd7070Spatrick
1789e5dd7070Spatrick ConditionalEvaluation eval(*this);
1790e5dd7070Spatrick {
1791a9ac8606Spatrick // Propagate the likelihood attribute like __builtin_expect
1792a9ac8606Spatrick // __builtin_expect(X || Y, 1) -> only Y is likely
1793a9ac8606Spatrick // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
1794e5dd7070Spatrick ApplyDebugLocation DL(*this, Cond);
1795a9ac8606Spatrick EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount,
1796a9ac8606Spatrick LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
1797e5dd7070Spatrick EmitBlock(LHSFalse);
1798e5dd7070Spatrick }
1799e5dd7070Spatrick
1800e5dd7070Spatrick incrementProfileCounter(CondBOp);
1801e5dd7070Spatrick setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1802e5dd7070Spatrick
1803e5dd7070Spatrick // Any temporaries created here are conditional.
1804e5dd7070Spatrick eval.begin(*this);
1805a9ac8606Spatrick EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock, FalseBlock,
1806a9ac8606Spatrick RHSCount, LH);
1807e5dd7070Spatrick
1808e5dd7070Spatrick eval.end(*this);
1809e5dd7070Spatrick
1810e5dd7070Spatrick return;
1811e5dd7070Spatrick }
1812e5dd7070Spatrick }
1813e5dd7070Spatrick
1814e5dd7070Spatrick if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1815e5dd7070Spatrick // br(!x, t, f) -> br(x, f, t)
1816e5dd7070Spatrick if (CondUOp->getOpcode() == UO_LNot) {
1817e5dd7070Spatrick // Negate the count.
1818e5dd7070Spatrick uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1819a9ac8606Spatrick // The values of the enum are chosen to make this negation possible.
1820a9ac8606Spatrick LH = static_cast<Stmt::Likelihood>(-LH);
1821e5dd7070Spatrick // Negate the condition and swap the destination blocks.
1822e5dd7070Spatrick return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1823a9ac8606Spatrick FalseCount, LH);
1824e5dd7070Spatrick }
1825e5dd7070Spatrick }
1826e5dd7070Spatrick
1827e5dd7070Spatrick if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1828e5dd7070Spatrick // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1829e5dd7070Spatrick llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1830e5dd7070Spatrick llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1831e5dd7070Spatrick
1832a9ac8606Spatrick // The ConditionalOperator itself has no likelihood information for its
1833a9ac8606Spatrick // true and false branches. This matches the behavior of __builtin_expect.
1834e5dd7070Spatrick ConditionalEvaluation cond(*this);
1835e5dd7070Spatrick EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1836a9ac8606Spatrick getProfileCount(CondOp), Stmt::LH_None);
1837e5dd7070Spatrick
1838e5dd7070Spatrick // When computing PGO branch weights, we only know the overall count for
1839e5dd7070Spatrick // the true block. This code is essentially doing tail duplication of the
1840e5dd7070Spatrick // naive code-gen, introducing new edges for which counts are not
1841e5dd7070Spatrick // available. Divide the counts proportionally between the LHS and RHS of
1842e5dd7070Spatrick // the conditional operator.
1843e5dd7070Spatrick uint64_t LHSScaledTrueCount = 0;
1844e5dd7070Spatrick if (TrueCount) {
1845e5dd7070Spatrick double LHSRatio =
1846e5dd7070Spatrick getProfileCount(CondOp) / (double)getCurrentProfileCount();
1847e5dd7070Spatrick LHSScaledTrueCount = TrueCount * LHSRatio;
1848e5dd7070Spatrick }
1849e5dd7070Spatrick
1850e5dd7070Spatrick cond.begin(*this);
1851e5dd7070Spatrick EmitBlock(LHSBlock);
1852e5dd7070Spatrick incrementProfileCounter(CondOp);
1853e5dd7070Spatrick {
1854e5dd7070Spatrick ApplyDebugLocation DL(*this, Cond);
1855e5dd7070Spatrick EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1856a9ac8606Spatrick LHSScaledTrueCount, LH);
1857e5dd7070Spatrick }
1858e5dd7070Spatrick cond.end(*this);
1859e5dd7070Spatrick
1860e5dd7070Spatrick cond.begin(*this);
1861e5dd7070Spatrick EmitBlock(RHSBlock);
1862e5dd7070Spatrick EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1863a9ac8606Spatrick TrueCount - LHSScaledTrueCount, LH);
1864e5dd7070Spatrick cond.end(*this);
1865e5dd7070Spatrick
1866e5dd7070Spatrick return;
1867e5dd7070Spatrick }
1868e5dd7070Spatrick
1869e5dd7070Spatrick if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1870e5dd7070Spatrick // Conditional operator handling can give us a throw expression as a
1871e5dd7070Spatrick // condition for a case like:
1872e5dd7070Spatrick // br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
1873e5dd7070Spatrick // Fold this to:
1874e5dd7070Spatrick // br(c, throw x, br(y, t, f))
1875e5dd7070Spatrick EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1876e5dd7070Spatrick return;
1877e5dd7070Spatrick }
1878e5dd7070Spatrick
1879a9ac8606Spatrick // Emit the code with the fully general case.
1880a9ac8606Spatrick llvm::Value *CondV;
1881a9ac8606Spatrick {
1882a9ac8606Spatrick ApplyDebugLocation DL(*this, Cond);
1883a9ac8606Spatrick CondV = EvaluateExprAsBool(Cond);
1884a9ac8606Spatrick }
1885a9ac8606Spatrick
1886a9ac8606Spatrick llvm::MDNode *Weights = nullptr;
1887a9ac8606Spatrick llvm::MDNode *Unpredictable = nullptr;
1888a9ac8606Spatrick
1889e5dd7070Spatrick // If the branch has a condition wrapped by __builtin_unpredictable,
1890e5dd7070Spatrick // create metadata that specifies that the branch is unpredictable.
1891e5dd7070Spatrick // Don't bother if not optimizing because that metadata would not be used.
1892e5dd7070Spatrick auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1893e5dd7070Spatrick if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1894e5dd7070Spatrick auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1895e5dd7070Spatrick if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1896e5dd7070Spatrick llvm::MDBuilder MDHelper(getLLVMContext());
1897e5dd7070Spatrick Unpredictable = MDHelper.createUnpredictable();
1898e5dd7070Spatrick }
1899e5dd7070Spatrick }
1900e5dd7070Spatrick
1901a9ac8606Spatrick // If there is likelihood information for the condition, lower it.
1902a9ac8606Spatrick // Note that when not optimizing this won't emit anything.
1903a9ac8606Spatrick llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(CondV, LH);
1904a9ac8606Spatrick if (CondV != NewCondV)
1905a9ac8606Spatrick CondV = NewCondV;
1906a9ac8606Spatrick else {
1907a9ac8606Spatrick // Otherwise, lower profile counts. Note that we do this even at -O0.
1908e5dd7070Spatrick uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1909a9ac8606Spatrick Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
1910e5dd7070Spatrick }
1911a9ac8606Spatrick
1912e5dd7070Spatrick Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
1913e5dd7070Spatrick }
1914e5dd7070Spatrick
1915e5dd7070Spatrick /// ErrorUnsupported - Print out an error that codegen doesn't support the
1916e5dd7070Spatrick /// specified stmt yet.
1917e5dd7070Spatrick void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
1918e5dd7070Spatrick CGM.ErrorUnsupported(S, Type);
1919e5dd7070Spatrick }
1920e5dd7070Spatrick
1921e5dd7070Spatrick /// emitNonZeroVLAInit - Emit the "zero" initialization of a
1922e5dd7070Spatrick /// variable-length array whose elements have a non-zero bit-pattern.
1923e5dd7070Spatrick ///
1924e5dd7070Spatrick /// \param baseType the inner-most element type of the array
1925e5dd7070Spatrick /// \param src - a char* pointing to the bit-pattern for a single
1926e5dd7070Spatrick /// base element of the array
1927e5dd7070Spatrick /// \param sizeInChars - the total size of the VLA, in chars
1928e5dd7070Spatrick static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
1929e5dd7070Spatrick Address dest, Address src,
1930e5dd7070Spatrick llvm::Value *sizeInChars) {
1931e5dd7070Spatrick CGBuilderTy &Builder = CGF.Builder;
1932e5dd7070Spatrick
1933e5dd7070Spatrick CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
1934e5dd7070Spatrick llvm::Value *baseSizeInChars
1935e5dd7070Spatrick = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
1936e5dd7070Spatrick
1937e5dd7070Spatrick Address begin =
1938e5dd7070Spatrick Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
1939a9ac8606Spatrick llvm::Value *end = Builder.CreateInBoundsGEP(
1940a9ac8606Spatrick begin.getElementType(), begin.getPointer(), sizeInChars, "vla.end");
1941e5dd7070Spatrick
1942e5dd7070Spatrick llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
1943e5dd7070Spatrick llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
1944e5dd7070Spatrick llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
1945e5dd7070Spatrick
1946e5dd7070Spatrick // Make a loop over the VLA. C99 guarantees that the VLA element
1947e5dd7070Spatrick // count must be nonzero.
1948e5dd7070Spatrick CGF.EmitBlock(loopBB);
1949e5dd7070Spatrick
1950e5dd7070Spatrick llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
1951e5dd7070Spatrick cur->addIncoming(begin.getPointer(), originBB);
1952e5dd7070Spatrick
1953e5dd7070Spatrick CharUnits curAlign =
1954e5dd7070Spatrick dest.getAlignment().alignmentOfArrayElement(baseSize);
1955e5dd7070Spatrick
1956e5dd7070Spatrick // memcpy the individual element bit-pattern.
1957*12c85518Srobert Builder.CreateMemCpy(Address(cur, CGF.Int8Ty, curAlign), src, baseSizeInChars,
1958e5dd7070Spatrick /*volatile*/ false);
1959e5dd7070Spatrick
1960e5dd7070Spatrick // Go to the next element.
1961e5dd7070Spatrick llvm::Value *next =
1962e5dd7070Spatrick Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
1963e5dd7070Spatrick
1964e5dd7070Spatrick // Leave if that's the end of the VLA.
1965e5dd7070Spatrick llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
1966e5dd7070Spatrick Builder.CreateCondBr(done, contBB, loopBB);
1967e5dd7070Spatrick cur->addIncoming(next, loopBB);
1968e5dd7070Spatrick
1969e5dd7070Spatrick CGF.EmitBlock(contBB);
1970e5dd7070Spatrick }
1971e5dd7070Spatrick
1972e5dd7070Spatrick void
1973e5dd7070Spatrick CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
1974e5dd7070Spatrick // Ignore empty classes in C++.
1975e5dd7070Spatrick if (getLangOpts().CPlusPlus) {
1976e5dd7070Spatrick if (const RecordType *RT = Ty->getAs<RecordType>()) {
1977e5dd7070Spatrick if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
1978e5dd7070Spatrick return;
1979e5dd7070Spatrick }
1980e5dd7070Spatrick }
1981e5dd7070Spatrick
1982e5dd7070Spatrick // Cast the dest ptr to the appropriate i8 pointer type.
1983e5dd7070Spatrick if (DestPtr.getElementType() != Int8Ty)
1984e5dd7070Spatrick DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1985e5dd7070Spatrick
1986e5dd7070Spatrick // Get size and alignment info for this aggregate.
1987e5dd7070Spatrick CharUnits size = getContext().getTypeSizeInChars(Ty);
1988e5dd7070Spatrick
1989e5dd7070Spatrick llvm::Value *SizeVal;
1990e5dd7070Spatrick const VariableArrayType *vla;
1991e5dd7070Spatrick
1992e5dd7070Spatrick // Don't bother emitting a zero-byte memset.
1993e5dd7070Spatrick if (size.isZero()) {
1994e5dd7070Spatrick // But note that getTypeInfo returns 0 for a VLA.
1995e5dd7070Spatrick if (const VariableArrayType *vlaType =
1996e5dd7070Spatrick dyn_cast_or_null<VariableArrayType>(
1997e5dd7070Spatrick getContext().getAsArrayType(Ty))) {
1998e5dd7070Spatrick auto VlaSize = getVLASize(vlaType);
1999e5dd7070Spatrick SizeVal = VlaSize.NumElts;
2000e5dd7070Spatrick CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
2001e5dd7070Spatrick if (!eltSize.isOne())
2002e5dd7070Spatrick SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
2003e5dd7070Spatrick vla = vlaType;
2004e5dd7070Spatrick } else {
2005e5dd7070Spatrick return;
2006e5dd7070Spatrick }
2007e5dd7070Spatrick } else {
2008e5dd7070Spatrick SizeVal = CGM.getSize(size);
2009e5dd7070Spatrick vla = nullptr;
2010e5dd7070Spatrick }
2011e5dd7070Spatrick
2012e5dd7070Spatrick // If the type contains a pointer to data member, we can't memset it to zero.
2013e5dd7070Spatrick // Instead, create a null constant and copy it to the destination.
2014e5dd7070Spatrick // TODO: there are other patterns besides zero that we can usefully memset,
2015e5dd7070Spatrick // like -1, which happens to be the pattern used by member-pointers.
2016e5dd7070Spatrick if (!CGM.getTypes().isZeroInitializable(Ty)) {
2017e5dd7070Spatrick // For a VLA, emit a single element, then splat that over the VLA.
2018e5dd7070Spatrick if (vla) Ty = getContext().getBaseElementType(vla);
2019e5dd7070Spatrick
2020e5dd7070Spatrick llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
2021e5dd7070Spatrick
2022e5dd7070Spatrick llvm::GlobalVariable *NullVariable =
2023e5dd7070Spatrick new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
2024e5dd7070Spatrick /*isConstant=*/true,
2025e5dd7070Spatrick llvm::GlobalVariable::PrivateLinkage,
2026e5dd7070Spatrick NullConstant, Twine());
2027e5dd7070Spatrick CharUnits NullAlign = DestPtr.getAlignment();
2028e5dd7070Spatrick NullVariable->setAlignment(NullAlign.getAsAlign());
2029e5dd7070Spatrick Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
2030*12c85518Srobert Builder.getInt8Ty(), NullAlign);
2031e5dd7070Spatrick
2032e5dd7070Spatrick if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
2033e5dd7070Spatrick
2034e5dd7070Spatrick // Get and call the appropriate llvm.memcpy overload.
2035e5dd7070Spatrick Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
2036e5dd7070Spatrick return;
2037e5dd7070Spatrick }
2038e5dd7070Spatrick
2039e5dd7070Spatrick // Otherwise, just memset the whole thing to zero. This is legal
2040e5dd7070Spatrick // because in LLVM, all default initializers (other than the ones we just
2041e5dd7070Spatrick // handled above) are guaranteed to have a bit pattern of all zeros.
2042e5dd7070Spatrick Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
2043e5dd7070Spatrick }
2044e5dd7070Spatrick
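/// GetAddrOfLabel - Return the llvm::BlockAddress of a label whose address
/// is taken, registering its block as a destination of the function's
/// shared indirect-goto branch.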
2045e5dd7070Spatrick llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
2046e5dd7070Spatrick // Make sure that there is a block for the indirect goto.
2047e5dd7070Spatrick if (!IndirectBranch)
2048e5dd7070Spatrick GetIndirectGotoBlock();
2049e5dd7070Spatrick
2050e5dd7070Spatrick llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
2051e5dd7070Spatrick
2052e5dd7070Spatrick // Make sure the indirect branch includes all of the address-taken blocks.
2053e5dd7070Spatrick IndirectBranch->addDestination(BB);
2054e5dd7070Spatrick return llvm::BlockAddress::get(CurFn, BB);
2055e5dd7070Spatrick }
2056e5dd7070Spatrick
2057e5dd7070Spatrick llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
2058e5dd7070Spatrick // If we already made the indirect branch for indirect goto, return its block.
2059e5dd7070Spatrick if (IndirectBranch) return IndirectBranch->getParent();
2060e5dd7070Spatrick
2061e5dd7070Spatrick CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
2062e5dd7070Spatrick
2063e5dd7070Spatrick // Create the PHI node that indirect gotos will add entries to.
2064e5dd7070Spatrick llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
2065e5dd7070Spatrick "indirect.goto.dest");
2066e5dd7070Spatrick
2067e5dd7070Spatrick // Create the indirect branch instruction.
2068e5dd7070Spatrick IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
2069e5dd7070Spatrick return IndirectBranch->getParent();
2070e5dd7070Spatrick }
2071e5dd7070Spatrick
2072e5dd7070Spatrick /// Computes the length of an array in elements, as well as the base
2073e5dd7070Spatrick /// element type and a properly-typed first element pointer.
2074e5dd7070Spatrick llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
2075e5dd7070Spatrick QualType &baseType,
2076e5dd7070Spatrick Address &addr) {
2077e5dd7070Spatrick const ArrayType *arrayType = origArrayType;
2078e5dd7070Spatrick
2079e5dd7070Spatrick // If it's a VLA, we have to load the stored size. Note that
2080e5dd7070Spatrick // this is the number of elements in the VLA, not its size in bytes.
2081e5dd7070Spatrick llvm::Value *numVLAElements = nullptr;
2082e5dd7070Spatrick if (isa<VariableArrayType>(arrayType)) {
2083e5dd7070Spatrick numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
2084e5dd7070Spatrick
2085e5dd7070Spatrick // Walk into all VLAs. This doesn't require changes to addr,
2086e5dd7070Spatrick // which has type T* where T is the first non-VLA element type.
2087e5dd7070Spatrick do {
2088e5dd7070Spatrick QualType elementType = arrayType->getElementType();
2089e5dd7070Spatrick arrayType = getContext().getAsArrayType(elementType);
2090e5dd7070Spatrick
2091e5dd7070Spatrick // If we only have VLA components, 'addr' requires no adjustment.
2092e5dd7070Spatrick if (!arrayType) {
2093e5dd7070Spatrick baseType = elementType;
2094e5dd7070Spatrick return numVLAElements;
2095e5dd7070Spatrick }
2096e5dd7070Spatrick } while (isa<VariableArrayType>(arrayType));
2097e5dd7070Spatrick
2098e5dd7070Spatrick // We get out here only if we find a constant array type
2099e5dd7070Spatrick // inside the VLA.
2100e5dd7070Spatrick }
2101e5dd7070Spatrick
2102e5dd7070Spatrick // We have some number of constant-length arrays, so addr should
2103e5dd7070Spatrick // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
2104e5dd7070Spatrick // down to the first element of addr.
2105e5dd7070Spatrick SmallVector<llvm::Value*, 8> gepIndices;
2106e5dd7070Spatrick
2107e5dd7070Spatrick // GEP down to the array type.
2108e5dd7070Spatrick llvm::ConstantInt *zero = Builder.getInt32(0);
2109e5dd7070Spatrick gepIndices.push_back(zero);
2110e5dd7070Spatrick
2111e5dd7070Spatrick uint64_t countFromCLAs = 1;
2112e5dd7070Spatrick QualType eltType;
2113e5dd7070Spatrick
2114e5dd7070Spatrick llvm::ArrayType *llvmArrayType =
2115e5dd7070Spatrick dyn_cast<llvm::ArrayType>(addr.getElementType());
2116e5dd7070Spatrick while (llvmArrayType) {
2117e5dd7070Spatrick assert(isa<ConstantArrayType>(arrayType));
2118e5dd7070Spatrick assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
2119e5dd7070Spatrick == llvmArrayType->getNumElements());
2120e5dd7070Spatrick
2121e5dd7070Spatrick gepIndices.push_back(zero);
2122e5dd7070Spatrick countFromCLAs *= llvmArrayType->getNumElements();
2123e5dd7070Spatrick eltType = arrayType->getElementType();
2124e5dd7070Spatrick
2125e5dd7070Spatrick llvmArrayType =
2126e5dd7070Spatrick dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
2127e5dd7070Spatrick arrayType = getContext().getAsArrayType(arrayType->getElementType());
2128e5dd7070Spatrick assert((!llvmArrayType || arrayType) &&
2129e5dd7070Spatrick "LLVM and Clang types are out-of-synch");
2130e5dd7070Spatrick }
2131e5dd7070Spatrick
2132e5dd7070Spatrick if (arrayType) {
2133e5dd7070Spatrick // From this point onwards, the Clang array type has been emitted
2134e5dd7070Spatrick // as some other type (probably a packed struct). Compute the array
2135e5dd7070Spatrick // size, and just emit the 'begin' expression as a bitcast.
2136e5dd7070Spatrick while (arrayType) {
2137e5dd7070Spatrick countFromCLAs *=
2138e5dd7070Spatrick cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
2139e5dd7070Spatrick eltType = arrayType->getElementType();
2140e5dd7070Spatrick arrayType = getContext().getAsArrayType(eltType);
2141e5dd7070Spatrick }
2142e5dd7070Spatrick
2143e5dd7070Spatrick llvm::Type *baseType = ConvertType(eltType);
2144e5dd7070Spatrick addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
2145e5dd7070Spatrick } else {
2146e5dd7070Spatrick // Create the actual GEP.
2147a9ac8606Spatrick addr = Address(Builder.CreateInBoundsGEP(
2148a9ac8606Spatrick addr.getElementType(), addr.getPointer(), gepIndices, "array.begin"),
2149*12c85518Srobert ConvertTypeForMem(eltType),
2150e5dd7070Spatrick addr.getAlignment());
2151e5dd7070Spatrick }
2152e5dd7070Spatrick
2153e5dd7070Spatrick baseType = eltType;
2154e5dd7070Spatrick
2155e5dd7070Spatrick llvm::Value *numElements
2156e5dd7070Spatrick = llvm::ConstantInt::get(SizeTy, countFromCLAs);
2157e5dd7070Spatrick
2158e5dd7070Spatrick // If we had any VLA dimensions, factor them in.
2159e5dd7070Spatrick if (numVLAElements)
2160e5dd7070Spatrick numElements = Builder.CreateNUWMul(numVLAElements, numElements);
2161e5dd7070Spatrick
2162e5dd7070Spatrick return numElements;
2163e5dd7070Spatrick }
2164e5dd7070Spatrick
2165e5dd7070Spatrick CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
2166e5dd7070Spatrick const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
2167e5dd7070Spatrick assert(vla && "type was not a variable array type!");
2168e5dd7070Spatrick return getVLASize(vla);
2169e5dd7070Spatrick }
2170e5dd7070Spatrick
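/// Return the total element count of a (possibly multidimensional) VLA,
/// multiplying the previously emitted size of each variable dimension,
/// together with the base (non-variable) element type.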
2171e5dd7070Spatrick CodeGenFunction::VlaSizePair
2172e5dd7070Spatrick CodeGenFunction::getVLASize(const VariableArrayType *type) {
2173e5dd7070Spatrick // The number of elements so far; always size_t.
2174e5dd7070Spatrick llvm::Value *numElements = nullptr;
2175e5dd7070Spatrick
2176e5dd7070Spatrick QualType elementType;
2177e5dd7070Spatrick do {
2178e5dd7070Spatrick elementType = type->getElementType();
2179e5dd7070Spatrick llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
2180e5dd7070Spatrick assert(vlaSize && "no size for VLA!");
2181e5dd7070Spatrick assert(vlaSize->getType() == SizeTy);
2182e5dd7070Spatrick
2183e5dd7070Spatrick if (!numElements) {
2184e5dd7070Spatrick numElements = vlaSize;
2185e5dd7070Spatrick } else {
2186e5dd7070Spatrick // It's undefined behavior if this wraps around, so mark it that way.
2187e5dd7070Spatrick // FIXME: Teach -fsanitize=undefined to trap this.
2188e5dd7070Spatrick numElements = Builder.CreateNUWMul(numElements, vlaSize);
2189e5dd7070Spatrick }
2190e5dd7070Spatrick } while ((type = getContext().getAsVariableArrayType(elementType)));
2191e5dd7070Spatrick
2192e5dd7070Spatrick return { numElements, elementType };
2193e5dd7070Spatrick }
2194e5dd7070Spatrick
2195e5dd7070Spatrick CodeGenFunction::VlaSizePair
2196e5dd7070Spatrick CodeGenFunction::getVLAElements1D(QualType type) {
2197e5dd7070Spatrick const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
2198e5dd7070Spatrick assert(vla && "type was not a variable array type!");
2199e5dd7070Spatrick return getVLAElements1D(vla);
2200e5dd7070Spatrick }
2201e5dd7070Spatrick
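/// Return the element count of only the outermost VLA dimension, along with
/// its element type (which may itself be variably modified).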
2202e5dd7070Spatrick CodeGenFunction::VlaSizePair
2203e5dd7070Spatrick CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
2204e5dd7070Spatrick llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
2205e5dd7070Spatrick assert(VlaSize && "no size for VLA!");
2206e5dd7070Spatrick assert(VlaSize->getType() == SizeTy);
2207e5dd7070Spatrick return { VlaSize, Vla->getElementType() };
2208e5dd7070Spatrick }
2209e5dd7070Spatrick
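/// Walk a variably modified type and emit, at most once, the size
/// expression of every VLA found along the way, caching the result in
/// VLASizeMap. Under -fsanitize=vla-bound, also check that each evaluated
/// size is positive.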
2210e5dd7070Spatrick void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
2211e5dd7070Spatrick assert(type->isVariablyModifiedType() &&
2212e5dd7070Spatrick "Must pass variably modified type to EmitVLASizes!");
2213e5dd7070Spatrick
2214e5dd7070Spatrick EnsureInsertPoint();
2215e5dd7070Spatrick
2216e5dd7070Spatrick // We're going to walk down into the type and look for VLA
2217e5dd7070Spatrick // expressions.
2218e5dd7070Spatrick do {
2219e5dd7070Spatrick assert(type->isVariablyModifiedType());
2220e5dd7070Spatrick
2221e5dd7070Spatrick const Type *ty = type.getTypePtr();
2222e5dd7070Spatrick switch (ty->getTypeClass()) {
2223e5dd7070Spatrick
2224e5dd7070Spatrick #define TYPE(Class, Base)
2225e5dd7070Spatrick #define ABSTRACT_TYPE(Class, Base)
2226e5dd7070Spatrick #define NON_CANONICAL_TYPE(Class, Base)
2227e5dd7070Spatrick #define DEPENDENT_TYPE(Class, Base) case Type::Class:
2228e5dd7070Spatrick #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
2229e5dd7070Spatrick #include "clang/AST/TypeNodes.inc"
2230e5dd7070Spatrick llvm_unreachable("unexpected dependent type!");
2231e5dd7070Spatrick
2232e5dd7070Spatrick // These types are never variably-modified.
2233e5dd7070Spatrick case Type::Builtin:
2234e5dd7070Spatrick case Type::Complex:
2235e5dd7070Spatrick case Type::Vector:
2236e5dd7070Spatrick case Type::ExtVector:
2237ec727ea7Spatrick case Type::ConstantMatrix:
2238e5dd7070Spatrick case Type::Record:
2239e5dd7070Spatrick case Type::Enum:
2240*12c85518Srobert case Type::Using:
2241e5dd7070Spatrick case Type::TemplateSpecialization:
2242e5dd7070Spatrick case Type::ObjCTypeParam:
2243e5dd7070Spatrick case Type::ObjCObject:
2244e5dd7070Spatrick case Type::ObjCInterface:
2245e5dd7070Spatrick case Type::ObjCObjectPointer:
2246*12c85518Srobert case Type::BitInt:
2247e5dd7070Spatrick llvm_unreachable("type class is never variably-modified!");
2248e5dd7070Spatrick
2249*12c85518Srobert case Type::Elaborated:
2250*12c85518Srobert type = cast<ElaboratedType>(ty)->getNamedType();
2251*12c85518Srobert break;
2252*12c85518Srobert
2253e5dd7070Spatrick case Type::Adjusted:
2254e5dd7070Spatrick type = cast<AdjustedType>(ty)->getAdjustedType();
2255e5dd7070Spatrick break;
2256e5dd7070Spatrick
2257e5dd7070Spatrick case Type::Decayed:
2258e5dd7070Spatrick type = cast<DecayedType>(ty)->getPointeeType();
2259e5dd7070Spatrick break;
2260e5dd7070Spatrick
2261e5dd7070Spatrick case Type::Pointer:
2262e5dd7070Spatrick type = cast<PointerType>(ty)->getPointeeType();
2263e5dd7070Spatrick break;
2264e5dd7070Spatrick
2265e5dd7070Spatrick case Type::BlockPointer:
2266e5dd7070Spatrick type = cast<BlockPointerType>(ty)->getPointeeType();
2267e5dd7070Spatrick break;
2268e5dd7070Spatrick
2269e5dd7070Spatrick case Type::LValueReference:
2270e5dd7070Spatrick case Type::RValueReference:
2271e5dd7070Spatrick type = cast<ReferenceType>(ty)->getPointeeType();
2272e5dd7070Spatrick break;
2273e5dd7070Spatrick
2274e5dd7070Spatrick case Type::MemberPointer:
2275e5dd7070Spatrick type = cast<MemberPointerType>(ty)->getPointeeType();
2276e5dd7070Spatrick break;
2277e5dd7070Spatrick
2278e5dd7070Spatrick case Type::ConstantArray:
2279e5dd7070Spatrick case Type::IncompleteArray:
2280e5dd7070Spatrick // Losing element qualification here is fine.
2281e5dd7070Spatrick type = cast<ArrayType>(ty)->getElementType();
2282e5dd7070Spatrick break;
2283e5dd7070Spatrick
2284e5dd7070Spatrick case Type::VariableArray: {
2285e5dd7070Spatrick // Losing element qualification here is fine.
2286e5dd7070Spatrick const VariableArrayType *vat = cast<VariableArrayType>(ty);
2287e5dd7070Spatrick
2288e5dd7070Spatrick // Unknown size indication requires no size computation.
2289e5dd7070Spatrick // Otherwise, evaluate and record it.
2290*12c85518Srobert if (const Expr *sizeExpr = vat->getSizeExpr()) {
2291e5dd7070Spatrick // It's possible that we might have emitted this already,
2292e5dd7070Spatrick // e.g. with a typedef and a pointer to it.
2293*12c85518Srobert llvm::Value *&entry = VLASizeMap[sizeExpr];
2294e5dd7070Spatrick if (!entry) {
2295*12c85518Srobert llvm::Value *size = EmitScalarExpr(sizeExpr);
2296e5dd7070Spatrick
2297e5dd7070Spatrick // C11 6.7.6.2p5:
2298e5dd7070Spatrick // If the size is an expression that is not an integer constant
2299e5dd7070Spatrick // expression [...] each time it is evaluated it shall have a value
2300e5dd7070Spatrick // greater than zero.
2301*12c85518Srobert if (SanOpts.has(SanitizerKind::VLABound)) {
2302e5dd7070Spatrick SanitizerScope SanScope(this);
2303*12c85518Srobert llvm::Value *Zero = llvm::Constant::getNullValue(size->getType());
2304*12c85518Srobert clang::QualType SEType = sizeExpr->getType();
2305*12c85518Srobert llvm::Value *CheckCondition =
2306*12c85518Srobert SEType->isSignedIntegerType()
2307*12c85518Srobert ? Builder.CreateICmpSGT(size, Zero)
2308*12c85518Srobert : Builder.CreateICmpUGT(size, Zero);
2309e5dd7070Spatrick llvm::Constant *StaticArgs[] = {
2310*12c85518Srobert EmitCheckSourceLocation(sizeExpr->getBeginLoc()),
2311*12c85518Srobert EmitCheckTypeDescriptor(SEType)};
2312*12c85518Srobert EmitCheck(std::make_pair(CheckCondition, SanitizerKind::VLABound),
2313*12c85518Srobert SanitizerHandler::VLABoundNotPositive, StaticArgs, size);
2314e5dd7070Spatrick }
2315e5dd7070Spatrick
2316e5dd7070Spatrick // Always zexting here would be wrong if it weren't
2317e5dd7070Spatrick // undefined behavior to have a negative bound.
2318*12c85518Srobert // FIXME: What about when size's type is larger than size_t?
2319*12c85518Srobert entry = Builder.CreateIntCast(size, SizeTy, /*signed*/ false);
2320e5dd7070Spatrick }
2321e5dd7070Spatrick }
2322e5dd7070Spatrick type = vat->getElementType();
2323e5dd7070Spatrick break;
2324e5dd7070Spatrick }
2325e5dd7070Spatrick
2326e5dd7070Spatrick case Type::FunctionProto:
2327e5dd7070Spatrick case Type::FunctionNoProto:
2328e5dd7070Spatrick type = cast<FunctionType>(ty)->getReturnType();
2329e5dd7070Spatrick break;
2330e5dd7070Spatrick
2331e5dd7070Spatrick case Type::Paren:
2332e5dd7070Spatrick case Type::TypeOf:
2333e5dd7070Spatrick case Type::UnaryTransform:
2334e5dd7070Spatrick case Type::Attributed:
2335*12c85518Srobert case Type::BTFTagAttributed:
2336e5dd7070Spatrick case Type::SubstTemplateTypeParm:
2337e5dd7070Spatrick case Type::MacroQualified:
2338e5dd7070Spatrick // Keep walking after single level desugaring.
2339e5dd7070Spatrick type = type.getSingleStepDesugaredType(getContext());
2340e5dd7070Spatrick break;
2341e5dd7070Spatrick
2342e5dd7070Spatrick case Type::Typedef:
2343e5dd7070Spatrick case Type::Decltype:
2344e5dd7070Spatrick case Type::Auto:
2345e5dd7070Spatrick case Type::DeducedTemplateSpecialization:
2346e5dd7070Spatrick // Stop walking: nothing to do.
2347e5dd7070Spatrick return;
2348e5dd7070Spatrick
2349e5dd7070Spatrick case Type::TypeOfExpr:
2350e5dd7070Spatrick // Stop walking: emit typeof expression.
2351e5dd7070Spatrick EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2352e5dd7070Spatrick return;
2353e5dd7070Spatrick
2354e5dd7070Spatrick case Type::Atomic:
2355e5dd7070Spatrick type = cast<AtomicType>(ty)->getValueType();
2356e5dd7070Spatrick break;
2357e5dd7070Spatrick
2358e5dd7070Spatrick case Type::Pipe:
2359e5dd7070Spatrick type = cast<PipeType>(ty)->getElementType();
2360e5dd7070Spatrick break;
2361e5dd7070Spatrick }
2362e5dd7070Spatrick } while (type->isVariablyModifiedType());
2363e5dd7070Spatrick }
2364e5dd7070Spatrick
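/// EmitVAListRef - Produce the address of a va_list operand. On targets
/// whose __builtin_va_list is an array type, the expression decays to a
/// pointer; otherwise the lvalue's own address is used.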
2365e5dd7070Spatrick Address CodeGenFunction::EmitVAListRef(const Expr* E) {
2366e5dd7070Spatrick if (getContext().getBuiltinVaListType()->isArrayType())
2367e5dd7070Spatrick return EmitPointerWithAlignment(E);
2368e5dd7070Spatrick return EmitLValue(E).getAddress(*this);
2369e5dd7070Spatrick }
2370e5dd7070Spatrick
2371e5dd7070Spatrick Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2372e5dd7070Spatrick return EmitLValue(E).getAddress(*this);
2373e5dd7070Spatrick }
2374e5dd7070Spatrick
2375e5dd7070Spatrick void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2376e5dd7070Spatrick const APValue &Init) {
2377e5dd7070Spatrick assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2378e5dd7070Spatrick if (CGDebugInfo *Dbg = getDebugInfo())
2379e5dd7070Spatrick if (CGM.getCodeGenOpts().hasReducedDebugInfo())
2380e5dd7070Spatrick Dbg->EmitGlobalVariable(E->getDecl(), Init);
2381e5dd7070Spatrick }
2382e5dd7070Spatrick
2383e5dd7070Spatrick CodeGenFunction::PeepholeProtection
2384e5dd7070Spatrick CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2385e5dd7070Spatrick // At the moment, the only aggressive peephole we do in IR gen
2386e5dd7070Spatrick // is trunc(zext) folding, but if we add more, we can easily
2387e5dd7070Spatrick // extend this protection.
2388e5dd7070Spatrick
2389e5dd7070Spatrick if (!rvalue.isScalar()) return PeepholeProtection();
2390e5dd7070Spatrick llvm::Value *value = rvalue.getScalarVal();
2391e5dd7070Spatrick if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2392e5dd7070Spatrick
2393e5dd7070Spatrick // Just make an extra bitcast.
2394e5dd7070Spatrick assert(HaveInsertPoint());
2395e5dd7070Spatrick llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2396e5dd7070Spatrick Builder.GetInsertBlock());
2397e5dd7070Spatrick
2398e5dd7070Spatrick PeepholeProtection protection;
2399e5dd7070Spatrick protection.Inst = inst;
2400e5dd7070Spatrick return protection;
2401e5dd7070Spatrick }
2402e5dd7070Spatrick
2403e5dd7070Spatrick void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2404e5dd7070Spatrick if (!protection.Inst) return;
2405e5dd7070Spatrick
2406e5dd7070Spatrick // In theory, we could try to duplicate the peepholes now, but whatever.
2407e5dd7070Spatrick protection.Inst->eraseFromParent();
2408e5dd7070Spatrick }
2409e5dd7070Spatrick
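// Emit an llvm.assume-based alignment assumption on PtrValue and, when
// -fsanitize=alignment is enabled, a runtime check that the (optionally
// offset) pointer really has the claimed alignment.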
2410ec727ea7Spatrick void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2411e5dd7070Spatrick QualType Ty, SourceLocation Loc,
2412e5dd7070Spatrick SourceLocation AssumptionLoc,
2413e5dd7070Spatrick llvm::Value *Alignment,
2414e5dd7070Spatrick llvm::Value *OffsetValue) {
2415a9ac8606Spatrick if (Alignment->getType() != IntPtrTy)
2416a9ac8606Spatrick Alignment =
2417a9ac8606Spatrick Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
2418a9ac8606Spatrick if (OffsetValue && OffsetValue->getType() != IntPtrTy)
2419a9ac8606Spatrick OffsetValue =
2420a9ac8606Spatrick Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
2421a9ac8606Spatrick llvm::Value *TheCheck = nullptr;
2422e5dd7070Spatrick if (SanOpts.has(SanitizerKind::Alignment)) {
2423a9ac8606Spatrick llvm::Value *PtrIntValue =
2424a9ac8606Spatrick Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
2425a9ac8606Spatrick
2426a9ac8606Spatrick if (OffsetValue) {
2427a9ac8606Spatrick bool IsOffsetZero = false;
2428a9ac8606Spatrick if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
2429a9ac8606Spatrick IsOffsetZero = CI->isZero();
2430a9ac8606Spatrick
2431a9ac8606Spatrick if (!IsOffsetZero)
2432a9ac8606Spatrick PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
2433a9ac8606Spatrick }
2434a9ac8606Spatrick
2435a9ac8606Spatrick llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
2436a9ac8606Spatrick llvm::Value *Mask =
2437a9ac8606Spatrick Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
2438a9ac8606Spatrick llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
2439a9ac8606Spatrick TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
2440a9ac8606Spatrick }
2441a9ac8606Spatrick llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2442a9ac8606Spatrick CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
2443a9ac8606Spatrick
2444a9ac8606Spatrick if (!SanOpts.has(SanitizerKind::Alignment))
2445a9ac8606Spatrick return;
2446ec727ea7Spatrick emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2447e5dd7070Spatrick OffsetValue, TheCheck, Assumption);
2448e5dd7070Spatrick }
2449e5dd7070Spatrick
2450ec727ea7Spatrick void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2451e5dd7070Spatrick const Expr *E,
2452e5dd7070Spatrick SourceLocation AssumptionLoc,
2453e5dd7070Spatrick llvm::Value *Alignment,
2454e5dd7070Spatrick llvm::Value *OffsetValue) {
2455e5dd7070Spatrick QualType Ty = E->getType();
2456e5dd7070Spatrick SourceLocation Loc = E->getExprLoc();
2457e5dd7070Spatrick
2458ec727ea7Spatrick emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2459e5dd7070Spatrick OffsetValue);
2460e5dd7070Spatrick }
2461e5dd7070Spatrick
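// Build a call to one of the llvm.*.annotation intrinsics, passing the
// annotated value, the annotation string, the translation-unit string, the
// source line number, and, if present, the attribute's constant arguments.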
2462e5dd7070Spatrick llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2463e5dd7070Spatrick llvm::Value *AnnotatedVal,
2464e5dd7070Spatrick StringRef AnnotationStr,
2465a9ac8606Spatrick SourceLocation Location,
2466a9ac8606Spatrick const AnnotateAttr *Attr) {
2467a9ac8606Spatrick SmallVector<llvm::Value *, 5> Args = {
2468e5dd7070Spatrick AnnotatedVal,
2469*12c85518Srobert Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr),
2470*12c85518Srobert ConstGlobalsPtrTy),
2471*12c85518Srobert Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location),
2472*12c85518Srobert ConstGlobalsPtrTy),
2473a9ac8606Spatrick CGM.EmitAnnotationLineNo(Location),
2474e5dd7070Spatrick };
2475a9ac8606Spatrick if (Attr)
2476a9ac8606Spatrick Args.push_back(CGM.EmitAnnotationArgs(Attr));
2477e5dd7070Spatrick return Builder.CreateCall(AnnotationFn, Args);
2478e5dd7070Spatrick }
2479e5dd7070Spatrick
2480e5dd7070Spatrick void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2481e5dd7070Spatrick assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2482e5dd7070Spatrick // FIXME We create a new bitcast for every annotation because that's what
2483e5dd7070Spatrick // llvm-gcc was doing.
2484*12c85518Srobert unsigned AS = V->getType()->getPointerAddressSpace();
2485*12c85518Srobert llvm::Type *I8PtrTy = Builder.getInt8PtrTy(AS);
2486e5dd7070Spatrick for (const auto *I : D->specific_attrs<AnnotateAttr>())
2487*12c85518Srobert EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation,
2488*12c85518Srobert {I8PtrTy, CGM.ConstGlobalsPtrTy}),
2489*12c85518Srobert Builder.CreateBitCast(V, I8PtrTy, V->getName()),
2490a9ac8606Spatrick I->getAnnotation(), D->getLocation(), I);
2491e5dd7070Spatrick }
2492e5dd7070Spatrick
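// Apply llvm.ptr.annotation to a field's address once per annotate
// attribute, threading the annotated pointer through each call.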
2493e5dd7070Spatrick Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2494e5dd7070Spatrick Address Addr) {
2495e5dd7070Spatrick assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2496e5dd7070Spatrick llvm::Value *V = Addr.getPointer();
2497e5dd7070Spatrick llvm::Type *VTy = V->getType();
2498*12c85518Srobert auto *PTy = dyn_cast<llvm::PointerType>(VTy);
2499*12c85518Srobert unsigned AS = PTy ? PTy->getAddressSpace() : 0;
2500*12c85518Srobert llvm::PointerType *IntrinTy =
2501*12c85518Srobert llvm::PointerType::getWithSamePointeeType(CGM.Int8PtrTy, AS);
2502e5dd7070Spatrick llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2503*12c85518Srobert {IntrinTy, CGM.ConstGlobalsPtrTy});
2504e5dd7070Spatrick
2505e5dd7070Spatrick for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2506e5dd7070Spatrick // FIXME Always emit the cast inst so we can differentiate between
2507e5dd7070Spatrick // annotation on the first field of a struct and annotation on the struct
2508e5dd7070Spatrick // itself.
2509*12c85518Srobert if (VTy != IntrinTy)
2510*12c85518Srobert V = Builder.CreateBitCast(V, IntrinTy);
2511a9ac8606Spatrick V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
2512e5dd7070Spatrick V = Builder.CreateBitCast(V, VTy);
2513e5dd7070Spatrick }
2514e5dd7070Spatrick
2515*12c85518Srobert return Address(V, Addr.getElementType(), Addr.getAlignment());
2516e5dd7070Spatrick }
2517e5dd7070Spatrick
2518e5dd7070Spatrick CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2519e5dd7070Spatrick
2520e5dd7070Spatrick CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2521e5dd7070Spatrick : CGF(CGF) {
2522e5dd7070Spatrick assert(!CGF->IsSanitizerScope);
2523e5dd7070Spatrick CGF->IsSanitizerScope = true;
2524e5dd7070Spatrick }
2525e5dd7070Spatrick
2526e5dd7070Spatrick CodeGenFunction::SanitizerScope::~SanitizerScope() {
2527e5dd7070Spatrick CGF->IsSanitizerScope = false;
2528e5dd7070Spatrick }
2529e5dd7070Spatrick
2530e5dd7070Spatrick void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2531e5dd7070Spatrick const llvm::Twine &Name,
2532e5dd7070Spatrick llvm::BasicBlock *BB,
2533e5dd7070Spatrick llvm::BasicBlock::iterator InsertPt) const {
2534e5dd7070Spatrick LoopStack.InsertHelper(I);
2535e5dd7070Spatrick if (IsSanitizerScope)
2536e5dd7070Spatrick CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
2537e5dd7070Spatrick }
2538e5dd7070Spatrick
2539e5dd7070Spatrick void CGBuilderInserter::InsertHelper(
2540e5dd7070Spatrick llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2541e5dd7070Spatrick llvm::BasicBlock::iterator InsertPt) const {
2542e5dd7070Spatrick llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2543e5dd7070Spatrick if (CGF)
2544e5dd7070Spatrick CGF->InsertHelper(I, Name, BB, InsertPt);
2545e5dd7070Spatrick }
2546e5dd7070Spatrick
2547e5dd7070Spatrick // Emits an error if we don't have a valid set of target features for the
2548e5dd7070Spatrick // called function.
2549e5dd7070Spatrick void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2550e5dd7070Spatrick const FunctionDecl *TargetDecl) {
2551e5dd7070Spatrick return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2552e5dd7070Spatrick }
2553e5dd7070Spatrick
2554e5dd7070Spatrick // Emits an error if we don't have a valid set of target features for the
2555e5dd7070Spatrick // called function.
2556e5dd7070Spatrick void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2557e5dd7070Spatrick const FunctionDecl *TargetDecl) {
2558e5dd7070Spatrick // Early exit if this is an indirect call.
2559e5dd7070Spatrick if (!TargetDecl)
2560e5dd7070Spatrick return;
2561e5dd7070Spatrick
2562e5dd7070Spatrick // Get the current enclosing function if it exists. If it doesn't
2563e5dd7070Spatrick // we can't check the target features anyhow.
2564e5dd7070Spatrick const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2565e5dd7070Spatrick if (!FD)
2566e5dd7070Spatrick return;
2567e5dd7070Spatrick
2568e5dd7070Spatrick // Grab the required features for the call. For a builtin this is listed in
2569e5dd7070Spatrick // the td file with the default cpu, for an always_inline function this is any
2570e5dd7070Spatrick // listed cpu and any listed features.
2571e5dd7070Spatrick unsigned BuiltinID = TargetDecl->getBuiltinID();
2572e5dd7070Spatrick std::string MissingFeature;
2573a9ac8606Spatrick llvm::StringMap<bool> CallerFeatureMap;
2574a9ac8606Spatrick CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
2575e5dd7070Spatrick if (BuiltinID) {
2576*12c85518Srobert StringRef FeatureList(CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
2577*12c85518Srobert if (!Builtin::evaluateRequiredTargetFeatures(
2578*12c85518Srobert FeatureList, CallerFeatureMap)) {
2579e5dd7070Spatrick CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2580*12c85518Srobert << TargetDecl->getDeclName()
2581*12c85518Srobert << FeatureList;
2582*12c85518Srobert }
2583e5dd7070Spatrick } else if (!TargetDecl->isMultiVersion() &&
2584e5dd7070Spatrick TargetDecl->hasAttr<TargetAttr>()) {
2585e5dd7070Spatrick // Get the required features for the callee.
2586e5dd7070Spatrick
2587e5dd7070Spatrick const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2588e5dd7070Spatrick ParsedTargetAttr ParsedAttr =
2589e5dd7070Spatrick CGM.getContext().filterFunctionTargetAttrs(TD);
2590e5dd7070Spatrick
2591e5dd7070Spatrick SmallVector<StringRef, 1> ReqFeatures;
2592e5dd7070Spatrick llvm::StringMap<bool> CalleeFeatureMap;
2593ec727ea7Spatrick CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2594e5dd7070Spatrick
2595e5dd7070Spatrick for (const auto &F : ParsedAttr.Features) {
2596e5dd7070Spatrick if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2597e5dd7070Spatrick ReqFeatures.push_back(StringRef(F).substr(1));
2598e5dd7070Spatrick }
2599e5dd7070Spatrick
2600e5dd7070Spatrick for (const auto &F : CalleeFeatureMap) {
2601e5dd7070Spatrick // Only positive features are "required".
2602e5dd7070Spatrick if (F.getValue())
2603e5dd7070Spatrick ReqFeatures.push_back(F.getKey());
2604e5dd7070Spatrick }
2605a9ac8606Spatrick if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) {
2606a9ac8606Spatrick if (!CallerFeatureMap.lookup(Feature)) {
2607a9ac8606Spatrick MissingFeature = Feature.str();
2608a9ac8606Spatrick return false;
2609a9ac8606Spatrick }
2610a9ac8606Spatrick return true;
2611a9ac8606Spatrick }))
2612e5dd7070Spatrick CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2613e5dd7070Spatrick << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2614e5dd7070Spatrick }
2615e5dd7070Spatrick }
2616e5dd7070Spatrick
2617e5dd7070Spatrick void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2618e5dd7070Spatrick if (!CGM.getCodeGenOpts().SanitizeStats)
2619e5dd7070Spatrick return;
2620e5dd7070Spatrick
2621e5dd7070Spatrick llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2622e5dd7070Spatrick IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2623e5dd7070Spatrick CGM.getSanStats().create(IRB, SSK);
2624e5dd7070Spatrick }
2625e5dd7070Spatrick
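// Attach a "kcfi" operand bundle carrying the callee prototype's KCFI type
// id, which the backend uses to emit kernel control-flow-integrity checks.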
2626*12c85518Srobert void CodeGenFunction::EmitKCFIOperandBundle(
2627*12c85518Srobert const CGCallee &Callee, SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
2628*12c85518Srobert const FunctionProtoType *FP =
2629*12c85518Srobert Callee.getAbstractInfo().getCalleeFunctionProtoType();
2630*12c85518Srobert if (FP)
2631*12c85518Srobert Bundles.emplace_back("kcfi", CGM.CreateKCFITypeId(FP->desugar()));
2632*12c85518Srobert }
2633*12c85518Srobert
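// Build the runtime condition for one AArch64 multiversion option: a CPU
// feature query covering only the features not already statically enabled
// for the target, or null when every requested feature is already enabled
// (including the default version, which requests none).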
2634*12c85518Srobert llvm::Value *CodeGenFunction::FormAArch64ResolverCondition(
2635*12c85518Srobert const MultiVersionResolverOption &RO) {
2636*12c85518Srobert llvm::SmallVector<StringRef, 8> CondFeatures;
2637*12c85518Srobert for (const StringRef &Feature : RO.Conditions.Features) {
2638*12c85518Srobert // Form the condition for features which are not yet enabled in the target.
2639*12c85518Srobert if (!getContext().getTargetInfo().hasFeature(Feature))
2640*12c85518Srobert CondFeatures.push_back(Feature);
2641*12c85518Srobert }
2642*12c85518Srobert if (!CondFeatures.empty()) {
2643*12c85518Srobert return EmitAArch64CpuSupports(CondFeatures);
2644*12c85518Srobert }
2645*12c85518Srobert return nullptr;
2646*12c85518Srobert }
2647*12c85518Srobert
2648*12c85518Srobert llvm::Value *CodeGenFunction::FormX86ResolverCondition(
2649*12c85518Srobert const MultiVersionResolverOption &RO) {
2650e5dd7070Spatrick llvm::Value *Condition = nullptr;
2651e5dd7070Spatrick
2652e5dd7070Spatrick if (!RO.Conditions.Architecture.empty())
2653e5dd7070Spatrick Condition = EmitX86CpuIs(RO.Conditions.Architecture);
2654e5dd7070Spatrick
2655e5dd7070Spatrick if (!RO.Conditions.Features.empty()) {
2656e5dd7070Spatrick llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2657e5dd7070Spatrick Condition =
2658e5dd7070Spatrick Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2659e5dd7070Spatrick }
2660e5dd7070Spatrick return Condition;
2661e5dd7070Spatrick }
2662e5dd7070Spatrick
2663e5dd7070Spatrick static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2664e5dd7070Spatrick llvm::Function *Resolver,
2665e5dd7070Spatrick CGBuilderTy &Builder,
2666e5dd7070Spatrick llvm::Function *FuncToReturn,
2667e5dd7070Spatrick bool SupportsIFunc) {
2668e5dd7070Spatrick if (SupportsIFunc) {
2669e5dd7070Spatrick Builder.CreateRet(FuncToReturn);
2670e5dd7070Spatrick return;
2671e5dd7070Spatrick }
2672e5dd7070Spatrick
2673*12c85518Srobert llvm::SmallVector<llvm::Value *, 10> Args(
2674*12c85518Srobert llvm::make_pointer_range(Resolver->args()));
2675e5dd7070Spatrick
2676e5dd7070Spatrick llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2677e5dd7070Spatrick Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2678e5dd7070Spatrick
2679e5dd7070Spatrick if (Resolver->getReturnType()->isVoidTy())
2680e5dd7070Spatrick Builder.CreateRetVoid();
2681e5dd7070Spatrick else
2682e5dd7070Spatrick Builder.CreateRet(Result);
2683e5dd7070Spatrick }
2684e5dd7070Spatrick
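// Emit the body of a resolver function that selects among the
// multiversioned variants at runtime; dispatch on the target architecture,
// which currently must be x86, x86_64, or AArch64.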
2685e5dd7070Spatrick void CodeGenFunction::EmitMultiVersionResolver(
2686e5dd7070Spatrick llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2687*12c85518Srobert
2688*12c85518Srobert llvm::Triple::ArchType ArchType =
2689*12c85518Srobert getContext().getTargetInfo().getTriple().getArch();
2690*12c85518Srobert
2691*12c85518Srobert switch (ArchType) {
2692*12c85518Srobert case llvm::Triple::x86:
2693*12c85518Srobert case llvm::Triple::x86_64:
2694*12c85518Srobert EmitX86MultiVersionResolver(Resolver, Options);
2695*12c85518Srobert return;
2696*12c85518Srobert case llvm::Triple::aarch64:
2697*12c85518Srobert EmitAArch64MultiVersionResolver(Resolver, Options);
2698*12c85518Srobert return;
2699*12c85518Srobert
2700*12c85518Srobert default:
2701*12c85518Srobert assert(false && "Only implemented for x86 and AArch64 targets");
2702*12c85518Srobert }
2703*12c85518Srobert }
2704*12c85518Srobert
2705*12c85518Srobert void CodeGenFunction::EmitAArch64MultiVersionResolver(
2706*12c85518Srobert llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2707*12c85518Srobert assert(!Options.empty() && "No multiversion resolver options found");
2708*12c85518Srobert assert(Options.back().Conditions.Features.size() == 0 &&
2709*12c85518Srobert "Default case must be last");
2710*12c85518Srobert bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2711*12c85518Srobert assert(SupportsIFunc &&
2712*12c85518Srobert "Multiversion resolver requires target IFUNC support");
2713*12c85518Srobert bool AArch64CpuInitialized = false;
2714*12c85518Srobert llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2715*12c85518Srobert
2716*12c85518Srobert for (const MultiVersionResolverOption &RO : Options) {
2717*12c85518Srobert Builder.SetInsertPoint(CurBlock);
2718*12c85518Srobert llvm::Value *Condition = FormAArch64ResolverCondition(RO);
2719*12c85518Srobert
2720*12c85518Srobert // The 'default' or 'all features enabled' case.
2721*12c85518Srobert if (!Condition) {
2722*12c85518Srobert CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2723*12c85518Srobert SupportsIFunc);
2724*12c85518Srobert return;
2725*12c85518Srobert }
2726*12c85518Srobert
2727*12c85518Srobert if (!AArch64CpuInitialized) {
2728*12c85518Srobert Builder.SetInsertPoint(CurBlock, CurBlock->begin());
2729*12c85518Srobert EmitAArch64CpuInit();
2730*12c85518Srobert AArch64CpuInitialized = true;
2731*12c85518Srobert Builder.SetInsertPoint(CurBlock);
2732*12c85518Srobert }
2733*12c85518Srobert
2734*12c85518Srobert llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2735*12c85518Srobert CGBuilderTy RetBuilder(*this, RetBlock);
2736*12c85518Srobert CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2737*12c85518Srobert SupportsIFunc);
2738*12c85518Srobert CurBlock = createBasicBlock("resolver_else", Resolver);
2739*12c85518Srobert Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2740*12c85518Srobert }
2741*12c85518Srobert
2742*12c85518Srobert // If no default, emit an unreachable.
2743*12c85518Srobert Builder.SetInsertPoint(CurBlock);
2744*12c85518Srobert llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2745*12c85518Srobert TrapCall->setDoesNotReturn();
2746*12c85518Srobert TrapCall->setDoesNotThrow();
2747*12c85518Srobert Builder.CreateUnreachable();
2748*12c85518Srobert Builder.ClearInsertionPoint();
2749*12c85518Srobert }
2750*12c85518Srobert
2751*12c85518Srobert void CodeGenFunction::EmitX86MultiVersionResolver(
2752*12c85518Srobert llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2753e5dd7070Spatrick
2754e5dd7070Spatrick bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2755e5dd7070Spatrick
2756e5dd7070Spatrick // Main function's basic block.
2757e5dd7070Spatrick llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2758e5dd7070Spatrick Builder.SetInsertPoint(CurBlock);
2759e5dd7070Spatrick EmitX86CpuInit();
2760e5dd7070Spatrick
2761e5dd7070Spatrick for (const MultiVersionResolverOption &RO : Options) {
2762e5dd7070Spatrick Builder.SetInsertPoint(CurBlock);
2763*12c85518Srobert llvm::Value *Condition = FormX86ResolverCondition(RO);
2764e5dd7070Spatrick
2765e5dd7070Spatrick // The 'default' or 'generic' case.
2766e5dd7070Spatrick if (!Condition) {
2767e5dd7070Spatrick assert(&RO == Options.end() - 1 &&
2768e5dd7070Spatrick "Default or Generic case must be last");
2769e5dd7070Spatrick CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2770e5dd7070Spatrick SupportsIFunc);
2771e5dd7070Spatrick return;
2772e5dd7070Spatrick }
2773e5dd7070Spatrick
2774e5dd7070Spatrick llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2775e5dd7070Spatrick CGBuilderTy RetBuilder(*this, RetBlock);
2776e5dd7070Spatrick CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2777e5dd7070Spatrick SupportsIFunc);
2778e5dd7070Spatrick CurBlock = createBasicBlock("resolver_else", Resolver);
2779e5dd7070Spatrick Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2780e5dd7070Spatrick }
2781e5dd7070Spatrick
2782e5dd7070Spatrick // If no generic/default, emit an unreachable.
2783e5dd7070Spatrick Builder.SetInsertPoint(CurBlock);
2784e5dd7070Spatrick llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2785e5dd7070Spatrick TrapCall->setDoesNotReturn();
2786e5dd7070Spatrick TrapCall->setDoesNotThrow();
2787e5dd7070Spatrick Builder.CreateUnreachable();
2788e5dd7070Spatrick Builder.ClearInsertionPoint();
2789e5dd7070Spatrick }
2790e5dd7070Spatrick
2791e5dd7070Spatrick // Loc - where the diagnostic will point, where in the source code this
2792e5dd7070Spatrick // alignment has failed.
2793e5dd7070Spatrick // SecondaryLoc - if present (will be present if sufficiently different from
2794e5dd7070Spatrick // Loc), the diagnostic will additionally point a "Note:" to this location.
2795e5dd7070Spatrick // It should be the location where the __attribute__((assume_aligned))
2796e5dd7070Spatrick // was written, e.g. on the callee's declaration.
2797ec727ea7Spatrick void CodeGenFunction::emitAlignmentAssumptionCheck(
2798e5dd7070Spatrick llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2799e5dd7070Spatrick SourceLocation SecondaryLoc, llvm::Value *Alignment,
2800e5dd7070Spatrick llvm::Value *OffsetValue, llvm::Value *TheCheck,
2801e5dd7070Spatrick llvm::Instruction *Assumption) {
2802e5dd7070Spatrick assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2803ec727ea7Spatrick cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
2804e5dd7070Spatrick llvm::Intrinsic::getDeclaration(
2805e5dd7070Spatrick Builder.GetInsertBlock()->getParent()->getParent(),
2806e5dd7070Spatrick llvm::Intrinsic::assume) &&
2807e5dd7070Spatrick "Assumption should be a call to llvm.assume().");
2808e5dd7070Spatrick assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2809e5dd7070Spatrick "Assumption should be the last instruction of the basic block, "
2810e5dd7070Spatrick "since the basic block is still being generated.");
2811e5dd7070Spatrick
2812e5dd7070Spatrick if (!SanOpts.has(SanitizerKind::Alignment))
2813e5dd7070Spatrick return;
2814e5dd7070Spatrick
2815e5dd7070Spatrick // Don't check pointers to volatile data. The behavior here is implementation-
2816e5dd7070Spatrick // defined.
2817e5dd7070Spatrick if (Ty->getPointeeType().isVolatileQualified())
2818e5dd7070Spatrick return;
2819e5dd7070Spatrick
2820e5dd7070Spatrick // We need to temporarily remove the assumption so we can insert the
2821e5dd7070Spatrick // sanitizer check before it; otherwise the check will be dropped by optimizations.
2822e5dd7070Spatrick Assumption->removeFromParent();
2823e5dd7070Spatrick
2824e5dd7070Spatrick {
2825e5dd7070Spatrick SanitizerScope SanScope(this);
2826e5dd7070Spatrick
2827e5dd7070Spatrick if (!OffsetValue)
2828*12c85518Srobert OffsetValue = Builder.getInt1(false); // no offset.
2829e5dd7070Spatrick
2830e5dd7070Spatrick llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2831e5dd7070Spatrick EmitCheckSourceLocation(SecondaryLoc),
2832e5dd7070Spatrick EmitCheckTypeDescriptor(Ty)};
2833e5dd7070Spatrick llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2834e5dd7070Spatrick EmitCheckValue(Alignment),
2835e5dd7070Spatrick EmitCheckValue(OffsetValue)};
2836e5dd7070Spatrick EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2837e5dd7070Spatrick SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2838e5dd7070Spatrick }
2839e5dd7070Spatrick
2840e5dd7070Spatrick // We are now in the (new, empty) "cont" basic block.
2841e5dd7070Spatrick // Reintroduce the assumption.
2842e5dd7070Spatrick Builder.Insert(Assumption);
2843e5dd7070Spatrick // FIXME: Assumption still has its original basic block as its parent.
2844e5dd7070Spatrick }
2845e5dd7070Spatrick
2846e5dd7070Spatrick llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
2847e5dd7070Spatrick if (CGDebugInfo *DI = getDebugInfo())
2848e5dd7070Spatrick return DI->SourceLocToDebugLoc(Location);
2849e5dd7070Spatrick
2850e5dd7070Spatrick return llvm::DebugLoc();
2851e5dd7070Spatrick }
2852a9ac8606Spatrick
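// Wrap a boolean condition in llvm.expect according to the [[likely]] or
// [[unlikely]] annotation on the controlling statement; at -O0 the
// condition is returned unchanged.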
2853a9ac8606Spatrick llvm::Value *
2854a9ac8606Spatrick CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
2855a9ac8606Spatrick Stmt::Likelihood LH) {
2856a9ac8606Spatrick switch (LH) {
2857a9ac8606Spatrick case Stmt::LH_None:
2858a9ac8606Spatrick return Cond;
2859a9ac8606Spatrick case Stmt::LH_Likely:
2860a9ac8606Spatrick case Stmt::LH_Unlikely:
2861a9ac8606Spatrick // Don't generate llvm.expect on -O0 as the backend won't use it for
2862a9ac8606Spatrick // anything.
2863a9ac8606Spatrick if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2864a9ac8606Spatrick return Cond;
2865a9ac8606Spatrick llvm::Type *CondTy = Cond->getType();
2866a9ac8606Spatrick assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean");
2867a9ac8606Spatrick llvm::Function *FnExpect =
2868a9ac8606Spatrick CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy);
2869a9ac8606Spatrick llvm::Value *ExpectedValueOfCond =
2870a9ac8606Spatrick llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely);
2871a9ac8606Spatrick return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond},
2872a9ac8606Spatrick Cond->getName() + ".expval");
2873a9ac8606Spatrick }
2874a9ac8606Spatrick llvm_unreachable("Unknown Likelihood");
2875a9ac8606Spatrick }
2876*12c85518Srobert
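// Resize an i1 vector to NumElementsDst lanes using a shufflevector,
// truncating extra source lanes or padding with undefined lanes as needed.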
2877*12c85518Srobert llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
2878*12c85518Srobert unsigned NumElementsDst,
2879*12c85518Srobert const llvm::Twine &Name) {
2880*12c85518Srobert auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType());
2881*12c85518Srobert unsigned NumElementsSrc = SrcTy->getNumElements();
2882*12c85518Srobert if (NumElementsSrc == NumElementsDst)
2883*12c85518Srobert return SrcVec;
2884*12c85518Srobert
2885*12c85518Srobert std::vector<int> ShuffleMask(NumElementsDst, -1);
2886*12c85518Srobert for (unsigned MaskIdx = 0;
2887*12c85518Srobert MaskIdx < std::min<>(NumElementsDst, NumElementsSrc); ++MaskIdx)
2888*12c85518Srobert ShuffleMask[MaskIdx] = MaskIdx;
2889*12c85518Srobert
2890*12c85518Srobert return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);
2891*12c85518Srobert }
2892