//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCleanup.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
using namespace clang;
using namespace CodeGen;

/// shouldEmitLifetimeMarkers - Decide whether we need to emit lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Sanitizers may use markers.
  if (CGOpts.SanitizeAddressUseAfterScope ||
      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
      LangOpts.Sanitize.has(SanitizerKind::Memory))
    return true;

  // For now, only in optimized builds.
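  // (Illustrative: a plain -O0 build gets no markers, while optimized builds
  //  do, as does -O0 combined with one of the sanitizer options above.)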
  return CGOpts.OptimizationLevel != 0;
}

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), DebugInfo(CGM.getModuleDebugInfo()),
      PGO(cgm), ShouldEmitLifetimeMarkers(shouldEmitLifetimeMarkers(
                    CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  llvm::FastMathFlags FMF;
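  // Map the front-end floating-point options onto LLVM fast-math flags: as
  // the cases below show, -ffast-math enables everything at once, while the
  // narrower options each contribute individual flags.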
  if (CGM.getLangOpts().FastMath)
    FMF.setFast();
  if (CGM.getLangOpts().FiniteMathOnly) {
    FMF.setNoNaNs();
    FMF.setNoInfs();
  }
  if (CGM.getCodeGenOpts().NoNaNsFPMath) {
    FMF.setNoNaNs();
  }
  if (CGM.getCodeGenOpts().NoSignedZeros) {
    FMF.setNoSignedZeros();
  }
  if (CGM.getCodeGenOpts().ReciprocalMath) {
    FMF.setAllowReciprocal();
  }
  if (CGM.getCodeGenOpts().Reassociate) {
    FMF.setAllowReassoc();
  }
  Builder.setFastMathFlags(FMF);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  // If there are any unclaimed block infos, go ahead and destroy them
  // now.  This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);
}

CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
                                                    LValueBaseInfo *BaseInfo,
                                                    TBAAAccessInfo *TBAAInfo) {
  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
                                 /* forPointeeType= */ true);
}

CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
                                                   LValueBaseInfo *BaseInfo,
                                                   TBAAAccessInfo *TBAAInfo,
                                                   bool forPointeeType) {
  if (TBAAInfo)
    *TBAAInfo = CGM.getTBAAAccessInfo(T);

  // Honor alignment typedef attributes even on incomplete types.
  // We also honor them straight for C++ class types, even as pointees;
  // there's an expressivity gap here.
  if (auto TT = T->getAs<TypedefType>()) {
    if (auto Align = TT->getDecl()->getMaxAlignment()) {
      if (BaseInfo)
        *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
      return getContext().toCharUnitsFromBits(Align);
    }
  }

  if (BaseInfo)
    *BaseInfo = LValueBaseInfo(AlignmentSource::Type);

  CharUnits Alignment;
  if (T->isIncompleteType()) {
    Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
  } else {
    // For C++ class pointees, we don't know whether we're pointing at a
    // base or a complete object, so we generally need to use the
    // non-virtual alignment.
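    // (Illustrative: if virtual bases raise a class's complete-object
    //  alignment, a pointer to one of its base subobjects is only guaranteed
    //  the alignment of the non-virtual part, so that is all we assume.)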
    const CXXRecordDecl *RD;
    if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
      Alignment = CGM.getClassPointerAlignment(RD);
    } else {
      Alignment = getContext().getTypeAlignInChars(T);
      if (T.getQualifiers().hasUnaligned())
        Alignment = CharUnits::One();
    }

    // Cap to the global maximum type alignment unless the alignment
    // was somehow explicit on the type.
    if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
      if (Alignment.getQuantity() > MaxAlign &&
          !getContext().isAlignmentRequired(T))
        Alignment = CharUnits::fromQuantity(MaxAlign);
    }
  }
  return Alignment;
}

LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
                          TBAAInfo);
}

/// Given a value of type T* that may not point to a complete object,
/// construct an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
                                            /* forPointeeType= */ true);
  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
}


llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
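  // For example: 'int' and 'int *' are TEK_Scalar, '_Complex double' is
  // TEK_Complex, and a struct is TEK_Aggregate; an '_Atomic(T)' classifies
  // as its underlying T via the loop below.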
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
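  // (E.g. a function whose body simply falls off the end needs no separate
  //  return block; the 'ret' can live in the block we are already in.)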
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be used
      // later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code.  If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement.  Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters.  Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    if (CGDebugInfo *DI = getDebugInfo())
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);
  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should be
    // no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero-entry PHI node, which is illegal; zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
           I = DeferredReplacements.begin(),
           E = DeferredReplacements.end();
       I != E; ++I) {
    I->first->replaceAllUsesWith(I->second);
    I->first->eraseFromParent();
  }

  // Eliminate the CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do this for all
  // functions, as it may cause a slight increase in the number of instructions
  // when compiled without optimizations. We do it for coroutines because the
  // lifetime of the CleanupDestSlot alloca makes correct coroutine frame
  // building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
                                   VT->getPrimitiveSizeInBits().getFixedSize());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
                                  VT->getPrimitiveSizeInBits().getFixedSize());

  // Add the required-vector-width attribute. This contains the max width from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by
  //    this function.
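  // (E.g. on x86, a function that takes a 512-bit vector argument ends up
  //  with "min-legal-vector-width"="512" via the argument scan above.)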
  CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));

  // If we generated an unreachable return block, delete it now.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
    Builder.ClearInsertionPoint();
    ReturnBlock.getBlock()->eraseFromParent();
  }
  if (ReturnValue.isValid()) {
    auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
    if (RetAlloca && RetAlloca->use_empty()) {
      RetAlloca->eraseFromParent();
      ReturnValue = Address::invalid();
    }
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// ShouldXRayInstrumentFunction - Return true if the current function should
/// be instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}

llvm::Constant *
CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
                                            llvm::Constant *Addr) {
  // Addresses stored in prologue data can't require run-time fixups and must
  // be PC-relative. Run-time fixups are undesirable because they necessitate
  // writable text segments, which are unsafe. And absolute addresses are
  // undesirable because they break PIE mode.
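  // (Sketch of the encoding: we emit Offset = &GV - &F as a 32-bit constant;
  //  a checker recomputes &GV = &F + Offset and loads the original address
  //  through the global, as DecodeAddrUsedInPrologue below does.)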

  // Add a layer of indirection through a private global. Taking its address
  // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
  auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
                                      /*isConstant=*/true,
                                      llvm::GlobalValue::PrivateLinkage, Addr);

  // Create a PC-relative address.
  auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
  auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
  auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
  return (IntPtrTy == Int32Ty)
             ? PCRelAsInt
             : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
}

llvm::Value *
CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
                                          llvm::Value *EncodedAddr) {
  // Reconstruct the address of the global.
  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");

  // Load the original pointer through the global.
  return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
                            "decoded_addr");
}

void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn) {
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  CGM.GenOpenCLArgMetadata(Fn, FD, this);

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}

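/// Return true if D looks like an STL allocator's 'allocate' member function:
/// a method named 'allocate' taking either (size_type) or
/// (size_type, const void *). The enclosing namespace is deliberately not
/// checked, since not all allocators live in std.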
static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}

/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD,
                                    QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
    if (FD->usesSEHTry())
      CurSEHParent = FD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function has been blacklisted for any of the enabled sanitizers,
  // disable the sanitizer for the function.
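  // (The include below expands the SANITIZER macro once per known sanitizer,
  //  checking each enabled kind against the blacklist in turn; the
  //  'do { } while (0)' lets the expansion break out early once SanOpts is
  //  empty.)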
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                          \
    if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc))                \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (0);

  if (D) {
    // Apply the no_sanitize* attributes to SanOpts.
    for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
      SanitizerMask mask = Attr->getMask();
      SanOpts.Mask &= ~mask;
      if (mask & SanitizerKind::Address)
        SanOpts.set(SanitizerKind::KernelAddress, false);
      if (mask & SanitizerKind::KernelAddress)
        SanOpts.set(SanitizerKind::Address, false);
      if (mask & SanitizerKind::HWAddress)
        SanOpts.set(SanitizerKind::KernelHWAddress, false);
      if (mask & SanitizerKind::KernelHWAddress)
        SanOpts.set(SanitizerKind::HWAddress, false);
    }
  }

  // Apply sanitizer attributes to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
  if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
  if (SanOpts.has(SanitizerKind::MemTag))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
  if (SanOpts.has(SanitizerKind::Thread))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);
  if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  // Apply fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on
  // the namespace because not all allocators are in namespace std.
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Ignore null checks in coroutine functions since the coroutine passes
  // are not aware of how to move the extra UBSan instructions across the split
  // coroutine boundaries.
  if (D && SanOpts.has(SanitizerKind::Null))
    if (const auto *FD = dyn_cast<FunctionDecl>(D))
      if (FD->getBody() &&
          FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
        SanOpts.Mask &= ~SanitizerKind::Null;

  // Apply XRay attributes to the function (as a string, for now).
  if (D) {
    if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
      if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
              XRayInstrKind::Function)) {
        if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
          Fn->addFnAttr("function-instrument", "xray-always");
        if (XRayAttr->neverXRayInstrument())
          Fn->addFnAttr("function-instrument", "xray-never");
        if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
          if (ShouldXRayInstrumentFunction())
            Fn->addFnAttr("xray-log-args",
                          llvm::utostr(LogArgs->getArgumentCount()));
      }
    } else {
      if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
        Fn->addFnAttr(
            "xray-instruction-threshold",
            llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
    }
  }

  // Add no-jump-tables value.
  Fn->addFnAttr("no-jump-tables",
                llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
    Fn->addFnAttr("cfi-canonical-jump-table");

  if (getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
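  // (E.g. under -fsanitize=function, an indirect call through a function
  //  pointer of the wrong type can be diagnosed by comparing the RTTI
  //  descriptor encoded here against the one expected at the call site.)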
  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
        // Remove any (C++17) exception specifications, to allow calling e.g. a
        // noexcept function through a non-noexcept pointer.
        auto ProtoTy =
          getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
                                                        EST_None);
        llvm::Constant *FTRTTIConst =
            CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
        llvm::Constant *FTRTTIConstEncoded =
            EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
        llvm::Constant *PrologueStructElems[] = {PrologueSig,
                                                 FTRTTIConstEncoded};
        llvm::Constant *PrologueStructConst =
            llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
        Fn->setPrologueData(PrologueStructConst);
      }
    }
  }

  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability(getContext());
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  if (getLangOpts().CPlusPlus)
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (FD->isMain())
        Fn->addFnAttr(llvm::Attribute::NoRecurse);

  // If a custom alignment is used, force realigning to this alignment on
  // any main function, which will certainly need it.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
        CGM.getCodeGenOpts().StackAlignment)
      Fn->addFnAttr("stackrealign");

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
    // convention.
    CallingConv CC = CallingConv::CC_C;
    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
        CC = SrcFnTy->getCallConv();
    SmallVector<QualType, 16> ArgTypes;
    for (const VarDecl *VD : Args)
      ArgTypes.push_back(VD->getType());
    QualType FnType = getContext().getFunctionType(
        RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
                          Builder);
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here impacts optimizations such as function
  // inlining, we just add an attribute to insert an mcount call in the
  // backend. The "instrument-function-entry-inlined" attribute is set to the
  // mcount function name, which is architecture dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    // Calls to fentry/mcount should not be generated if function has
    // the no_instrument_function attribute.
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
      if (CGM.getCodeGenOpts().CallFEntry)
        Fn->addFnAttr("fentry-call", "true");
      else {
        Fn->addFnAttr("instrument-function-entry-inlined",
                      getTarget().getMCountName());
      }
    }
  }

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
    // Indirect return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
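    // (E.g. a function returning a large struct by value receives a hidden
    //  pointer argument and constructs the result in place through it.)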
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
    if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
      ReturnValuePointer =
          CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
      Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
                              ReturnValue.getPointer(), Int8PtrTy),
                          ReturnValuePointer);
    }
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
    ReturnValuePointer = Address(Addr, getPointerAlign());
    Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
    ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result.  We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();

  // Emit OpenMP specific initialization of the device functions.
  if (getLangOpts().OpenMP && CurCodeDecl)
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);

  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either by
        // value or by reference, make sure CXXThisValue points to the correct
        // object.

        // Get the lvalue for the field (which is a copy of the enclosing object
        // or contains the address of the enclosing object).
        LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its address.
          CXXThisValue = ThisFieldLValue.getAddress().getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was captured
          // by reference.
          CXXThisValue =
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
        }
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'?  The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }

    // Check the 'this' pointer once per function, if it's available.
    if (CXXABIThisValue) {
      SanitizerSet SkippedChecks;
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
      QualType ThisTy = MD->getThisType();

      // If this is the call operator of a lambda with no capture-default, it
      // may have a static invoker function, which may call this operator with
      // a null 'this' pointer.
      if (isLambdaCallOperator(MD) &&
          MD->getParent()->getLambdaCaptureDefault() == LCD_None)
        SkippedChecks.set(SanitizerKind::Null, true);

      EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
                                                : TCK_MemberCall,
                    Loc, CXXABIThisValue, ThisTy,
                    getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
                    SkippedChecks);
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *VD = *i;

    // Dig out the type as written from ParmVarDecls; it's unclear whether
    // the standard (C99 6.9.1p10) requires this, but we're following the
    // precedent set by gcc.
    QualType Ty;
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
      Ty = PVD->getOriginalType();
    else
      Ty = VD->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);

  // TODO: Do we need to handle this in two places like we do with
  // target-features/target-cpu?
  if (CurFuncDecl)
    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
      LargestVectorWidth = VecWidth->getVectorWidth();
}

void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
  incrementProfileCounter(Body);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
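  // (The resulting count for BB is the saved fall-through count plus this
  //  statement's own region counter, which incrementProfileCounter just made
  //  the current count.)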
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it.  We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->isInterposable()) return;

  for (llvm::BasicBlock &BB : *F)
    for (llvm::Instruction &I : BB)
      if (I.mayThrow())
        return;

  F->setDoesNotThrow();
}

QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
                                               FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  QualType ResTy = FD->getReturnType();

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isInstance()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType();
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  // The base version of an inheriting constructor whose constructed base is a
  // virtual base is not passed any arguments (because it doesn't actually call
  // the inherited constructor).
  bool PassedParams = true;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    if (auto Inherited = CD->getInheritedConstructor())
      PassedParams =
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());

  if (PassedParams) {
    for (auto *Param : FD->parameters()) {
      Args.push_back(Param);
      if (!Param->hasAttr<PassObjectSizeAttr>())
        continue;

      auto *Implicit = ImplicitParamDecl::Create(
          getContext(), Param->getDeclContext(), Param->getLocation(),
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
      SizeArguments[Param] = Implicit;
      Args.push_back(Implicit);
    }
  }

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  return ResTy;
}

static bool
shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
                                             const ASTContext &Context) {
  QualType T = FD->getReturnType();
  // Avoid the optimization for functions that return a record type with a
  // trivial destructor or another trivially copyable type.
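  // (E.g. a function returning 'std::string', which has a non-trivial
  //  destructor, keeps the optimization, while one returning plain 'int'
  //  avoids it.)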
  if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      return !ClassDecl->hasTrivialDestructor();
  }
  return !T.isTriviallyCopyableType(Context);
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  CurGD = GD;

  FunctionArgList Args;
  QualType ResTy = BuildFunctionArgList(GD, Args);

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  // The function might not have a body if we're generating thunks for a
  // function declaration.
  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody())
    BodyRange = Body->getSourceRange();
  else
    BodyRange = FD->getLocation();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization, then use the pattern body as the
  // location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  Stmt *Body = FD->getBody();

  // Initialize the helper that detects jumps which can cause invalid lifetime
  // markers.
  if (Body && ShouldEmitLifetimeMarkers)
    Bypasses.Init(Body);

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Generate the body of the function.
  PGO.assignRegionCounters(GD, CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !getLangOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
  else if (isa<CXXMethodDecl>(FD) &&
           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    emitImplicitAssignmentOperatorBody(Args);
  } else if (Body) {
    EmitFunctionBody(Body);
  } else
    llvm_unreachable("no definition for emitted function");

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior in
  //   a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
    bool ShouldEmitUnreachable =
        CGM.getCodeGenOpts().StrictReturn ||
        shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
    if (SanOpts.has(SanitizerKind::Return)) {
      SanitizerScope SanScope(this);
      llvm::Value *IsFalse = Builder.getFalse();
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
                SanitizerHandler::MissingReturn,
                EmitCheckSourceLocation(FD->getLocation()), None);
    } else if (ShouldEmitUnreachable) {
      if (CGM.getCodeGenOpts().OptimizationLevel == 0)
        EmitTrapCall(llvm::Intrinsic::trap);
    }
    if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
      Builder.CreateUnreachable();
      Builder.ClearInsertionPoint();
    }
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, then not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (!S) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) {  ...  foo:  bar(); }  goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (const Stmt *SubStmt : S->children())
    if (ContainsLabel(SubStmt, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
1283   // Null statement, not a break!
1284   if (!S) return false;
1285 
1286   // If this is a switch or loop that defines its own break scope, then no
1287   // break inside it can escape, so we can include it and anything within it.
1288   if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1289       isa<ForStmt>(S))
1290     return false;
1291 
1292   if (isa<BreakStmt>(S))
1293     return true;
1294 
1295   // Scan subexpressions for verboten breaks.
1296   for (const Stmt *SubStmt : S->children())
1297     if (containsBreak(SubStmt))
1298       return true;
1299 
1300   return false;
1301 }
1302 
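/// mightAddDeclToScope - Return true if the statement might declare something
/// directly in the enclosing scope. For example (illustrative), for
/// "l: int x = 0;" this returns true, since the label does not open a scope,
/// while for "{ int x = 0; }" it returns false, since the compound statement
/// scopes 'x' itself.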
1303 bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1304   if (!S) return false;
1305 
1306   // Some statement kinds add a scope and thus never add a decl to the current
1307   // scope. Note that this list is longer than the list of statements that
1308   // might have an unscoped decl nested within them, but this way is
1309   // conservatively correct even if more statement kinds are added.
1310   if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1311       isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1312       isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1313       isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1314     return false;
1315 
1316   if (isa<DeclStmt>(S))
1317     return true;
1318 
1319   for (const Stmt *SubStmt : S->children())
1320     if (mightAddDeclToScope(SubStmt))
1321       return true;
1322 
1323   return false;
1324 }
1325 
1326 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1327 /// to a constant, or if it does but contains a label, return false.  If it
1328 /// constant folds, return true and set the boolean result in ResultBool.
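/// For example (illustrative), the condition in "if (sizeof(int) == 4)"
/// constant folds here, so the caller can branch without emitting a compare.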
1329 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1330                                                    bool &ResultBool,
1331                                                    bool AllowLabels) {
1332   llvm::APSInt ResultInt;
1333   if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1334     return false;
1335 
1336   ResultBool = ResultInt.getBoolValue();
1337   return true;
1338 }
1339 
1340 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1341 /// to a constant, or if it does but contains a label, return false.  If it
1342 /// constant folds, return true and set the folded value in ResultInt.
1343 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1344                                                    llvm::APSInt &ResultInt,
1345                                                    bool AllowLabels) {
1346   // FIXME: Rename and handle conversion of other evaluatable things
1347   // to bool.
1348   Expr::EvalResult Result;
1349   if (!Cond->EvaluateAsInt(Result, getContext()))
1350     return false;  // Not foldable, not integer or not fully evaluatable.
1351 
1352   llvm::APSInt Int = Result.Val.getInt();
1353   if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1354     return false;  // Contains a label.
1355 
1356   ResultInt = Int;
1357   return true;
1358 }
1359 
1362 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1363 /// statement) to the specified blocks.  Depending on the form of the
1364 /// condition, this might try to simplify the codegen of the branch.
1365 ///
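/// For "a && b", for instance, this emits roughly (illustrative, with the
/// block name taken from createBasicBlock below):
///   br a, label %land.lhs.true, label %false
/// land.lhs.true:
///   br b, label %true, label %false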
1366 void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
1367                                            llvm::BasicBlock *TrueBlock,
1368                                            llvm::BasicBlock *FalseBlock,
1369                                            uint64_t TrueCount) {
1370   Cond = Cond->IgnoreParens();
1371 
1372   if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1373 
1374     // Handle X && Y in a condition.
1375     if (CondBOp->getOpcode() == BO_LAnd) {
1376       // If we have "1 && X", simplify the code.  "0 && X" would have been
1377       // constant folded if the case was simple enough.
1378       bool ConstantBool = false;
1379       if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1380           ConstantBool) {
1381         // br(1 && X) -> br(X).
1382         incrementProfileCounter(CondBOp);
1383         return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1384                                     TrueCount);
1385       }
1386 
1387       // If we have "X && 1", simplify the code to use an uncond branch.
1388       // "X && 0" would have been constant folded to 0.
1389       if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1390           ConstantBool) {
1391         // br(X && 1) -> br(X).
1392         return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1393                                     TrueCount);
1394       }
1395 
1396       // Emit the LHS as a conditional.  If the LHS conditional is false, we
1397       // want to jump to the FalseBlock.
1398       llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1399       // The counter tells us how often we evaluate RHS, and all of TrueCount
1400       // can be propagated to that branch.
1401       uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1402 
1403       ConditionalEvaluation eval(*this);
1404       {
1405         ApplyDebugLocation DL(*this, Cond);
1406         EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
1407         EmitBlock(LHSTrue);
1408       }
1409 
1410       incrementProfileCounter(CondBOp);
1411       setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1412 
1413       // Any temporaries created here are conditional.
1414       eval.begin(*this);
1415       EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
1416       eval.end(*this);
1417 
1418       return;
1419     }
1420 
1421     if (CondBOp->getOpcode() == BO_LOr) {
1422       // If we have "0 || X", simplify the code.  "1 || X" would have been
1423       // constant folded if the case was simple enough.
1424       bool ConstantBool = false;
1425       if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1426           !ConstantBool) {
1427         // br(0 || X) -> br(X).
1428         incrementProfileCounter(CondBOp);
1429         return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1430                                     TrueCount);
1431       }
1432 
1433       // If we have "X || 0", simplify the code to use an uncond branch.
1434       // "X || 1" would have been constant folded to 1.
1435       if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1436           !ConstantBool) {
1437         // br(X || 0) -> br(X).
1438         return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1439                                     TrueCount);
1440       }
1441 
1442       // Emit the LHS as a conditional.  If the LHS conditional is true, we
1443       // want to jump to the TrueBlock.
1444       llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1445       // We have the count for entry to the RHS and for the whole expression
1446       // being true, so we can divvy up the true count between the short
1447       // circuit and the RHS.
1448       uint64_t LHSCount =
1449           getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1450       uint64_t RHSCount = TrueCount - LHSCount;
1451 
1452       ConditionalEvaluation eval(*this);
1453       {
1454         ApplyDebugLocation DL(*this, Cond);
1455         EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
1456         EmitBlock(LHSFalse);
1457       }
1458 
1459       incrementProfileCounter(CondBOp);
1460       setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1461 
1462       // Any temporaries created here are conditional.
1463       eval.begin(*this);
1464       EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);
1465 
1466       eval.end(*this);
1467 
1468       return;
1469     }
1470   }
1471 
1472   if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1473     // br(!x, t, f) -> br(x, f, t)
1474     if (CondUOp->getOpcode() == UO_LNot) {
1475       // Negate the count.
1476       uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1477       // Negate the condition and swap the destination blocks.
1478       return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1479                                   FalseCount);
1480     }
1481   }
1482 
1483   if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1484     // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1485     llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1486     llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1487 
1488     ConditionalEvaluation cond(*this);
1489     EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1490                          getProfileCount(CondOp));
1491 
1492     // When computing PGO branch weights, we only know the overall count for
1493     // the true block. This code is essentially doing tail duplication of the
1494     // naive code-gen, introducing new edges for which counts are not
1495     // available. Divide the counts proportionally between the LHS and RHS of
1496     // the conditional operator.
1497     uint64_t LHSScaledTrueCount = 0;
1498     if (TrueCount) {
1499       double LHSRatio =
1500           getProfileCount(CondOp) / (double)getCurrentProfileCount();
1501       LHSScaledTrueCount = TrueCount * LHSRatio;
1502     }
1503 
1504     cond.begin(*this);
1505     EmitBlock(LHSBlock);
1506     incrementProfileCounter(CondOp);
1507     {
1508       ApplyDebugLocation DL(*this, Cond);
1509       EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1510                            LHSScaledTrueCount);
1511     }
1512     cond.end(*this);
1513 
1514     cond.begin(*this);
1515     EmitBlock(RHSBlock);
1516     EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1517                          TrueCount - LHSScaledTrueCount);
1518     cond.end(*this);
1519 
1520     return;
1521   }
1522 
1523   if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1524     // Conditional operator handling can give us a throw expression as a
1525     // condition for a case like:
1526     //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
1527     // Fold this to:
1528     //   br(c, throw x, br(y, t, f))
1529     EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1530     return;
1531   }
1532 
1533   // If the branch has a condition wrapped by __builtin_unpredictable,
1534   // create metadata that specifies that the branch is unpredictable.
1535   // Don't bother if not optimizing because that metadata would not be used.
1536   llvm::MDNode *Unpredictable = nullptr;
1537   auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1538   if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1539     auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1540     if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1541       llvm::MDBuilder MDHelper(getLLVMContext());
1542       Unpredictable = MDHelper.createUnpredictable();
1543     }
1544   }
1545 
1546   // Create branch weights based on the number of times we get here and the
1547   // number of times the condition should be true.
1548   uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1549   llvm::MDNode *Weights =
1550       createProfileWeights(TrueCount, CurrentCount - TrueCount);
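  // E.g. (illustrative): with getCurrentProfileCount() == 100 and
  // TrueCount == 40, this yields branch weights of 40 vs. 60.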
1551 
1552   // Emit the code with the fully general case.
1553   llvm::Value *CondV;
1554   {
1555     ApplyDebugLocation DL(*this, Cond);
1556     CondV = EvaluateExprAsBool(Cond);
1557   }
1558   Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
1559 }
1560 
1561 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1562 /// specified stmt yet.
1563 void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
1564   CGM.ErrorUnsupported(S, Type);
1565 }
1566 
1567 /// emitNonZeroVLAInit - Emit the "zero" initialization of a
1568 /// variable-length array whose elements have a non-zero bit-pattern.
1569 ///
1570 /// \param baseType the innermost element type of the array
1571 /// \param src - a char* pointing to the bit-pattern for a single
1572 /// base element of the array
1573 /// \param sizeInChars - the total size of the VLA, in chars
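/// A sketch of the loop this emits (illustrative pseudo-IR):
///   vla-init.loop:
///     %vla.cur = phi [ vla.begin, entry ], [ %vla.next, vla-init.loop ]
///     memcpy(%vla.cur, src, baseSizeInChars)
///     %vla.next = getelementptr %vla.cur, baseSizeInChars
///     br (%vla.next == vla.end), vla-init.cont, vla-init.loop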
1574 static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
1575                                Address dest, Address src,
1576                                llvm::Value *sizeInChars) {
1577   CGBuilderTy &Builder = CGF.Builder;
1578 
1579   CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
1580   llvm::Value *baseSizeInChars
1581     = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
1582 
1583   Address begin =
1584     Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
1585   llvm::Value *end =
1586     Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");
1587 
1588   llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
1589   llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
1590   llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
1591 
1592   // Make a loop over the VLA.  C99 guarantees that the VLA element
1593   // count must be nonzero.
1594   CGF.EmitBlock(loopBB);
1595 
1596   llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
1597   cur->addIncoming(begin.getPointer(), originBB);
1598 
1599   CharUnits curAlign =
1600     dest.getAlignment().alignmentOfArrayElement(baseSize);
1601 
1602   // memcpy the individual element bit-pattern.
1603   Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
1604                        /*volatile*/ false);
1605 
1606   // Go to the next element.
1607   llvm::Value *next =
1608     Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
1609 
1610   // Leave if that's the end of the VLA.
1611   llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
1612   Builder.CreateCondBr(done, contBB, loopBB);
1613   cur->addIncoming(next, loopBB);
1614 
1615   CGF.EmitBlock(contBB);
1616 }
1617 
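/// EmitNullInitialization - Note that "null" is not always an all-zero bit
/// pattern; e.g. (illustrative) for "struct S { int S::*mp; };" the null
/// member pointer is nonzero (-1 in the Itanium ABI), so a value of type S
/// is initialized by copying a null constant rather than by a zero memset.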
1618 void
1619 CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
1620   // Ignore empty classes in C++.
1621   if (getLangOpts().CPlusPlus) {
1622     if (const RecordType *RT = Ty->getAs<RecordType>()) {
1623       if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
1624         return;
1625     }
1626   }
1627 
1628   // Cast the dest ptr to the appropriate i8 pointer type.
1629   if (DestPtr.getElementType() != Int8Ty)
1630     DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1631 
1632   // Get size and alignment info for this aggregate.
1633   CharUnits size = getContext().getTypeSizeInChars(Ty);
1634 
1635   llvm::Value *SizeVal;
1636   const VariableArrayType *vla;
1637 
1638   // Don't bother emitting a zero-byte memset.
1639   if (size.isZero()) {
1640     // But note that getTypeInfo returns 0 for a VLA.
1641     if (const VariableArrayType *vlaType =
1642           dyn_cast_or_null<VariableArrayType>(
1643                                           getContext().getAsArrayType(Ty))) {
1644       auto VlaSize = getVLASize(vlaType);
1645       SizeVal = VlaSize.NumElts;
1646       CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1647       if (!eltSize.isOne())
1648         SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
1649       vla = vlaType;
1650     } else {
1651       return;
1652     }
1653   } else {
1654     SizeVal = CGM.getSize(size);
1655     vla = nullptr;
1656   }
1657 
1658   // If the type contains a pointer to data member we can't memset it to zero.
1659   // Instead, create a null constant and copy it to the destination.
1660   // TODO: there are other patterns besides zero that we can usefully memset,
1661   // like -1, which happens to be the pattern used by member-pointers.
1662   if (!CGM.getTypes().isZeroInitializable(Ty)) {
1663     // For a VLA, emit a single element, then splat that over the VLA.
1664     if (vla) Ty = getContext().getBaseElementType(vla);
1665 
1666     llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
1667 
1668     llvm::GlobalVariable *NullVariable =
1669       new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
1670                                /*isConstant=*/true,
1671                                llvm::GlobalVariable::PrivateLinkage,
1672                                NullConstant, Twine());
1673     CharUnits NullAlign = DestPtr.getAlignment();
1674     NullVariable->setAlignment(NullAlign.getAsAlign());
1675     Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
1676                    NullAlign);
1677 
1678     if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
1679 
1680     // Get and call the appropriate llvm.memcpy overload.
1681     Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
1682     return;
1683   }
1684 
1685   // Otherwise, just memset the whole thing to zero.  This is legal
1686   // because in LLVM, all default initializers (other than the ones we just
1687   // handled above) are guaranteed to have a bit pattern of all zeros.
1688   Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
1689 }
1690 
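// GetAddrOfLabel supports the GNU address-of-label extension; e.g.
// (illustrative) "void *p = &&l; goto *p; l: ;" takes the label's address
// here and later dispatches through the shared indirect-goto block.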
1691 llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
1692   // Make sure that there is a block for the indirect goto.
1693   if (!IndirectBranch)
1694     GetIndirectGotoBlock();
1695 
1696   llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
1697 
1698   // Make sure the indirect branch includes all of the address-taken blocks.
1699   IndirectBranch->addDestination(BB);
1700   return llvm::BlockAddress::get(CurFn, BB);
1701 }
1702 
1703 llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
1704   // If we already made the indirect branch for indirect goto, return its block.
1705   if (IndirectBranch) return IndirectBranch->getParent();
1706 
1707   CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
1708 
1709   // Create the PHI node that indirect gotos will add entries to.
1710   llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
1711                                               "indirect.goto.dest");
1712 
1713   // Create the indirect branch instruction.
1714   IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
1715   return IndirectBranch->getParent();
1716 }
1717 
1718 /// Computes the length of an array in elements, as well as the base
1719 /// element type and a properly-typed first element pointer.
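/// For example (illustrative), for "int a[2][3]" this returns a constant 6,
/// sets baseType to 'int', and adjusts addr to point at the first 'int'.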
1720 llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
1721                                               QualType &baseType,
1722                                               Address &addr) {
1723   const ArrayType *arrayType = origArrayType;
1724 
1725   // If it's a VLA, we have to load the stored size.  Note that
1726   // this is the size of the VLA in bytes, not its size in elements.
1727   llvm::Value *numVLAElements = nullptr;
1728   if (isa<VariableArrayType>(arrayType)) {
1729     numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
1730 
1731     // Walk into all VLAs.  This doesn't require changes to addr,
1732     // which has type T* where T is the first non-VLA element type.
1733     do {
1734       QualType elementType = arrayType->getElementType();
1735       arrayType = getContext().getAsArrayType(elementType);
1736 
1737       // If we only have VLA components, 'addr' requires no adjustment.
1738       if (!arrayType) {
1739         baseType = elementType;
1740         return numVLAElements;
1741       }
1742     } while (isa<VariableArrayType>(arrayType));
1743 
1744     // We get out here only if we find a constant array type
1745     // inside the VLA.
1746   }
1747 
1748   // We have some number of constant-length arrays, so addr should
1749   // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
1750   // down to the first element of addr.
1751   SmallVector<llvm::Value*, 8> gepIndices;
1752 
1753   // GEP down to the array type.
1754   llvm::ConstantInt *zero = Builder.getInt32(0);
1755   gepIndices.push_back(zero);
1756 
1757   uint64_t countFromCLAs = 1;
1758   QualType eltType;
1759 
1760   llvm::ArrayType *llvmArrayType =
1761     dyn_cast<llvm::ArrayType>(addr.getElementType());
1762   while (llvmArrayType) {
1763     assert(isa<ConstantArrayType>(arrayType));
1764     assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
1765              == llvmArrayType->getNumElements());
1766 
1767     gepIndices.push_back(zero);
1768     countFromCLAs *= llvmArrayType->getNumElements();
1769     eltType = arrayType->getElementType();
1770 
1771     llvmArrayType =
1772       dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
1773     arrayType = getContext().getAsArrayType(arrayType->getElementType());
1774     assert((!llvmArrayType || arrayType) &&
1775            "LLVM and Clang types are out of sync");
1776   }
1777 
1778   if (arrayType) {
1779     // From this point onwards, the Clang array type has been emitted
1780     // as some other type (probably a packed struct). Compute the array
1781     // size, and just emit the 'begin' expression as a bitcast.
1782     while (arrayType) {
1783       countFromCLAs *=
1784           cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
1785       eltType = arrayType->getElementType();
1786       arrayType = getContext().getAsArrayType(eltType);
1787     }
1788 
1789     llvm::Type *baseType = ConvertType(eltType);
1790     addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
1791   } else {
1792     // Create the actual GEP.
1793     addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
1794                                              gepIndices, "array.begin"),
1795                    addr.getAlignment());
1796   }
1797 
1798   baseType = eltType;
1799 
1800   llvm::Value *numElements
1801     = llvm::ConstantInt::get(SizeTy, countFromCLAs);
1802 
1803   // If we had any VLA dimensions, factor them in.
1804   if (numVLAElements)
1805     numElements = Builder.CreateNUWMul(numVLAElements, numElements);
1806 
1807   return numElements;
1808 }
1809 
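/// getVLASize - For a multi-dimensional VLA such as "int a[n][m]"
/// (illustrative), the returned NumElts is the previously emitted n*m product
/// (combined with NUW multiplies) and the element type is 'int'.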
1810 CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
1811   const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1812   assert(vla && "type was not a variable array type!");
1813   return getVLASize(vla);
1814 }
1815 
1816 CodeGenFunction::VlaSizePair
1817 CodeGenFunction::getVLASize(const VariableArrayType *type) {
1818   // The number of elements so far; always size_t.
1819   llvm::Value *numElements = nullptr;
1820 
1821   QualType elementType;
1822   do {
1823     elementType = type->getElementType();
1824     llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
1825     assert(vlaSize && "no size for VLA!");
1826     assert(vlaSize->getType() == SizeTy);
1827 
1828     if (!numElements) {
1829       numElements = vlaSize;
1830     } else {
1831       // It's undefined behavior if this wraps around, so mark it that way.
1832       // FIXME: Teach -fsanitize=undefined to trap this.
1833       numElements = Builder.CreateNUWMul(numElements, vlaSize);
1834     }
1835   } while ((type = getContext().getAsVariableArrayType(elementType)));
1836 
1837   return { numElements, elementType };
1838 }
1839 
1840 CodeGenFunction::VlaSizePair
1841 CodeGenFunction::getVLAElements1D(QualType type) {
1842   const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1843   assert(vla && "type was not a variable array type!");
1844   return getVLAElements1D(vla);
1845 }
1846 
1847 CodeGenFunction::VlaSizePair
1848 CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
1849   llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
1850   assert(VlaSize && "no size for VLA!");
1851   assert(VlaSize->getType() == SizeTy);
1852   return { VlaSize, Vla->getElementType() };
1853 }
1854 
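/// EmitVariablyModifiedType - For example (illustrative), for a declaration
/// such as "int (*p)[n]" this walks Pointer -> VariableArray and evaluates
/// 'n' once into VLASizeMap, so later uses of the type can reuse the value.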
1855 void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
1856   assert(type->isVariablyModifiedType() &&
1857          "Must pass variably modified type to EmitVLASizes!");
1858 
1859   EnsureInsertPoint();
1860 
1861   // We're going to walk down into the type and look for VLA
1862   // expressions.
1863   do {
1864     assert(type->isVariablyModifiedType());
1865 
1866     const Type *ty = type.getTypePtr();
1867     switch (ty->getTypeClass()) {
1868 
1869 #define TYPE(Class, Base)
1870 #define ABSTRACT_TYPE(Class, Base)
1871 #define NON_CANONICAL_TYPE(Class, Base)
1872 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
1873 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
1874 #include "clang/AST/TypeNodes.inc"
1875       llvm_unreachable("unexpected dependent type!");
1876 
1877     // These types are never variably-modified.
1878     case Type::Builtin:
1879     case Type::Complex:
1880     case Type::Vector:
1881     case Type::ExtVector:
1882     case Type::Record:
1883     case Type::Enum:
1884     case Type::Elaborated:
1885     case Type::TemplateSpecialization:
1886     case Type::ObjCTypeParam:
1887     case Type::ObjCObject:
1888     case Type::ObjCInterface:
1889     case Type::ObjCObjectPointer:
1890       llvm_unreachable("type class is never variably-modified!");
1891 
1892     case Type::Adjusted:
1893       type = cast<AdjustedType>(ty)->getAdjustedType();
1894       break;
1895 
1896     case Type::Decayed:
1897       type = cast<DecayedType>(ty)->getPointeeType();
1898       break;
1899 
1900     case Type::Pointer:
1901       type = cast<PointerType>(ty)->getPointeeType();
1902       break;
1903 
1904     case Type::BlockPointer:
1905       type = cast<BlockPointerType>(ty)->getPointeeType();
1906       break;
1907 
1908     case Type::LValueReference:
1909     case Type::RValueReference:
1910       type = cast<ReferenceType>(ty)->getPointeeType();
1911       break;
1912 
1913     case Type::MemberPointer:
1914       type = cast<MemberPointerType>(ty)->getPointeeType();
1915       break;
1916 
1917     case Type::ConstantArray:
1918     case Type::IncompleteArray:
1919       // Losing element qualification here is fine.
1920       type = cast<ArrayType>(ty)->getElementType();
1921       break;
1922 
1923     case Type::VariableArray: {
1924       // Losing element qualification here is fine.
1925       const VariableArrayType *vat = cast<VariableArrayType>(ty);
1926 
1927       // Unknown size indication requires no size computation.
1928       // Otherwise, evaluate and record it.
1929       if (const Expr *size = vat->getSizeExpr()) {
1930         // It's possible that we might have emitted this already,
1931         // e.g. with a typedef and a pointer to it.
1932         llvm::Value *&entry = VLASizeMap[size];
1933         if (!entry) {
1934           llvm::Value *Size = EmitScalarExpr(size);
1935 
1936           // C11 6.7.6.2p5:
1937           //   If the size is an expression that is not an integer constant
1938           //   expression [...] each time it is evaluated it shall have a value
1939           //   greater than zero.
1940           if (SanOpts.has(SanitizerKind::VLABound) &&
1941               size->getType()->isSignedIntegerType()) {
1942             SanitizerScope SanScope(this);
1943             llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
1944             llvm::Constant *StaticArgs[] = {
1945                 EmitCheckSourceLocation(size->getBeginLoc()),
1946                 EmitCheckTypeDescriptor(size->getType())};
1947             EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
1948                                      SanitizerKind::VLABound),
1949                       SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
1950           }
1951 
1952           // Always zexting here would be wrong if it weren't
1953           // undefined behavior to have a negative bound.
1954           entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
1955         }
1956       }
1957       type = vat->getElementType();
1958       break;
1959     }
1960 
1961     case Type::FunctionProto:
1962     case Type::FunctionNoProto:
1963       type = cast<FunctionType>(ty)->getReturnType();
1964       break;
1965 
1966     case Type::Paren:
1967     case Type::TypeOf:
1968     case Type::UnaryTransform:
1969     case Type::Attributed:
1970     case Type::SubstTemplateTypeParm:
1971     case Type::PackExpansion:
1972     case Type::MacroQualified:
1973       // Keep walking after single level desugaring.
1974       type = type.getSingleStepDesugaredType(getContext());
1975       break;
1976 
1977     case Type::Typedef:
1978     case Type::Decltype:
1979     case Type::Auto:
1980     case Type::DeducedTemplateSpecialization:
1981       // Stop walking: nothing to do.
1982       return;
1983 
1984     case Type::TypeOfExpr:
1985       // Stop walking: emit typeof expression.
1986       EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
1987       return;
1988 
1989     case Type::Atomic:
1990       type = cast<AtomicType>(ty)->getValueType();
1991       break;
1992 
1993     case Type::Pipe:
1994       type = cast<PipeType>(ty)->getElementType();
1995       break;
1996     }
1997   } while (type->isVariablyModifiedType());
1998 }
1999 
2000 Address CodeGenFunction::EmitVAListRef(const Expr* E) {
2001   if (getContext().getBuiltinVaListType()->isArrayType())
2002     return EmitPointerWithAlignment(E);
2003   return EmitLValue(E).getAddress();
2004 }
2005 
2006 Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2007   return EmitLValue(E).getAddress();
2008 }
2009 
2010 void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2011                                               const APValue &Init) {
2012   assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2013   if (CGDebugInfo *Dbg = getDebugInfo())
2014     if (CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
2015       Dbg->EmitGlobalVariable(E->getDecl(), Init);
2016 }
2017 
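// protectFromPeepholes - IR generation may fold e.g. trunc(zext x) back to
// x; the no-op bitcast created below keeps the zext visible until
// unprotectFromPeepholes erases the bitcast again.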
2018 CodeGenFunction::PeepholeProtection
2019 CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2020   // At the moment, the only aggressive peephole we do in IR gen
2021   // is trunc(zext) folding, but if we add more, we can easily
2022   // extend this protection.
2023 
2024   if (!rvalue.isScalar()) return PeepholeProtection();
2025   llvm::Value *value = rvalue.getScalarVal();
2026   if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2027 
2028   // Just make an extra bitcast.
2029   assert(HaveInsertPoint());
2030   llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2031                                                   Builder.GetInsertBlock());
2032 
2033   PeepholeProtection protection;
2034   protection.Inst = inst;
2035   return protection;
2036 }
2037 
2038 void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2039   if (!protection.Inst) return;
2040 
2041   // In theory, we could try to duplicate the peepholes now, but whatever.
2042   protection.Inst->eraseFromParent();
2043 }
2044 
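// EmitAlignmentAssumption - Reached e.g. (illustrative) for
// "q = __builtin_assume_aligned(p, 32)": this emits an llvm.assume-based
// alignment assumption and, under -fsanitize=alignment, first emits a
// runtime check of the same predicate.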
2045 void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
2046                                               QualType Ty, SourceLocation Loc,
2047                                               SourceLocation AssumptionLoc,
2048                                               llvm::Value *Alignment,
2049                                               llvm::Value *OffsetValue) {
2050   llvm::Value *TheCheck;
2051   llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2052       CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
2053   if (SanOpts.has(SanitizerKind::Alignment)) {
2054     EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2055                                  OffsetValue, TheCheck, Assumption);
2056   }
2057 }
2058 
2059 void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
2060                                               const Expr *E,
2061                                               SourceLocation AssumptionLoc,
2062                                               llvm::Value *Alignment,
2063                                               llvm::Value *OffsetValue) {
2064   if (auto *CE = dyn_cast<CastExpr>(E))
2065     E = CE->getSubExprAsWritten();
2066   QualType Ty = E->getType();
2067   SourceLocation Loc = E->getExprLoc();
2068 
2069   EmitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2070                           OffsetValue);
2071 }
2072 
2073 llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2074                                                  llvm::Value *AnnotatedVal,
2075                                                  StringRef AnnotationStr,
2076                                                  SourceLocation Location) {
2077   llvm::Value *Args[4] = {
2078     AnnotatedVal,
2079     Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
2080     Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
2081     CGM.EmitAnnotationLineNo(Location)
2082   };
2083   return Builder.CreateCall(AnnotationFn, Args);
2084 }
2085 
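// EmitVarAnnotations - E.g. (illustrative) for
//   int x __attribute__((annotate("mytag")));
// this emits a call to the llvm.var.annotation intrinsic with the address
// of 'x', the annotation string, and the source location.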
2086 void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2087   assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2088   // FIXME We create a new bitcast for every annotation because that's what
2089   // llvm-gcc was doing.
2090   for (const auto *I : D->specific_attrs<AnnotateAttr>())
2091     EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
2092                        Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
2093                        I->getAnnotation(), D->getLocation());
2094 }
2095 
2096 Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2097                                               Address Addr) {
2098   assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2099   llvm::Value *V = Addr.getPointer();
2100   llvm::Type *VTy = V->getType();
2101   llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2102                                     CGM.Int8PtrTy);
2103 
2104   for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2105     // FIXME Always emit the cast inst so we can differentiate between
2106     // annotation on the first field of a struct and annotation on the struct
2107     // itself.
2108     if (VTy != CGM.Int8PtrTy)
2109       V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
2110     V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
2111     V = Builder.CreateBitCast(V, VTy);
2112   }
2113 
2114   return Address(V, Addr.getAlignment());
2115 }
2116 
2117 CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2118 
2119 CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2120     : CGF(CGF) {
2121   assert(!CGF->IsSanitizerScope);
2122   CGF->IsSanitizerScope = true;
2123 }
2124 
2125 CodeGenFunction::SanitizerScope::~SanitizerScope() {
2126   CGF->IsSanitizerScope = false;
2127 }
2128 
2129 void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2130                                    const llvm::Twine &Name,
2131                                    llvm::BasicBlock *BB,
2132                                    llvm::BasicBlock::iterator InsertPt) const {
2133   LoopStack.InsertHelper(I);
2134   if (IsSanitizerScope)
2135     CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
2136 }
2137 
2138 void CGBuilderInserter::InsertHelper(
2139     llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2140     llvm::BasicBlock::iterator InsertPt) const {
2141   llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2142   if (CGF)
2143     CGF->InsertHelper(I, Name, BB, InsertPt);
2144 }
2145 
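// hasRequiredFeatures - Entries in ReqFeatures may contain '|'-separated
// alternatives; e.g. (illustrative) {"sse4.2", "avx|avx2"} requires sse4.2
// and at least one of avx or avx2 to be enabled in the caller.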
2146 static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
2147                                 CodeGenModule &CGM, const FunctionDecl *FD,
2148                                 std::string &FirstMissing) {
2149   // If there aren't any required features listed, go ahead and return false.
2150   if (ReqFeatures.empty())
2151     return false;
2152 
2153   // Now build up the set of caller features and verify that all the required
2154   // features are there.
2155   llvm::StringMap<bool> CallerFeatureMap;
2156   CGM.getFunctionFeatureMap(CallerFeatureMap, GlobalDecl().getWithDecl(FD));
2157 
2158   // Return true only if all the required features are present in the caller;
2159   // an entry with '|'-separated alternatives is satisfied by any one of them.
2160   return std::all_of(
2161       ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
2162         SmallVector<StringRef, 1> OrFeatures;
2163         Feature.split(OrFeatures, '|');
2164         return llvm::any_of(OrFeatures, [&](StringRef Feature) {
2165           if (!CallerFeatureMap.lookup(Feature)) {
2166             FirstMissing = Feature.str();
2167             return false;
2168           }
2169           return true;
2170         });
2171       });
2172 }
2173 
2174 // Emits an error if we don't have a valid set of target features for the
2175 // called function.
2176 void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2177                                           const FunctionDecl *TargetDecl) {
2178   return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2179 }
2180 
2181 // Emits an error if we don't have a valid set of target features for the
2182 // called function.
2183 void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2184                                           const FunctionDecl *TargetDecl) {
2185   // Early exit if this is an indirect call.
2186   if (!TargetDecl)
2187     return;
2188 
2189   // Get the current enclosing function if it exists. If it doesn't,
2190   // we can't check the target features anyhow.
2191   const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2192   if (!FD)
2193     return;
2194 
2195   // Grab the required features for the call. For a builtin this is listed in
2196   // the td file with the default cpu; for an always_inline function this is
2197   // any listed cpu and any listed features.
2198   unsigned BuiltinID = TargetDecl->getBuiltinID();
2199   std::string MissingFeature;
2200   if (BuiltinID) {
2201     SmallVector<StringRef, 1> ReqFeatures;
2202     const char *FeatureList =
2203         CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2204     // Return if the builtin doesn't have any required features.
2205     if (!FeatureList || StringRef(FeatureList) == "")
2206       return;
2207     StringRef(FeatureList).split(ReqFeatures, ',');
2208     if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2209       CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2210           << TargetDecl->getDeclName()
2211           << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2212 
2213   } else if (TargetDecl->hasAttr<TargetAttr>() ||
2214              TargetDecl->hasAttr<CPUSpecificAttr>()) {
2215     // Get the required features for the callee.
2216 
2217     const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2218     TargetAttr::ParsedTargetAttr ParsedAttr = CGM.filterFunctionTargetAttrs(TD);
2219 
2220     SmallVector<StringRef, 1> ReqFeatures;
2221     llvm::StringMap<bool> CalleeFeatureMap;
2222     CGM.getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2223 
2224     for (const auto &F : ParsedAttr.Features) {
2225       if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2226         ReqFeatures.push_back(StringRef(F).substr(1));
2227     }
2228 
2229     for (const auto &F : CalleeFeatureMap) {
2230       // Only positive features are "required".
2231       if (F.getValue())
2232         ReqFeatures.push_back(F.getKey());
2233     }
2234     if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2235       CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2236           << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2237   }
2238 }
2239 
2240 void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2241   if (!CGM.getCodeGenOpts().SanitizeStats)
2242     return;
2243 
2244   llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2245   IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2246   CGM.getSanStats().create(IRB, SSK);
2247 }
2248 
2249 llvm::Value *
2250 CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
2251   llvm::Value *Condition = nullptr;
2252 
2253   if (!RO.Conditions.Architecture.empty())
2254     Condition = EmitX86CpuIs(RO.Conditions.Architecture);
2255 
2256   if (!RO.Conditions.Features.empty()) {
2257     llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2258     Condition =
2259         Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2260   }
2261   return Condition;
2262 }
2263 
2264 static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2265                                              llvm::Function *Resolver,
2266                                              CGBuilderTy &Builder,
2267                                              llvm::Function *FuncToReturn,
2268                                              bool SupportsIFunc) {
2269   if (SupportsIFunc) {
2270     Builder.CreateRet(FuncToReturn);
2271     return;
2272   }
2273 
2274   llvm::SmallVector<llvm::Value *, 10> Args;
2275   llvm::for_each(Resolver->args(),
2276                  [&](llvm::Argument &Arg) { Args.push_back(&Arg); });
2277 
2278   llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2279   Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2280 
2281   if (Resolver->getReturnType()->isVoidTy())
2282     Builder.CreateRetVoid();
2283   else
2284     Builder.CreateRet(Result);
2285 }
2286 
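// EmitMultiVersionResolver - A sketch of the emitted control flow for two
// options plus a default (illustrative):
//   resolver_entry:  if (cond0) goto resolver_return;   // version 0
//   resolver_else:   if (cond1) goto resolver_return1;  // version 1
//   resolver_else1:  return default_version;            // or musttail call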
2287 void CodeGenFunction::EmitMultiVersionResolver(
2288     llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2289   assert((getContext().getTargetInfo().getTriple().getArch() ==
2290               llvm::Triple::x86 ||
2291           getContext().getTargetInfo().getTriple().getArch() ==
2292               llvm::Triple::x86_64) &&
2293          "Only implemented for x86 targets");
2294 
2295   bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2296 
2297   // Main function's basic block.
2298   llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2299   Builder.SetInsertPoint(CurBlock);
2300   EmitX86CpuInit();
2301 
2302   for (const MultiVersionResolverOption &RO : Options) {
2303     Builder.SetInsertPoint(CurBlock);
2304     llvm::Value *Condition = FormResolverCondition(RO);
2305 
2306     // The 'default' or 'generic' case.
2307     if (!Condition) {
2308       assert(&RO == Options.end() - 1 &&
2309              "Default or Generic case must be last");
2310       CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2311                                        SupportsIFunc);
2312       return;
2313     }
2314 
2315     llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2316     CGBuilderTy RetBuilder(*this, RetBlock);
2317     CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2318                                      SupportsIFunc);
2319     CurBlock = createBasicBlock("resolver_else", Resolver);
2320     Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2321   }
2322 
2323   // If no generic/default, emit an unreachable.
2324   Builder.SetInsertPoint(CurBlock);
2325   llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2326   TrapCall->setDoesNotReturn();
2327   TrapCall->setDoesNotThrow();
2328   Builder.CreateUnreachable();
2329   Builder.ClearInsertionPoint();
2330 }
2331 
2332 // Loc - where the diagnostic will point, where in the source code this
2333 //  alignment has failed.
2334 // SecondaryLoc - if present (will be present if sufficiently different from
2335 //  Loc), the diagnostic will additionally emit a "Note:" pointing to this
2336 //  location. It should be the location where, e.g., the
2337 //  __attribute__((assume_aligned)) was written.
2338 void CodeGenFunction::EmitAlignmentAssumptionCheck(
2339     llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2340     SourceLocation SecondaryLoc, llvm::Value *Alignment,
2341     llvm::Value *OffsetValue, llvm::Value *TheCheck,
2342     llvm::Instruction *Assumption) {
2343   assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2344          cast<llvm::CallInst>(Assumption)->getCalledValue() ==
2345              llvm::Intrinsic::getDeclaration(
2346                  Builder.GetInsertBlock()->getParent()->getParent(),
2347                  llvm::Intrinsic::assume) &&
2348          "Assumption should be a call to llvm.assume().");
2349   assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2350          "Assumption should be the last instruction of the basic block, "
2351          "since the basic block is still being generated.");
2352 
2353   if (!SanOpts.has(SanitizerKind::Alignment))
2354     return;
2355 
2356   // Don't check pointers to volatile data. The behavior here is implementation-
2357   // defined.
2358   if (Ty->getPointeeType().isVolatileQualified())
2359     return;
2360 
2361   // We need to temporarily remove the assumption so we can insert the
2362   // sanitizer check before it, else the check will be dropped by optimizations.
2363   Assumption->removeFromParent();
2364 
2365   {
2366     SanitizerScope SanScope(this);
2367 
2368     if (!OffsetValue)
2369       OffsetValue = Builder.getInt1(0); // no offset.
2370 
2371     llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2372                                     EmitCheckSourceLocation(SecondaryLoc),
2373                                     EmitCheckTypeDescriptor(Ty)};
2374     llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2375                                   EmitCheckValue(Alignment),
2376                                   EmitCheckValue(OffsetValue)};
2377     EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2378               SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2379   }
2380 
2381   // We are now in the (new, empty) "cont" basic block.
2382   // Reintroduce the assumption.
2383   Builder.Insert(Assumption);
2384   // FIXME: Assumption still has its original basic block as its parent.
2385 }
2386 
2387 llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
2388   if (CGDebugInfo *DI = getDebugInfo())
2389     return DI->SourceLocToDebugLoc(Location);
2390 
2391   return llvm::DebugLoc();
2392 }
2393