xref: /llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp (revision 5087ace65197471c07b78d16e3d599187c442cbf)
1 //===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This coordinates the per-function state used while generating code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CodeGenFunction.h"
14 #include "CGBlocks.h"
15 #include "CGCUDARuntime.h"
16 #include "CGCXXABI.h"
17 #include "CGCleanup.h"
18 #include "CGDebugInfo.h"
19 #include "CGOpenMPRuntime.h"
20 #include "CodeGenModule.h"
21 #include "CodeGenPGO.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/ASTContext.h"
24 #include "clang/AST/ASTLambda.h"
25 #include "clang/AST/Attr.h"
26 #include "clang/AST/Decl.h"
27 #include "clang/AST/DeclCXX.h"
28 #include "clang/AST/StmtCXX.h"
29 #include "clang/AST/StmtObjC.h"
30 #include "clang/Basic/Builtins.h"
31 #include "clang/Basic/CodeGenOptions.h"
32 #include "clang/Basic/TargetInfo.h"
33 #include "clang/CodeGen/CGFunctionInfo.h"
34 #include "clang/Frontend/FrontendDiagnostic.h"
35 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/IR/Dominators.h"
38 #include "llvm/IR/FPEnv.h"
39 #include "llvm/IR/IntrinsicInst.h"
40 #include "llvm/IR/Intrinsics.h"
41 #include "llvm/IR/MDBuilder.h"
42 #include "llvm/IR/Operator.h"
43 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
44 using namespace clang;
45 using namespace CodeGen;
46 
47 /// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
48 /// markers.
49 static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
50                                       const LangOptions &LangOpts) {
51   if (CGOpts.DisableLifetimeMarkers)
52     return false;
53 
54   // Sanitizers may use markers.
55   if (CGOpts.SanitizeAddressUseAfterScope ||
56       LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
57       LangOpts.Sanitize.has(SanitizerKind::Memory))
58     return true;
59 
60   // For now, only in optimized builds.
61   return CGOpts.OptimizationLevel != 0;
62 }
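
// For illustration, a minimal sketch of what the markers look like once
// emitted (IR shown loosely; value names here are made up): each local's
// alloca is bracketed so later passes can reuse or poison the stack slot.
//
//   %x = alloca i32, align 4
//   call void @llvm.lifetime.start.p0i8(i64 4, i8* %x.i8)
//   ...                                   ; uses of %x
//   call void @llvm.lifetime.end.p0i8(i64 4, i8* %x.i8)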
63 
64 CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
65     : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
66       Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
67               CGBuilderInserterTy(this)),
68       SanOpts(CGM.getLangOpts().Sanitize), DebugInfo(CGM.getModuleDebugInfo()),
69       PGO(cgm), ShouldEmitLifetimeMarkers(shouldEmitLifetimeMarkers(
70                     CGM.getCodeGenOpts(), CGM.getLangOpts())) {
71   if (!suppressNewContext)
72     CGM.getCXXABI().getMangleContext().startNewFunction();
73 
74   llvm::FastMathFlags FMF;
75   if (CGM.getLangOpts().FastMath)
76     FMF.setFast();
77   if (CGM.getLangOpts().FiniteMathOnly) {
78     FMF.setNoNaNs();
79     FMF.setNoInfs();
80   }
81   if (CGM.getCodeGenOpts().NoNaNsFPMath) {
82     FMF.setNoNaNs();
83   }
84   if (CGM.getCodeGenOpts().NoSignedZeros) {
85     FMF.setNoSignedZeros();
86   }
87   if (CGM.getCodeGenOpts().ReciprocalMath) {
88     FMF.setAllowReciprocal();
89   }
90   if (CGM.getCodeGenOpts().Reassociate) {
91     FMF.setAllowReassoc();
92   }
93   Builder.setFastMathFlags(FMF);
94   SetFPModel();
95 }
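
// For illustration: under -ffast-math the mapping above sets every flag at
// once via FMF.setFast(), while -ffinite-math-only sets only nnan and ninf,
// so a later Builder.CreateFAdd(L, R) yields roughly:
//
//   %sum = fadd nnan ninf double %L, %R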
96 
97 CodeGenFunction::~CodeGenFunction() {
98   assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
99 
100   // If there are any unclaimed block infos, go ahead and destroy them
101   // now.  This can happen if IR-gen gets clever and skips evaluating
102   // something.
103   if (FirstBlockInfo)
104     destroyBlockInfos(FirstBlockInfo);
105 
106   if (getLangOpts().OpenMP && CurFn)
107     CGM.getOpenMPRuntime().functionFinished(*this);
108 
109   // If we have an OpenMPIRBuilder we want to finalize functions (including
110   // outlining) at some point. Doing it once function codegen is done seems a
111   // reasonable spot. We do it here, as opposed to the deletion time of the
112   // CodeGenModule, because we have to ensure the IR has not yet been
113   // "emitted" to the outside, so that modifications are still meaningful.
114   if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder())
115     OMPBuilder->finalize();
116 }
117 
118 // Map the LangOption for rounding mode into
119 // the corresponding enum in the IR.
120 static llvm::fp::RoundingMode ToConstrainedRoundingMD(
121   LangOptions::FPRoundingModeKind Kind) {
122 
123   switch (Kind) {
124   case LangOptions::FPR_ToNearest:  return llvm::fp::rmToNearest;
125   case LangOptions::FPR_Downward:   return llvm::fp::rmDownward;
126   case LangOptions::FPR_Upward:     return llvm::fp::rmUpward;
127   case LangOptions::FPR_TowardZero: return llvm::fp::rmTowardZero;
128   case LangOptions::FPR_Dynamic:    return llvm::fp::rmDynamic;
129   }
130   llvm_unreachable("Unsupported FP RoundingMode");
131 }
132 
133 // Map the LangOption for exception behavior into
134 // the corresponding enum in the IR.
135 static llvm::fp::ExceptionBehavior ToConstrainedExceptMD(
136   LangOptions::FPExceptionModeKind Kind) {
137 
138   switch (Kind) {
139   case LangOptions::FPE_Ignore:  return llvm::fp::ebIgnore;
140   case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
141   case LangOptions::FPE_Strict:  return llvm::fp::ebStrict;
142   }
143   llvm_unreachable("Unsupported FP Exception Behavior");
144 }
145 
146 void CodeGenFunction::SetFPModel() {
147   auto fpRoundingMode = ToConstrainedRoundingMD(
148                           getLangOpts().getFPRoundingMode());
149   auto fpExceptionBehavior = ToConstrainedExceptMD(
150                                getLangOpts().getFPExceptionMode());
151 
152   if (fpExceptionBehavior == llvm::fp::ebIgnore &&
153       fpRoundingMode == llvm::fp::rmToNearest)
154     // Constrained intrinsics are not used.
155     ;
156   else {
157     Builder.setIsFPConstrained(true);
158     Builder.setDefaultConstrainedRounding(fpRoundingMode);
159     Builder.setDefaultConstrainedExcept(fpExceptionBehavior);
160   }
161 }
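
// For illustration, a minimal sketch of the constrained case: with, say,
// dynamic rounding and strict exception semantics, the state set above makes
// ordinary IRBuilder calls emit constrained intrinsics instead of plain FP
// instructions:
//
//   Builder.setIsFPConstrained(true);
//   Builder.setDefaultConstrainedRounding(llvm::fp::rmDynamic);
//   Builder.setDefaultConstrainedExcept(llvm::fp::ebStrict);
//   // Builder.CreateFAdd(L, R) now lowers to
//   //   @llvm.experimental.constrained.fadd(..., metadata !"round.dynamic",
//   //                                       metadata !"fpexcept.strict")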
162 
163 CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
164                                                     LValueBaseInfo *BaseInfo,
165                                                     TBAAAccessInfo *TBAAInfo) {
166   return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
167                                  /* forPointeeType= */ true);
168 }
169 
170 CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
171                                                    LValueBaseInfo *BaseInfo,
172                                                    TBAAAccessInfo *TBAAInfo,
173                                                    bool forPointeeType) {
174   if (TBAAInfo)
175     *TBAAInfo = CGM.getTBAAAccessInfo(T);
176 
177   // Honor alignment typedef attributes even on incomplete types.
178   // We also honor them directly for C++ class types, even as pointees;
179   // there's an expressivity gap here.
180   if (auto TT = T->getAs<TypedefType>()) {
181     if (auto Align = TT->getDecl()->getMaxAlignment()) {
182       if (BaseInfo)
183         *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
184       return getContext().toCharUnitsFromBits(Align);
185     }
186   }
187 
188   if (BaseInfo)
189     *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
190 
191   CharUnits Alignment;
192   if (T->isIncompleteType()) {
193     Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
194   } else {
195     // For C++ class pointees, we don't know whether we're pointing at a
196     // base or a complete object, so we generally need to use the
197     // non-virtual alignment.
198     const CXXRecordDecl *RD;
199     if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
200       Alignment = CGM.getClassPointerAlignment(RD);
201     } else {
202       Alignment = getContext().getTypeAlignInChars(T);
203       if (T.getQualifiers().hasUnaligned())
204         Alignment = CharUnits::One();
205     }
206 
207     // Cap to the global maximum type alignment unless the alignment
208     // was somehow explicit on the type.
209     if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
210       if (Alignment.getQuantity() > MaxAlign &&
211           !getContext().isAlignmentRequired(T))
212         Alignment = CharUnits::fromQuantity(MaxAlign);
213     }
214   }
215   return Alignment;
216 }
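
// For illustration (hypothetical types, typical 64-bit target):
//
//   typedef double D32 __attribute__((aligned(32)));
//   D32 *P;          // pointee alignment 32, AlignmentSource::AttributedType
//   struct Fwd *Q;   // incomplete pointee -> CharUnits::One() placeholder
//   Derived *D;      // as a pointee, gets the non-virtual class-pointer
//                    // alignment, since D may address a base subobject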
217 
218 LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
219   LValueBaseInfo BaseInfo;
220   TBAAAccessInfo TBAAInfo;
221   CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
222   return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
223                           TBAAInfo);
224 }
225 
226 /// Given a value of type T* that may not point to a complete object,
227 /// construct an l-value with the natural pointee alignment of T.
228 LValue
229 CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
230   LValueBaseInfo BaseInfo;
231   TBAAAccessInfo TBAAInfo;
232   CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
233                                             /* forPointeeType= */ true);
234   return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
235 }
236 
237 
238 llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
239   return CGM.getTypes().ConvertTypeForMem(T);
240 }
241 
242 llvm::Type *CodeGenFunction::ConvertType(QualType T) {
243   return CGM.getTypes().ConvertType(T);
244 }
245 
246 TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
247   type = type.getCanonicalType();
248   while (true) {
249     switch (type->getTypeClass()) {
250 #define TYPE(name, parent)
251 #define ABSTRACT_TYPE(name, parent)
252 #define NON_CANONICAL_TYPE(name, parent) case Type::name:
253 #define DEPENDENT_TYPE(name, parent) case Type::name:
254 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
255 #include "clang/AST/TypeNodes.inc"
256       llvm_unreachable("non-canonical or dependent type in IR-generation");
257 
258     case Type::Auto:
259     case Type::DeducedTemplateSpecialization:
260       llvm_unreachable("undeduced type in IR-generation");
261 
262     // Various scalar types.
263     case Type::Builtin:
264     case Type::Pointer:
265     case Type::BlockPointer:
266     case Type::LValueReference:
267     case Type::RValueReference:
268     case Type::MemberPointer:
269     case Type::Vector:
270     case Type::ExtVector:
271     case Type::FunctionProto:
272     case Type::FunctionNoProto:
273     case Type::Enum:
274     case Type::ObjCObjectPointer:
275     case Type::Pipe:
276       return TEK_Scalar;
277 
278     // Complexes.
279     case Type::Complex:
280       return TEK_Complex;
281 
282     // Arrays, records, and Objective-C objects.
283     case Type::ConstantArray:
284     case Type::IncompleteArray:
285     case Type::VariableArray:
286     case Type::Record:
287     case Type::ObjCObject:
288     case Type::ObjCInterface:
289       return TEK_Aggregate;
290 
291     // We operate on atomic values according to their underlying type.
292     case Type::Atomic:
293       type = cast<AtomicType>(type)->getValueType();
294       continue;
295     }
296     llvm_unreachable("unknown type kind!");
297   }
298 }
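
// For illustration, some example classifications:
//
//   int, float, int*, an enum, a member pointer  -> TEK_Scalar
//   _Complex double                              -> TEK_Complex
//   struct S { int a, b; }, int[8], ObjC objects -> TEK_Aggregate
//   _Atomic(int) -> classified via its value type, i.e. TEK_Scalar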
299 
300 llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
301   // For cleanliness, we try to avoid emitting the return block for
302   // simple cases.
303   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
304 
305   if (CurBB) {
306     assert(!CurBB->getTerminator() && "Unexpected terminated block.");
307 
308     // We have a valid insert point, reuse it if it is empty or there are no
309     // explicit jumps to the return block.
310     if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
311       ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
312       delete ReturnBlock.getBlock();
313       ReturnBlock = JumpDest();
314     } else
315       EmitBlock(ReturnBlock.getBlock());
316     return llvm::DebugLoc();
317   }
318 
319   // Otherwise, if the return block is the target of a single direct
320   // branch then we can just put the code in that block instead. This
321   // cleans up functions which started with a unified return block.
322   if (ReturnBlock.getBlock()->hasOneUse()) {
323     llvm::BranchInst *BI =
324       dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
325     if (BI && BI->isUnconditional() &&
326         BI->getSuccessor(0) == ReturnBlock.getBlock()) {
327       // Record/return the DebugLoc of the simple 'return' expression to be used
328       // later by the actual 'ret' instruction.
329       llvm::DebugLoc Loc = BI->getDebugLoc();
330       Builder.SetInsertPoint(BI->getParent());
331       BI->eraseFromParent();
332       delete ReturnBlock.getBlock();
333       ReturnBlock = JumpDest();
334       return Loc;
335     }
336   }
337 
338   // FIXME: We are at an unreachable point, there is no reason to emit the block
339   // unless it has uses. However, we still need a place to put the debug
340   // region.end for now.
341 
342   EmitBlock(ReturnBlock.getBlock());
343   return llvm::DebugLoc();
344 }
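
// For illustration, consider a trivial function:
//
//   int f() { return 0; }
//
// Its return block has a single unconditional-branch use, so the branch is
// folded away, the insert point moves to the branch's block, and the saved
// DebugLoc lets the eventual 'ret' carry the return statement's location.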
345 
346 static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
347   if (!BB) return;
348   if (!BB->use_empty())
349     return CGF.CurFn->getBasicBlockList().push_back(BB);
350   delete BB;
351 }
352 
353 void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
354   assert(BreakContinueStack.empty() &&
355          "mismatched push/pop in break/continue stack!");
356 
357   bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
358     && NumSimpleReturnExprs == NumReturnExprs
359     && ReturnBlock.getBlock()->use_empty();
360   // Usually the return expression is evaluated before the cleanup
361   // code.  If the function contains only a simple return statement,
362   // such as a constant, the location before the cleanup code becomes
363   // the last useful breakpoint in the function, because the simple
364   // return expression will be evaluated after the cleanup code. To be
365   // safe, set the debug location for cleanup code to the location of
366   // the return statement.  Otherwise the cleanup code should be at the
367   // end of the function's lexical scope.
368   //
369   // If there are multiple branches to the return block, the branch
370   // instructions will get the location of the return statements and
371   // all will be fine.
372   if (CGDebugInfo *DI = getDebugInfo()) {
373     if (OnlySimpleReturnStmts)
374       DI->EmitLocation(Builder, LastStopPoint);
375     else
376       DI->EmitLocation(Builder, EndLoc);
377   }
378 
379   // Pop any cleanups that might have been associated with the
380   // parameters.  Do this in whatever block we're currently in; it's
381   // important to do this before we enter the return block or return
382   // edges will be *really* confused.
383   bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
384   bool HasOnlyLifetimeMarkers =
385       HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
386   bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
387   if (HasCleanups) {
388     // Make sure the line table doesn't jump back into the body for
389     // the ret after it's been at EndLoc.
390     Optional<ApplyDebugLocation> AL;
391     if (CGDebugInfo *DI = getDebugInfo()) {
392       if (OnlySimpleReturnStmts)
393         DI->EmitLocation(Builder, EndLoc);
394       else
395         // We may not have a valid end location. Try to apply it anyway, and
396         // fall back to an artificial location if needed.
397         AL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
398     }
399 
400     PopCleanupBlocks(PrologueCleanupDepth);
401   }
402 
403   // Emit function epilog (to return).
404   llvm::DebugLoc Loc = EmitReturnBlock();
405 
406   if (ShouldInstrumentFunction()) {
407     if (CGM.getCodeGenOpts().InstrumentFunctions)
408       CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
409     if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
410       CurFn->addFnAttr("instrument-function-exit-inlined",
411                        "__cyg_profile_func_exit");
412   }
413 
414   // Emit debug descriptor for function end.
415   if (CGDebugInfo *DI = getDebugInfo())
416     DI->EmitFunctionEnd(Builder, CurFn);
417 
418   // Reset the debug location to that of the simple 'return' expression, if
419   // any, rather than that of the end of the function's scope '}'.
420   ApplyDebugLocation AL(*this, Loc);
421   EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
422   EmitEndEHSpec(CurCodeDecl);
423 
424   assert(EHStack.empty() &&
425          "did not remove all scopes from cleanup stack!");
426 
427   // If someone did an indirect goto, emit the indirect goto block at the end of
428   // the function.
429   if (IndirectBranch) {
430     EmitBlock(IndirectBranch->getParent());
431     Builder.ClearInsertionPoint();
432   }
433 
434   // If some of our locals escaped, insert a call to llvm.localescape in the
435   // entry block.
436   if (!EscapedLocals.empty()) {
437     // Invert the map from local to index into a simple vector. There should be
438     // no holes.
439     SmallVector<llvm::Value *, 4> EscapeArgs;
440     EscapeArgs.resize(EscapedLocals.size());
441     for (auto &Pair : EscapedLocals)
442       EscapeArgs[Pair.second] = Pair.first;
443     llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
444         &CGM.getModule(), llvm::Intrinsic::localescape);
445     CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
446   }
447 
448   // Remove the AllocaInsertPt instruction, which is just a convenience for us.
449   llvm::Instruction *Ptr = AllocaInsertPt;
450   AllocaInsertPt = nullptr;
451   Ptr->eraseFromParent();
452 
453   // If someone took the address of a label but never did an indirect goto, we
454   // made a zero-entry PHI node, which is illegal; zap it now.
455   if (IndirectBranch) {
456     llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
457     if (PN->getNumIncomingValues() == 0) {
458       PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
459       PN->eraseFromParent();
460     }
461   }
462 
463   EmitIfUsed(*this, EHResumeBlock);
464   EmitIfUsed(*this, TerminateLandingPad);
465   EmitIfUsed(*this, TerminateHandler);
466   EmitIfUsed(*this, UnreachableBlock);
467 
468   for (const auto &FuncletAndParent : TerminateFunclets)
469     EmitIfUsed(*this, FuncletAndParent.second);
470 
471   if (CGM.getCodeGenOpts().EmitDeclMetadata)
472     EmitDeclMetadata();
473 
474   for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
475            I = DeferredReplacements.begin(),
476            E = DeferredReplacements.end();
477        I != E; ++I) {
478     I->first->replaceAllUsesWith(I->second);
479     I->first->eraseFromParent();
480   }
481 
482   // Eliminate the CleanupDestSlot alloca by replacing it with SSA values and
483   // PHIs if the current function is a coroutine. We don't do it for all
484   // functions as it may result in a slight increase in the number of
485   // instructions if compiled with no optimizations. We do it for coroutines
486   // as the lifetime of the CleanupDestSlot alloca makes correct coroutine
487   // frame building very difficult.
488   if (NormalCleanupDest.isValid() && isCoroutine()) {
489     llvm::DominatorTree DT(*CurFn);
490     llvm::PromoteMemToReg(
491         cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
492     NormalCleanupDest = Address::invalid();
493   }
494 
495   // Scan function arguments for vector width.
496   for (llvm::Argument &A : CurFn->args())
497     if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
498       LargestVectorWidth =
499           std::max((uint64_t)LargestVectorWidth,
500                    VT->getPrimitiveSizeInBits().getKnownMinSize());
501 
502   // Update vector width based on return type.
503   if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
504     LargestVectorWidth =
505         std::max((uint64_t)LargestVectorWidth,
506                  VT->getPrimitiveSizeInBits().getKnownMinSize());
507 
508   // Add the min-legal-vector-width attribute. This contains the max width from:
509   // 1. min-vector-width attribute used in the source program.
510   // 2. Any builtins used that have a vector width specified.
511   // 3. Values passed in and out of inline assembly.
512   // 4. Width of vector arguments and return types for this function.
513   // 5. Width of vector arguments and return types for functions called by this
514   //    function.
515   CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
516 
517   // If we generated an unreachable return block, delete it now.
518   if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
519     Builder.ClearInsertionPoint();
520     ReturnBlock.getBlock()->eraseFromParent();
521   }
522   if (ReturnValue.isValid()) {
523     auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
524     if (RetAlloca && RetAlloca->use_empty()) {
525       RetAlloca->eraseFromParent();
526       ReturnValue = Address::invalid();
527     }
528   }
529 }
530 
531 /// ShouldInstrumentFunction - Return true if the current function should be
532 /// instrumented with __cyg_profile_func_* calls.
533 bool CodeGenFunction::ShouldInstrumentFunction() {
534   if (!CGM.getCodeGenOpts().InstrumentFunctions &&
535       !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
536       !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
537     return false;
538   if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
539     return false;
540   return true;
541 }
542 
543 /// ShouldXRayInstrumentFunction - Return true if the current function
544 /// should be instrumented with XRay nop sleds.
545 bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
546   return CGM.getCodeGenOpts().XRayInstrumentFunctions;
547 }
548 
549 /// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls
550 /// to the __xray_customevent(...) builtin, when doing XRay instrumentation.
551 bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
552   return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
553          (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
554           CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
555               XRayInstrKind::Custom);
556 }
557 
558 bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
559   return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
560          (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
561           CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
562               XRayInstrKind::Typed);
563 }
564 
565 llvm::Constant *
566 CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
567                                             llvm::Constant *Addr) {
568   // Addresses stored in prologue data can't require run-time fixups and must
569   // be PC-relative. Run-time fixups are undesirable because they necessitate
570   // writable text segments, which are unsafe. And absolute addresses are
571   // undesirable because they break PIE mode.
572 
573   // Add a layer of indirection through a private global. Taking its address
574   // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
575   auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
576                                       /*isConstant=*/true,
577                                       llvm::GlobalValue::PrivateLinkage, Addr);
578 
579   // Create a PC-relative address.
580   auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
581   auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
582   auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
583   return (IntPtrTy == Int32Ty)
584              ? PCRelAsInt
585              : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
586 }
587 
588 llvm::Value *
589 CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
590                                           llvm::Value *EncodedAddr) {
591   // Reconstruct the address of the global.
592   auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
593   auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
594   auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
595   auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");
596 
597   // Load the original pointer through the global.
598   return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
599                             "decoded_addr");
600 }
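
// For illustration, with made-up addresses: if the private global GV is laid
// out at 0x5000 and the function F at 0x3000, the encoded constant is
// 0x5000 - 0x3000 = 0x2000 (as an i32). Decoding computes
// sext(0x2000) + 0x3000 = 0x5000 and loads the original Addr through that
// pointer, so no absolute address ever appears in the prologue data itself.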
601 
602 void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
603                                                llvm::Function *Fn)
604 {
605   if (!FD->hasAttr<OpenCLKernelAttr>())
606     return;
607 
608   llvm::LLVMContext &Context = getLLVMContext();
609 
610   CGM.GenOpenCLArgMetadata(Fn, FD, this);
611 
612   if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
613     QualType HintQTy = A->getTypeHint();
614     const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
615     bool IsSignedInteger =
616         HintQTy->isSignedIntegerType() ||
617         (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
618     llvm::Metadata *AttrMDArgs[] = {
619         llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
620             CGM.getTypes().ConvertType(A->getTypeHint()))),
621         llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
622             llvm::IntegerType::get(Context, 32),
623             llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
624     Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
625   }
626 
627   if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
628     llvm::Metadata *AttrMDArgs[] = {
629         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
630         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
631         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
632     Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
633   }
634 
635   if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
636     llvm::Metadata *AttrMDArgs[] = {
637         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
638         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
639         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
640     Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
641   }
642 
643   if (const OpenCLIntelReqdSubGroupSizeAttr *A =
644           FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
645     llvm::Metadata *AttrMDArgs[] = {
646         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
647     Fn->setMetadata("intel_reqd_sub_group_size",
648                     llvm::MDNode::get(Context, AttrMDArgs));
649   }
650 }
651 
652 /// Determine whether the function F ends with a return stmt.
653 static bool endsWithReturn(const Decl* F) {
654   const Stmt *Body = nullptr;
655   if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
656     Body = FD->getBody();
657   else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
658     Body = OMD->getBody();
659 
660   if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
661     auto LastStmt = CS->body_rbegin();
662     if (LastStmt != CS->body_rend())
663       return isa<ReturnStmt>(*LastStmt);
664   }
665   return false;
666 }
667 
668 void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
669   if (SanOpts.has(SanitizerKind::Thread)) {
670     Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
671     Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
672   }
673 }
674 
675 /// Check if the return value of this function requires sanitization.
676 bool CodeGenFunction::requiresReturnValueCheck() const {
677   return requiresReturnValueNullabilityCheck() ||
678          (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
679           CurCodeDecl->getAttr<ReturnsNonNullAttr>());
680 }
681 
682 static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
683   auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
684   if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
685       !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
686       (MD->getNumParams() != 1 && MD->getNumParams() != 2))
687     return false;
688 
689   if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
690     return false;
691 
692   if (MD->getNumParams() == 2) {
693     auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
694     if (!PT || !PT->isVoidPointerType() ||
695         !PT->getPointeeType().isConstQualified())
696       return false;
697   }
698 
699   return true;
700 }
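
// For illustration, hypothetical declarations the matcher above accepts:
//
//   struct Alloc {
//     int *allocate(std::size_t N);                    // 1-param form
//     int *allocate(std::size_t N, const void *Hint);  // 2-param form
//   };
//
// A two-parameter form whose second parameter is not 'const void *' is
// rejected, as is any first parameter that is not exactly size_t.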
701 
702 /// Return the UBSan prologue signature for \p FD if one is available.
703 static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
704                                             const FunctionDecl *FD) {
705   if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
706     if (!MD->isStatic())
707       return nullptr;
708   return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
709 }
710 
711 void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
712                                     llvm::Function *Fn,
713                                     const CGFunctionInfo &FnInfo,
714                                     const FunctionArgList &Args,
715                                     SourceLocation Loc,
716                                     SourceLocation StartLoc) {
717   assert(!CurFn &&
718          "Do not use a CodeGenFunction object for more than one function");
719 
720   const Decl *D = GD.getDecl();
721 
722   DidCallStackSave = false;
723   CurCodeDecl = D;
724   if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
725     if (FD->usesSEHTry())
726       CurSEHParent = FD;
727   CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
728   FnRetTy = RetTy;
729   CurFn = Fn;
730   CurFnInfo = &FnInfo;
731   assert(CurFn->isDeclaration() && "Function already has body?");
732 
733   // If this function has been blacklisted for any of the enabled sanitizers,
734   // disable the sanitizer for the function.
735   do {
736 #define SANITIZER(NAME, ID)                                                    \
737   if (SanOpts.empty())                                                         \
738     break;                                                                     \
739   if (SanOpts.has(SanitizerKind::ID))                                          \
740     if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc))                \
741       SanOpts.set(SanitizerKind::ID, false);
742 
743 #include "clang/Basic/Sanitizers.def"
744 #undef SANITIZER
745   } while (0);
746 
747   if (D) {
748     // Apply the no_sanitize* attributes to SanOpts.
749     for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
750       SanitizerMask mask = Attr->getMask();
751       SanOpts.Mask &= ~mask;
752       if (mask & SanitizerKind::Address)
753         SanOpts.set(SanitizerKind::KernelAddress, false);
754       if (mask & SanitizerKind::KernelAddress)
755         SanOpts.set(SanitizerKind::Address, false);
756       if (mask & SanitizerKind::HWAddress)
757         SanOpts.set(SanitizerKind::KernelHWAddress, false);
758       if (mask & SanitizerKind::KernelHWAddress)
759         SanOpts.set(SanitizerKind::HWAddress, false);
760     }
761   }
762 
763   // Apply sanitizer attributes to the function.
764   if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
765     Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
766   if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
767     Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
768   if (SanOpts.has(SanitizerKind::MemTag))
769     Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
770   if (SanOpts.has(SanitizerKind::Thread))
771     Fn->addFnAttr(llvm::Attribute::SanitizeThread);
772   if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
773     Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
774   if (SanOpts.has(SanitizerKind::SafeStack))
775     Fn->addFnAttr(llvm::Attribute::SafeStack);
776   if (SanOpts.has(SanitizerKind::ShadowCallStack))
777     Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
778 
779   // Apply fuzzing attribute to the function.
780   if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
781     Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
782 
783   // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
784   // .cxx_destruct, __destroy_helper_block_ and all of their callees at run time.
785   if (SanOpts.has(SanitizerKind::Thread)) {
786     if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
787       IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
788       if (OMD->getMethodFamily() == OMF_dealloc ||
789           OMD->getMethodFamily() == OMF_initialize ||
790           (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
791         markAsIgnoreThreadCheckingAtRuntime(Fn);
792       }
793     }
794   }
795 
796   // Ignore unrelated casts in STL allocate() since the allocator must cast
797   // from void* to T* before object initialization completes. Don't match on
798   // the namespace because not all allocators live in std.
799   if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
800     if (matchesStlAllocatorFn(D, getContext()))
801       SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
802   }
803 
804   // Ignore null checks in coroutine functions since the coroutine passes
805   // are not aware of how to move the extra UBSan instructions across the split
806   // coroutine boundaries.
807   if (D && SanOpts.has(SanitizerKind::Null))
808     if (const auto *FD = dyn_cast<FunctionDecl>(D))
809       if (FD->getBody() &&
810           FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
811         SanOpts.Mask &= ~SanitizerKind::Null;
812 
813   if (D) {
814     // Apply xray attributes to the function (as a string, for now)
815     if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
816       if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
817               XRayInstrKind::FunctionEntry) ||
818           CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
819               XRayInstrKind::FunctionExit)) {
820         if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
821           Fn->addFnAttr("function-instrument", "xray-always");
822         if (XRayAttr->neverXRayInstrument())
823           Fn->addFnAttr("function-instrument", "xray-never");
824         if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
825           if (ShouldXRayInstrumentFunction())
826             Fn->addFnAttr("xray-log-args",
827                           llvm::utostr(LogArgs->getArgumentCount()));
828       }
829     } else {
830       if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
831         Fn->addFnAttr(
832             "xray-instruction-threshold",
833             llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
834     }
835 
836     if (ShouldXRayInstrumentFunction()) {
837       if (CGM.getCodeGenOpts().XRayIgnoreLoops)
838         Fn->addFnAttr("xray-ignore-loops");
839 
840       if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
841               XRayInstrKind::FunctionExit))
842         Fn->addFnAttr("xray-skip-exit");
843 
844       if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
845               XRayInstrKind::FunctionEntry))
846         Fn->addFnAttr("xray-skip-entry");
847     }
848 
849     unsigned Count, Offset;
850     if (const auto *Attr = D->getAttr<PatchableFunctionEntryAttr>()) {
851       Count = Attr->getCount();
852       Offset = Attr->getOffset();
853     } else {
854       Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
855       Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
856     }
857     if (Count && Offset <= Count) {
858       Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
859       if (Offset)
860         Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
861     }
862   }
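
  // For illustration: __attribute__((patchable_function_entry(5, 2)))
  // yields Count = 5 and Offset = 2, producing
  // "patchable-function-prefix"="2" (nops before the entry point) and
  // "patchable-function-entry"="3" (nops after it).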
863 
864   // Add no-jump-tables value.
865   Fn->addFnAttr("no-jump-tables",
866                 llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));
867 
868   // Add no-inline-line-tables value.
869   if (CGM.getCodeGenOpts().NoInlineLineTables)
870     Fn->addFnAttr("no-inline-line-tables");
871 
872   // Add profile-sample-accurate value.
873   if (CGM.getCodeGenOpts().ProfileSampleAccurate)
874     Fn->addFnAttr("profile-sample-accurate");
875 
876   if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
877     Fn->addFnAttr("cfi-canonical-jump-table");
878 
879   if (getLangOpts().OpenCL) {
880     // Add metadata for a kernel function.
881     if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
882       EmitOpenCLKernelMetadata(FD, Fn);
883   }
884 
885   // If we are checking function types, emit a function type signature as
886   // prologue data.
887   if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
888     if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
889       if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
890         // Remove any (C++17) exception specifications, to allow calling e.g. a
891         // noexcept function through a non-noexcept pointer.
892         auto ProtoTy =
893           getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
894                                                         EST_None);
895         llvm::Constant *FTRTTIConst =
896             CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
897         llvm::Constant *FTRTTIConstEncoded =
898             EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
899         llvm::Constant *PrologueStructElems[] = {PrologueSig,
900                                                  FTRTTIConstEncoded};
901         llvm::Constant *PrologueStructConst =
902             llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
903         Fn->setPrologueData(PrologueStructConst);
904       }
905     }
906   }
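
  // For illustration, the prologue data built above is a packed anonymous
  // struct of the form { i32 PrologueSig, i32 EncodedRTTI }, so a
  // -fsanitize=function check at an indirect call site can first match the
  // signature and then compare the PC-relative RTTI slot against the type
  // expected at the call.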
907 
908   // If we're checking nullability, we need to know whether we can check the
909   // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
910   if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
911     auto Nullability = FnRetTy->getNullability(getContext());
912     if (Nullability && *Nullability == NullabilityKind::NonNull) {
913       if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
914             CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
915         RetValNullabilityPrecondition =
916             llvm::ConstantInt::getTrue(getLLVMContext());
917     }
918   }
919 
920   // If we're in C++ mode and the function name is "main", it is guaranteed
921   // to be norecurse by the standard (3.6.1.3 "The function main shall not be
922   // used within a program").
923   //
924   // OpenCL C 2.0 v2.2-11 s6.9.i:
925   //     Recursion is not supported.
926   //
927   // SYCL v1.2.1 s3.10:
928   //     kernels cannot include RTTI information, exception classes,
929   //     recursive code, virtual functions or make use of C++ libraries that
930   //     are not compiled for the device.
931   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
932     if ((getLangOpts().CPlusPlus && FD->isMain()) || getLangOpts().OpenCL ||
933         getLangOpts().SYCLIsDevice ||
934         (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>()))
935       Fn->addFnAttr(llvm::Attribute::NoRecurse);
936   }
937 
938   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
939     if (FD->usesFPIntrin())
940       Fn->addFnAttr(llvm::Attribute::StrictFP);
941 
942   // If a custom alignment is used, force realigning to this alignment on
943   // any main function, which will certainly need it.
944   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
945     if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
946         CGM.getCodeGenOpts().StackAlignment)
947       Fn->addFnAttr("stackrealign");
948 
949   llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
950 
951   // Create a marker to make it easy to insert allocas into the entry block
952   // later.  Don't create this with the builder, because we don't want it
953   // folded.
954   llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
955   AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);
956 
957   ReturnBlock = getJumpDestInCurrentScope("return");
958 
959   Builder.SetInsertPoint(EntryBB);
960 
961   // If we're checking the return value, allocate space for a pointer to a
962   // precise source location of the checked return statement.
963   if (requiresReturnValueCheck()) {
964     ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
965     InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
966   }
967 
968   // Emit subprogram debug descriptor.
969   if (CGDebugInfo *DI = getDebugInfo()) {
970     // Reconstruct the type from the argument list so that implicit parameters,
971     // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
972     // convention.
973     CallingConv CC = CallingConv::CC_C;
974     if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
975       if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
976         CC = SrcFnTy->getCallConv();
977     SmallVector<QualType, 16> ArgTypes;
978     for (const VarDecl *VD : Args)
979       ArgTypes.push_back(VD->getType());
980     QualType FnType = getContext().getFunctionType(
981         RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
982     DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
983                           Builder);
984   }
985 
986   if (ShouldInstrumentFunction()) {
987     if (CGM.getCodeGenOpts().InstrumentFunctions)
988       CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
989     if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
990       CurFn->addFnAttr("instrument-function-entry-inlined",
991                        "__cyg_profile_func_enter");
992     if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
993       CurFn->addFnAttr("instrument-function-entry-inlined",
994                        "__cyg_profile_func_enter_bare");
995   }
996 
997   // Since emitting the mcount call here would impact optimizations such as
998   // function inlining, we just add an attribute and let the backend insert
999   // the mcount call. The attribute value is the mcount function name, which
1000   // is architecture-dependent.
1001   if (CGM.getCodeGenOpts().InstrumentForProfiling) {
1002     // Calls to fentry/mcount should not be generated if function has
1003     // the no_instrument_function attribute.
1004     if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
1005       if (CGM.getCodeGenOpts().CallFEntry)
1006         Fn->addFnAttr("fentry-call", "true");
1007       else {
1008         Fn->addFnAttr("instrument-function-entry-inlined",
1009                       getTarget().getMCountName());
1010       }
1011       if (CGM.getCodeGenOpts().MNopMCount) {
1012         if (!CGM.getCodeGenOpts().CallFEntry)
1013           CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1014             << "-mnop-mcount" << "-mfentry";
1015         Fn->addFnAttr("mnop-mcount");
1016       }
1017 
1018       if (CGM.getCodeGenOpts().RecordMCount) {
1019         if (!CGM.getCodeGenOpts().CallFEntry)
1020           CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1021             << "-mrecord-mcount" << "-mfentry";
1022         Fn->addFnAttr("mrecord-mcount");
1023       }
1024     }
1025   }
1026 
1027   if (CGM.getCodeGenOpts().PackedStack) {
1028     if (getContext().getTargetInfo().getTriple().getArch() !=
1029         llvm::Triple::systemz)
1030       CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
1031         << "-mpacked-stack";
1032     Fn->addFnAttr("packed-stack");
1033   }
1034 
1035   if (RetTy->isVoidType()) {
1036     // Void type; nothing to return.
1037     ReturnValue = Address::invalid();
1038 
1039     // Count the implicit return.
1040     if (!endsWithReturn(D))
1041       ++NumReturnExprs;
1042   } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
1043     // Indirect return; emit returned value directly into sret slot.
1044     // This reduces code size, and affects correctness in C++.
1045     auto AI = CurFn->arg_begin();
1046     if (CurFnInfo->getReturnInfo().isSRetAfterThis())
1047       ++AI;
1048     ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
1049     if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
1050       ReturnValuePointer =
1051           CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
1052       Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
1053                               ReturnValue.getPointer(), Int8PtrTy),
1054                           ReturnValuePointer);
1055     }
1056   } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
1057              !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
1058     // Load the sret pointer from the argument struct and return into that.
1059     unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
1060     llvm::Function::arg_iterator EI = CurFn->arg_end();
1061     --EI;
1062     llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
1063     ReturnValuePointer = Address(Addr, getPointerAlign());
1064     Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
1065     ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
1066   } else {
1067     ReturnValue = CreateIRTemp(RetTy, "retval");
1068 
1069     // Tell the epilog emitter to autorelease the result.  We do this
1070     // now so that various specialized functions can suppress it
1071     // during their IR-generation.
1072     if (getLangOpts().ObjCAutoRefCount &&
1073         !CurFnInfo->isReturnsRetained() &&
1074         RetTy->isObjCRetainableType())
1075       AutoreleaseResult = true;
1076   }
1077 
1078   EmitStartEHSpec(CurCodeDecl);
1079 
1080   PrologueCleanupDepth = EHStack.stable_begin();
1081 
1082   // Emit OpenMP specific initialization of the device functions.
1083   if (getLangOpts().OpenMP && CurCodeDecl)
1084     CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
1085 
1086   EmitFunctionProlog(*CurFnInfo, CurFn, Args);
1087 
1088   if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
1089     CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
1090     const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
1091     if (MD->getParent()->isLambda() &&
1092         MD->getOverloadedOperator() == OO_Call) {
1093       // We're in a lambda; figure out the captures.
1094       MD->getParent()->getCaptureFields(LambdaCaptureFields,
1095                                         LambdaThisCaptureField);
1096       if (LambdaThisCaptureField) {
1097         // If the lambda captures the object referred to by '*this', either
1098         // by value or by reference, make sure CXXThisValue points to the
1099         // correct object.
1100 
1101         // Get the lvalue for the field (which is a copy of the enclosing object
1102         // or contains the address of the enclosing object).
1103         LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
1104         if (!LambdaThisCaptureField->getType()->isPointerType()) {
1105           // If the enclosing object was captured by value, just use its address.
1106           CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
1107         } else {
1108           // Load the lvalue pointed to by the field, since '*this' was captured
1109           // by reference.
1110           CXXThisValue =
1111               EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
1112         }
1113       }
1114       for (auto *FD : MD->getParent()->fields()) {
1115         if (FD->hasCapturedVLAType()) {
1116           auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
1117                                            SourceLocation()).getScalarVal();
1118           auto VAT = FD->getCapturedVLAType();
1119           VLASizeMap[VAT->getSizeExpr()] = ExprArg;
1120         }
1121       }
1122     } else {
1123       // Not in a lambda; just use 'this' from the method.
1124       // FIXME: Should we generate a new load for each use of 'this'?  The
1125       // fast register allocator would be happier...
1126       CXXThisValue = CXXABIThisValue;
1127     }
1128 
1129     // Check the 'this' pointer once per function, if it's available.
1130     if (CXXABIThisValue) {
1131       SanitizerSet SkippedChecks;
1132       SkippedChecks.set(SanitizerKind::ObjectSize, true);
1133       QualType ThisTy = MD->getThisType();
1134 
1135       // If this is the call operator of a lambda with no capture-default, it
1136       // may have a static invoker function, which may call this operator with
1137       // a null 'this' pointer.
1138       if (isLambdaCallOperator(MD) &&
1139           MD->getParent()->getLambdaCaptureDefault() == LCD_None)
1140         SkippedChecks.set(SanitizerKind::Null, true);
1141 
1142       EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
1143                                                 : TCK_MemberCall,
1144                     Loc, CXXABIThisValue, ThisTy,
1145                     getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
1146                     SkippedChecks);
1147     }
1148   }
1149 
1150   // If any of the arguments have a variably modified type, make sure to
1151   // emit the type size.
1152   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1153        i != e; ++i) {
1154     const VarDecl *VD = *i;
1155 
1156     // Dig out the type as written from ParmVarDecls; it's unclear whether
1157     // the standard (C99 6.9.1p10) requires this, but we're following the
1158     // precedent set by gcc.
1159     QualType Ty;
1160     if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1161       Ty = PVD->getOriginalType();
1162     else
1163       Ty = VD->getType();
1164 
1165     if (Ty->isVariablyModifiedType())
1166       EmitVariablyModifiedType(Ty);
1167   }
1168   // Emit a location at the end of the prologue.
1169   if (CGDebugInfo *DI = getDebugInfo())
1170     DI->EmitLocation(Builder, StartLoc);
1171 
1172   // TODO: Do we need to handle this in two places like we do with
1173   // target-features/target-cpu?
1174   if (CurFuncDecl)
1175     if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1176       LargestVectorWidth = VecWidth->getVectorWidth();
1177 }
1178 
1179 void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
1180   incrementProfileCounter(Body);
1181   if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
1182     EmitCompoundStmtWithoutScope(*S);
1183   else
1184     EmitStmt(Body);
1185 }
1186 
1187 /// When instrumenting to collect profile data, the counts for some blocks
1188 /// such as switch cases must not include the fall-through counts, so
1189 /// emit a branch around the instrumentation code. When not instrumenting,
1190 /// this just calls EmitBlock().
1191 void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
1192                                                const Stmt *S) {
1193   llvm::BasicBlock *SkipCountBB = nullptr;
1194   if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
1195     // When instrumenting for profiling, the fallthrough to certain
1196     // statements needs to skip over the instrumentation code so that we
1197     // get an accurate count.
1198     SkipCountBB = createBasicBlock("skipcount");
1199     EmitBranch(SkipCountBB);
1200   }
1201   EmitBlock(BB);
1202   uint64_t CurrentCount = getCurrentProfileCount();
1203   incrementProfileCounter(S);
1204   setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
1205   if (SkipCountBB)
1206     EmitBlock(SkipCountBB);
1207 }
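
// For illustration, a switch with fall-through (hypothetical source):
//
//   switch (X) { case 0: f(); /* fall through */ case 1: g(); }
//
// The counter increment for 'case 1' must not absorb the edge falling in
// from 'case 0', so instrumented builds branch around the increment via
// 'skipcount' and then fold the fall-through count back into the total.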
1208 
1209 /// Tries to mark the given function nounwind based on the
1210 /// non-existence of any throwing calls within it.  We believe this is
1211 /// lightweight enough to do at -O0.
1212 static void TryMarkNoThrow(llvm::Function *F) {
1213   // LLVM treats 'nounwind' on a function as part of the type, so we
1214   // can't do this on functions that can be overwritten.
1215   if (F->isInterposable()) return;
1216 
1217   for (llvm::BasicBlock &BB : *F)
1218     for (llvm::Instruction &I : BB)
1219       if (I.mayThrow())
1220         return;
1221 
1222   F->setDoesNotThrow();
1223 }
1224 
1225 QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
1226                                                FunctionArgList &Args) {
1227   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1228   QualType ResTy = FD->getReturnType();
1229 
1230   const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
1231   if (MD && MD->isInstance()) {
1232     if (CGM.getCXXABI().HasThisReturn(GD))
1233       ResTy = MD->getThisType();
1234     else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1235       ResTy = CGM.getContext().VoidPtrTy;
1236     CGM.getCXXABI().buildThisParam(*this, Args);
1237   }
1238 
1239   // The base version of an inheriting constructor whose constructed base is a
1240   // virtual base is not passed any arguments (because it doesn't actually call
1241   // the inherited constructor).
1242   bool PassedParams = true;
1243   if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
1244     if (auto Inherited = CD->getInheritedConstructor())
1245       PassedParams =
1246           getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
1247 
1248   if (PassedParams) {
1249     for (auto *Param : FD->parameters()) {
1250       Args.push_back(Param);
1251       if (!Param->hasAttr<PassObjectSizeAttr>())
1252         continue;
1253 
1254       auto *Implicit = ImplicitParamDecl::Create(
1255           getContext(), Param->getDeclContext(), Param->getLocation(),
1256           /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
1257       SizeArguments[Param] = Implicit;
1258       Args.push_back(Implicit);
1259     }
1260   }
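
  // For illustration, a hypothetical prototype using the attribute:
  //
  //   void fill(char *Buf __attribute__((pass_object_size(0))));
  //
  // produces the argument list (Buf, <implicit size_t>), with the implicit
  // parameter recorded in SizeArguments so call sites can pass the size.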
1261 
1262   if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
1263     CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
1264 
1265   return ResTy;
1266 }
1267 
1268 static bool
1269 shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
1270                                              const ASTContext &Context) {
1271   QualType T = FD->getReturnType();
1272   // Avoid the optimization for functions that return a record type with a
1273   // trivial destructor or another trivially copyable type.
1274   if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
1275     if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1276       return !ClassDecl->hasTrivialDestructor();
1277   }
1278   return !T.isTriviallyCopyableType(Context);
1279 }
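
// For illustration (hypothetical return types): a function returning
// std::string (non-trivial destructor) or another non-trivially-copyable
// type returns true here and keeps the optimization; one returning int or
// a trivially copyable struct returns false.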
1280 
1281 void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1282                                    const CGFunctionInfo &FnInfo) {
1283   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1284   CurGD = GD;
1285 
1286   FunctionArgList Args;
1287   QualType ResTy = BuildFunctionArgList(GD, Args);
1288 
1289   // Check if we should generate debug info for this function.
1290   if (FD->hasAttr<NoDebugAttr>())
1291     DebugInfo = nullptr; // disable debug info indefinitely for this function
1292 
1293   // The function might not have a body if we're generating thunks for a
1294   // function declaration.
1295   SourceRange BodyRange;
1296   if (Stmt *Body = FD->getBody())
1297     BodyRange = Body->getSourceRange();
1298   else
1299     BodyRange = FD->getLocation();
1300   CurEHLocation = BodyRange.getEnd();
1301 
1302   // Use the location of the start of the function to determine where
1303   // the function definition is located. By default use the location
1304   // of the declaration as the location for the subprogram. A function
1305   // may lack a declaration in the source code if it is created by code
1306   // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1307   SourceLocation Loc = FD->getLocation();
1308 
1309   // If this is a function specialization, use the location of the pattern
1310   // declaration that provides the body as the location for the function.
1311   if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1312     if (SpecDecl->hasBody(SpecDecl))
1313       Loc = SpecDecl->getLocation();
1314 
1315   Stmt *Body = FD->getBody();
1316 
1317   // Initialize helper which will detect jumps which can cause invalid lifetime
1318   // markers.
1319   if (Body && ShouldEmitLifetimeMarkers)
1320     Bypasses.Init(Body);
1321 
1322   // Emit the standard function prologue.
1323   StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1324 
1325   // Generate the body of the function.
1326   PGO.assignRegionCounters(GD, CurFn);
1327   if (isa<CXXDestructorDecl>(FD))
1328     EmitDestructorBody(Args);
1329   else if (isa<CXXConstructorDecl>(FD))
1330     EmitConstructorBody(Args);
1331   else if (getLangOpts().CUDA &&
1332            !getLangOpts().CUDAIsDevice &&
1333            FD->hasAttr<CUDAGlobalAttr>())
1334     CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1335   else if (isa<CXXMethodDecl>(FD) &&
1336            cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1337     // The lambda static invoker function is special, because it forwards or
1338     // clones the body of the function call operator (but is actually static).
1339     EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1340   } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1341              (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1342               cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1343     // Implicit copy-assignment gets the same special treatment as implicit
1344     // copy-constructors.
1345     emitImplicitAssignmentOperatorBody(Args);
1346   } else if (Body) {
1347     EmitFunctionBody(Body);
1348   } else
1349     llvm_unreachable("no definition for emitted function");
1350 
1351   // C++11 [stmt.return]p2:
1352   //   Flowing off the end of a function [...] results in undefined behavior in
1353   //   a value-returning function.
1354   // C11 6.9.1p12:
1355   //   If the '}' that terminates a function is reached, and the value of the
1356   //   function call is used by the caller, the behavior is undefined.
1357   if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1358       !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1359     bool ShouldEmitUnreachable =
1360         CGM.getCodeGenOpts().StrictReturn ||
1361         shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
1362     if (SanOpts.has(SanitizerKind::Return)) {
1363       SanitizerScope SanScope(this);
1364       llvm::Value *IsFalse = Builder.getFalse();
1365       EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
1366                 SanitizerHandler::MissingReturn,
1367                 EmitCheckSourceLocation(FD->getLocation()), None);
1368     } else if (ShouldEmitUnreachable) {
1369       if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1370         EmitTrapCall(llvm::Intrinsic::trap);
1371     }
1372     if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1373       Builder.CreateUnreachable();
1374       Builder.ClearInsertionPoint();
1375     }
1376   }
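  // At this point a value-returning function whose control flow can reach the
  // closing '}', e.g. `int f(bool b) { if (b) return 1; }`, ends in a
  // missing_return sanitizer check, a trap call (at -O0), or a plain
  // 'unreachable', depending on the options checked above.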
1377 
1378   // Emit the standard function epilogue.
1379   FinishFunction(BodyRange.getEnd());
1380 
1381   // If we haven't marked the function nothrow through other means, do
1382   // a quick pass now to see if we can.
1383   if (!CurFn->doesNotThrow())
1384     TryMarkNoThrow(CurFn);
1385 }
1386 
1387 /// ContainsLabel - Return true if the statement contains a label in it.  If
1388 /// this statement is not executed normally, it not containing a label means
1389 /// that we can just remove the code.
1390 bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1391   // Null statement, not a label!
1392   if (!S) return false;
1393 
1394   // If this is a label, we have to emit the code, consider something like:
1395   // if (0) {  ...  foo:  bar(); }  goto foo;
1396   //
1397   // TODO: If anyone cared, we could track __label__'s, since we know that you
1398   // can't jump to one from outside their declared region.
1399   if (isa<LabelStmt>(S))
1400     return true;
1401 
1402   // If this is a case/default statement, and we haven't seen a switch, we have
1403   // to emit the code.
1404   if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1405     return true;
1406 
1407   // If this is a switch statement, we want to ignore cases below it.
1408   if (isa<SwitchStmt>(S))
1409     IgnoreCaseStmts = true;
1410 
1411   // Scan subexpressions for verboten labels.
1412   for (const Stmt *SubStmt : S->children())
1413     if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1414       return true;
1415 
1416   return false;
1417 }
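// Example: for `if (0) { L: f(); }` the 'then' block contains the label L, so
// ContainsLabel returns true and the dead block must still be emitted, since
// a `goto L;` elsewhere may jump into it.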
1418 
1419 /// containsBreak - Return true if the statement contains a break out of it.
1420 /// If the statement (recursively) contains a switch or loop with a break
1421 /// inside of it, this is fine.
1422 bool CodeGenFunction::containsBreak(const Stmt *S) {
1423   // A null statement can't contain a break.
1424   if (!S) return false;
1425 
1426   // If this is a switch or loop that defines its own break scope, then we can
1427   // include it and anything inside of it.
1428   if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1429       isa<ForStmt>(S))
1430     return false;
1431 
1432   if (isa<BreakStmt>(S))
1433     return true;
1434 
1435   // Scan subexpressions for verboten breaks.
1436   for (const Stmt *SubStmt : S->children())
1437     if (containsBreak(SubStmt))
1438       return true;
1439 
1440   return false;
1441 }
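// Example: containsBreak returns true for a compound statement like
// `{ f(); break; }`, but false for `while (c) { break; }`, because the while
// loop establishes its own break scope.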
1442 
1443 bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1444   if (!S) return false;
1445 
1446   // Some statement kinds add a scope and thus never add a decl to the current
1447   // scope. Note, this list is longer than the list of statements that might
1448   // have an unscoped decl nested within them, but this way is conservatively
1449   // correct even if more statement kinds are added.
1450   if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1451       isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1452       isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1453       isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1454     return false;
1455 
1456   if (isa<DeclStmt>(S))
1457     return true;
1458 
1459   for (const Stmt *SubStmt : S->children())
1460     if (mightAddDeclToScope(SubStmt))
1461       return true;
1462 
1463   return false;
1464 }
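// Example: `int x = 0;` is a DeclStmt and returns true, while
// `{ int x = 0; }` returns false because the CompoundStmt introduces its own
// scope.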
1465 
1466 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1467 /// to a constant, or if it does but contains a label, return false.  If it
1468 /// constant folds return true and set the boolean result in Result.
1469 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1470                                                    bool &ResultBool,
1471                                                    bool AllowLabels) {
1472   llvm::APSInt ResultInt;
1473   if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1474     return false;
1475 
1476   ResultBool = ResultInt.getBoolValue();
1477   return true;
1478 }
1479 
1480 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1481 /// to a constant, or if it does but contains a label, return false.  If it
1482 /// constant folds return true and set the folded value.
1483 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1484                                                    llvm::APSInt &ResultInt,
1485                                                    bool AllowLabels) {
1486   // FIXME: Rename and handle conversion of other evaluatable things
1487   // to bool.
1488   Expr::EvalResult Result;
1489   if (!Cond->EvaluateAsInt(Result, getContext()))
1490     return false;  // Not foldable, not integer or not fully evaluatable.
1491 
1492   llvm::APSInt Int = Result.Val.getInt();
1493   if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1494     return false;  // Contains a label.
1495 
1496   ResultInt = Int;
1497   return true;
1498 }
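// Example: a condition like `sizeof(long) == 8` folds to a constant and
// returns true with ResultInt set; a foldable condition that contains a label
// still returns false unless AllowLabels is set.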
1499 
1502 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1503 /// statement) to the specified blocks.  Based on the condition, this might try
1504 /// to simplify the codegen of the conditional based on the branch.
1505 ///
1506 void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
1507                                            llvm::BasicBlock *TrueBlock,
1508                                            llvm::BasicBlock *FalseBlock,
1509                                            uint64_t TrueCount) {
1510   Cond = Cond->IgnoreParens();
1511 
1512   if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1513 
1514     // Handle X && Y in a condition.
1515     if (CondBOp->getOpcode() == BO_LAnd) {
1516       // If we have "1 && X", simplify the code.  "0 && X" would have constant
1517       // folded if the case was simple enough.
1518       bool ConstantBool = false;
1519       if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1520           ConstantBool) {
1521         // br(1 && X) -> br(X).
1522         incrementProfileCounter(CondBOp);
1523         return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1524                                     TrueCount);
1525       }
1526 
1527       // If we have "X && 1", simplify the code to use an uncond branch.
1528       // "X && 0" would have been constant folded to 0.
1529       if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1530           ConstantBool) {
1531         // br(X && 1) -> br(X).
1532         return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1533                                     TrueCount);
1534       }
1535 
1536       // Emit the LHS as a conditional.  If the LHS conditional is false, we
1537       // want to jump to the FalseBlock.
1538       llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1539       // The counter tells us how often we evaluate RHS, and all of TrueCount
1540       // can be propagated to that branch.
1541       uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1542 
1543       ConditionalEvaluation eval(*this);
1544       {
1545         ApplyDebugLocation DL(*this, Cond);
1546         EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
1547         EmitBlock(LHSTrue);
1548       }
1549 
1550       incrementProfileCounter(CondBOp);
1551       setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1552 
1553       // Any temporaries created here are conditional.
1554       eval.begin(*this);
1555       EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
1556       eval.end(*this);
1557 
1558       return;
1559     }
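    // Net effect for a non-constant `a && b`: emit br(a, land.lhs.true,
    // FalseBlock), then from land.lhs.true emit br(b, TrueBlock, FalseBlock),
    // with any temporaries of b treated as conditionally evaluated.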
1560 
1561     if (CondBOp->getOpcode() == BO_LOr) {
1562       // If we have "0 || X", simplify the code.  "1 || X" would have constant
1563       // folded if the case was simple enough.
1564       bool ConstantBool = false;
1565       if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1566           !ConstantBool) {
1567         // br(0 || X) -> br(X).
1568         incrementProfileCounter(CondBOp);
1569         return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1570                                     TrueCount);
1571       }
1572 
1573       // If we have "X || 0", simplify the code to use an uncond branch.
1574       // "X || 1" would have been constant folded to 1.
1575       if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1576           !ConstantBool) {
1577         // br(X || 0) -> br(X).
1578         return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1579                                     TrueCount);
1580       }
1581 
1582       // Emit the LHS as a conditional.  If the LHS conditional is true, we
1583       // want to jump to the TrueBlock.
1584       llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1585       // We have the count for entry to the RHS and for the whole expression
1586       // being true, so we can divvy up the true count between the short
1587       // circuit and the RHS.
1588       uint64_t LHSCount =
1589           getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1590       uint64_t RHSCount = TrueCount - LHSCount;
1591 
1592       ConditionalEvaluation eval(*this);
1593       {
1594         ApplyDebugLocation DL(*this, Cond);
1595         EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
1596         EmitBlock(LHSFalse);
1597       }
1598 
1599       incrementProfileCounter(CondBOp);
1600       setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1601 
1602       // Any temporaries created here are conditional.
1603       eval.begin(*this);
1604       EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);
1605 
1606       eval.end(*this);
1607 
1608       return;
1609     }
1610   }
1611 
1612   if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1613     // br(!x, t, f) -> br(x, f, t)
1614     if (CondUOp->getOpcode() == UO_LNot) {
1615       // Negate the count.
1616       uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1617       // Negate the condition and swap the destination blocks.
1618       return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1619                                   FalseCount);
1620     }
1621   }
1622 
1623   if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1624     // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1625     llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1626     llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1627 
1628     ConditionalEvaluation cond(*this);
1629     EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1630                          getProfileCount(CondOp));
1631 
1632     // When computing PGO branch weights, we only know the overall count for
1633     // the true block. This code is essentially doing tail duplication of the
1634     // naive code-gen, introducing new edges for which counts are not
1635     // available. Divide the counts proportionally between the LHS and RHS of
1636     // the conditional operator.
1637     uint64_t LHSScaledTrueCount = 0;
1638     if (TrueCount) {
1639       double LHSRatio =
1640           getProfileCount(CondOp) / (double)getCurrentProfileCount();
1641       LHSScaledTrueCount = TrueCount * LHSRatio;
1642     }
1643 
1644     cond.begin(*this);
1645     EmitBlock(LHSBlock);
1646     incrementProfileCounter(CondOp);
1647     {
1648       ApplyDebugLocation DL(*this, Cond);
1649       EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1650                            LHSScaledTrueCount);
1651     }
1652     cond.end(*this);
1653 
1654     cond.begin(*this);
1655     EmitBlock(RHSBlock);
1656     EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1657                          TrueCount - LHSScaledTrueCount);
1658     cond.end(*this);
1659 
1660     return;
1661   }
1662 
1663   if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1664     // Conditional operator handling can give us a throw expression as a
1665     // condition for a case like:
1666     //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
1667     // Fold this to:
1668     //   br(c, throw x, br(y, t, f))
1669     EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1670     return;
1671   }
1672 
1673   // If the branch has a condition wrapped by __builtin_unpredictable,
1674   // create metadata that specifies that the branch is unpredictable.
1675   // Don't bother if not optimizing because that metadata would not be used.
1676   llvm::MDNode *Unpredictable = nullptr;
1677   auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1678   if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1679     auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1680     if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1681       llvm::MDBuilder MDHelper(getLLVMContext());
1682       Unpredictable = MDHelper.createUnpredictable();
1683     }
1684   }
1685 
1686   // Create branch weights based on the number of times we get here and the
1687   // number of times the condition should be true.
1688   uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1689   llvm::MDNode *Weights =
1690       createProfileWeights(TrueCount, CurrentCount - TrueCount);
1691 
1692   // Emit the code with the fully general case.
1693   llvm::Value *CondV;
1694   {
1695     ApplyDebugLocation DL(*this, Cond);
1696     CondV = EvaluateExprAsBool(Cond);
1697   }
1698   Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
1699 }
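// Usage note: a condition written as `if (__builtin_unpredictable(x > 0))`
// funnels through the general path above, so the resulting conditional branch
// carries !unpredictable metadata (when optimizing) in addition to any !prof
// branch weights.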
1700 
1701 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1702 /// specified stmt yet.
1703 void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
1704   CGM.ErrorUnsupported(S, Type);
1705 }
1706 
1707 /// emitNonZeroVLAInit - Emit the "zero" initialization of a
1708 /// variable-length array whose elements have a non-zero bit-pattern.
1709 ///
1710 /// \param baseType the inner-most element type of the array
1711 /// \param src - a char* pointing to the bit-pattern for a single
1712 /// base element of the array
1713 /// \param sizeInChars - the total size of the VLA, in chars
1714 static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
1715                                Address dest, Address src,
1716                                llvm::Value *sizeInChars) {
1717   CGBuilderTy &Builder = CGF.Builder;
1718 
1719   CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
1720   llvm::Value *baseSizeInChars
1721     = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
1722 
1723   Address begin =
1724     Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
1725   llvm::Value *end =
1726     Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");
1727 
1728   llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
1729   llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
1730   llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
1731 
1732   // Make a loop over the VLA.  C99 guarantees that the VLA element
1733   // count must be nonzero.
1734   CGF.EmitBlock(loopBB);
1735 
1736   llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
1737   cur->addIncoming(begin.getPointer(), originBB);
1738 
1739   CharUnits curAlign =
1740     dest.getAlignment().alignmentOfArrayElement(baseSize);
1741 
1742   // memcpy the individual element bit-pattern.
1743   Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
1744                        /*volatile*/ false);
1745 
1746   // Go to the next element.
1747   llvm::Value *next =
1748     Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
1749 
1750   // Leave if that's the end of the VLA.
1751   llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
1752   Builder.CreateCondBr(done, contBB, loopBB);
1753   cur->addIncoming(next, loopBB);
1754 
1755   CGF.EmitBlock(contBB);
1756 }
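// This loop is reached, e.g., when null-initializing a VLA of pointers to
// data members under the Itanium ABI, whose null representation is -1 rather
// than all-zero bits.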
1757 
1758 void
1759 CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
1760   // Ignore empty classes in C++.
1761   if (getLangOpts().CPlusPlus) {
1762     if (const RecordType *RT = Ty->getAs<RecordType>()) {
1763       if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
1764         return;
1765     }
1766   }
1767 
1768   // Cast the dest ptr to the appropriate i8 pointer type.
1769   if (DestPtr.getElementType() != Int8Ty)
1770     DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1771 
1772   // Get size and alignment info for this aggregate.
1773   CharUnits size = getContext().getTypeSizeInChars(Ty);
1774 
1775   llvm::Value *SizeVal;
1776   const VariableArrayType *vla;
1777 
1778   // Don't bother emitting a zero-byte memset.
1779   if (size.isZero()) {
1780     // But note that getTypeInfo returns 0 for a VLA.
1781     if (const VariableArrayType *vlaType =
1782           dyn_cast_or_null<VariableArrayType>(
1783                                           getContext().getAsArrayType(Ty))) {
1784       auto VlaSize = getVLASize(vlaType);
1785       SizeVal = VlaSize.NumElts;
1786       CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1787       if (!eltSize.isOne())
1788         SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
1789       vla = vlaType;
1790     } else {
1791       return;
1792     }
1793   } else {
1794     SizeVal = CGM.getSize(size);
1795     vla = nullptr;
1796   }
1797 
1798   // If the type contains a pointer to data member we can't memset it to zero.
1799   // Instead, create a null constant and copy it to the destination.
1800   // TODO: there are other patterns besides zero that we can usefully memset,
1801   // like -1, which happens to be the pattern used by member-pointers.
1802   if (!CGM.getTypes().isZeroInitializable(Ty)) {
1803     // For a VLA, emit a single element, then splat that over the VLA.
1804     if (vla) Ty = getContext().getBaseElementType(vla);
1805 
1806     llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
1807 
1808     llvm::GlobalVariable *NullVariable =
1809       new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
1810                                /*isConstant=*/true,
1811                                llvm::GlobalVariable::PrivateLinkage,
1812                                NullConstant, Twine());
1813     CharUnits NullAlign = DestPtr.getAlignment();
1814     NullVariable->setAlignment(NullAlign.getAsAlign());
1815     Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
1816                    NullAlign);
1817 
1818     if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
1819 
1820     // Get and call the appropriate llvm.memcpy overload.
1821     Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
1822     return;
1823   }
1824 
1825   // Otherwise, just memset the whole thing to zero.  This is legal
1826   // because in LLVM, all default initializers (other than the ones we just
1827   // handled above) are guaranteed to have a bit pattern of all zeros.
1828   Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
1829 }
1830 
1831 llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
1832   // Make sure that there is a block for the indirect goto.
1833   if (!IndirectBranch)
1834     GetIndirectGotoBlock();
1835 
1836   llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
1837 
1838   // Make sure the indirect branch includes all of the address-taken blocks.
1839   IndirectBranch->addDestination(BB);
1840   return llvm::BlockAddress::get(CurFn, BB);
1841 }
1842 
1843 llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
1844   // If we already made the indirect branch for indirect goto, return its block.
1845   if (IndirectBranch) return IndirectBranch->getParent();
1846 
1847   CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
1848 
1849   // Create the PHI node that indirect gotos will add entries to.
1850   llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
1851                                               "indirect.goto.dest");
1852 
1853   // Create the indirect branch instruction.
1854   IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
1855   return IndirectBranch->getParent();
1856 }
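// Every `goto *expr;` in the function branches to this single shared block,
// feeding its destination into the PHI node above; GetAddrOfLabel registers
// each address-taken label as a possible destination of the indirectbr.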
1857 
1858 /// Computes the length of an array in elements, as well as the base
1859 /// element type and a properly-typed first element pointer.
1860 llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
1861                                               QualType &baseType,
1862                                               Address &addr) {
1863   const ArrayType *arrayType = origArrayType;
1864 
1865   // If it's a VLA, we have to load the stored size.  Note that this is
1866   // the total element count across the VLA dimensions, not a size in bytes.
1867   llvm::Value *numVLAElements = nullptr;
1868   if (isa<VariableArrayType>(arrayType)) {
1869     numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
1870 
1871     // Walk into all VLAs.  This doesn't require changes to addr,
1872     // which has type T* where T is the first non-VLA element type.
1873     do {
1874       QualType elementType = arrayType->getElementType();
1875       arrayType = getContext().getAsArrayType(elementType);
1876 
1877       // If we only have VLA components, 'addr' requires no adjustment.
1878       if (!arrayType) {
1879         baseType = elementType;
1880         return numVLAElements;
1881       }
1882     } while (isa<VariableArrayType>(arrayType));
1883 
1884     // We get out here only if we find a constant array type
1885     // inside the VLA.
1886   }
1887 
1888   // We have some number of constant-length arrays, so addr should
1889   // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
1890   // down to the first element of addr.
1891   SmallVector<llvm::Value*, 8> gepIndices;
1892 
1893   // GEP down to the array type.
1894   llvm::ConstantInt *zero = Builder.getInt32(0);
1895   gepIndices.push_back(zero);
1896 
1897   uint64_t countFromCLAs = 1;
1898   QualType eltType;
1899 
1900   llvm::ArrayType *llvmArrayType =
1901     dyn_cast<llvm::ArrayType>(addr.getElementType());
1902   while (llvmArrayType) {
1903     assert(isa<ConstantArrayType>(arrayType));
1904     assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
1905              == llvmArrayType->getNumElements());
1906 
1907     gepIndices.push_back(zero);
1908     countFromCLAs *= llvmArrayType->getNumElements();
1909     eltType = arrayType->getElementType();
1910 
1911     llvmArrayType =
1912       dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
1913     arrayType = getContext().getAsArrayType(arrayType->getElementType());
1914     assert((!llvmArrayType || arrayType) &&
1915            "LLVM and Clang types are out-of-synch");
1916   }
1917 
1918   if (arrayType) {
1919     // From this point onwards, the Clang array type has been emitted
1920     // as some other type (probably a packed struct). Compute the array
1921     // size, and just emit the 'begin' expression as a bitcast.
1922     while (arrayType) {
1923       countFromCLAs *=
1924           cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
1925       eltType = arrayType->getElementType();
1926       arrayType = getContext().getAsArrayType(eltType);
1927     }
1928 
1929     llvm::Type *baseType = ConvertType(eltType);
1930     addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
1931   } else {
1932     // Create the actual GEP.
1933     addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
1934                                              gepIndices, "array.begin"),
1935                    addr.getAlignment());
1936   }
1937 
1938   baseType = eltType;
1939 
1940   llvm::Value *numElements
1941     = llvm::ConstantInt::get(SizeTy, countFromCLAs);
1942 
1943   // If we had any VLA dimensions, factor them in.
1944   if (numVLAElements)
1945     numElements = Builder.CreateNUWMul(numVLAElements, numElements);
1946 
1947   return numElements;
1948 }
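// Example: for `int a[2][3]`, addr has LLVM type [2 x [3 x i32]]*, the GEP
// indices become (0, 0, 0), countFromCLAs is 6, baseType is int, and the
// returned element count is the constant 6.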
1949 
1950 CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
1951   const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1952   assert(vla && "type was not a variable array type!");
1953   return getVLASize(vla);
1954 }
1955 
1956 CodeGenFunction::VlaSizePair
1957 CodeGenFunction::getVLASize(const VariableArrayType *type) {
1958   // The number of elements so far; always size_t.
1959   llvm::Value *numElements = nullptr;
1960 
1961   QualType elementType;
1962   do {
1963     elementType = type->getElementType();
1964     llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
1965     assert(vlaSize && "no size for VLA!");
1966     assert(vlaSize->getType() == SizeTy);
1967 
1968     if (!numElements) {
1969       numElements = vlaSize;
1970     } else {
1971       // It's undefined behavior if this wraps around, so mark it that way.
1972       // FIXME: Teach -fsanitize=undefined to trap this.
1973       numElements = Builder.CreateNUWMul(numElements, vlaSize);
1974     }
1975   } while ((type = getContext().getAsVariableArrayType(elementType)));
1976 
1977   return { numElements, elementType };
1978 }
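// Example: for `int a[n][m]` this walks both VLA dimensions and returns
// { n*m (a size_t NUW multiply), int }.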
1979 
1980 CodeGenFunction::VlaSizePair
1981 CodeGenFunction::getVLAElements1D(QualType type) {
1982   const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1983   assert(vla && "type was not a variable array type!");
1984   return getVLAElements1D(vla);
1985 }
1986 
1987 CodeGenFunction::VlaSizePair
1988 CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
1989   llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
1990   assert(VlaSize && "no size for VLA!");
1991   assert(VlaSize->getType() == SizeTy);
1992   return { VlaSize, Vla->getElementType() };
1993 }
1994 
1995 void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
1996   assert(type->isVariablyModifiedType() &&
1997          "Must pass variably modified type to EmitVLASizes!");
1998 
1999   EnsureInsertPoint();
2000 
2001   // We're going to walk down into the type and look for VLA
2002   // expressions.
2003   do {
2004     assert(type->isVariablyModifiedType());
2005 
2006     const Type *ty = type.getTypePtr();
2007     switch (ty->getTypeClass()) {
2008 
2009 #define TYPE(Class, Base)
2010 #define ABSTRACT_TYPE(Class, Base)
2011 #define NON_CANONICAL_TYPE(Class, Base)
2012 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
2013 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
2014 #include "clang/AST/TypeNodes.inc"
2015       llvm_unreachable("unexpected dependent type!");
2016 
2017     // These types are never variably-modified.
2018     case Type::Builtin:
2019     case Type::Complex:
2020     case Type::Vector:
2021     case Type::ExtVector:
2022     case Type::Record:
2023     case Type::Enum:
2024     case Type::Elaborated:
2025     case Type::TemplateSpecialization:
2026     case Type::ObjCTypeParam:
2027     case Type::ObjCObject:
2028     case Type::ObjCInterface:
2029     case Type::ObjCObjectPointer:
2030       llvm_unreachable("type class is never variably-modified!");
2031 
2032     case Type::Adjusted:
2033       type = cast<AdjustedType>(ty)->getAdjustedType();
2034       break;
2035 
2036     case Type::Decayed:
2037       type = cast<DecayedType>(ty)->getPointeeType();
2038       break;
2039 
2040     case Type::Pointer:
2041       type = cast<PointerType>(ty)->getPointeeType();
2042       break;
2043 
2044     case Type::BlockPointer:
2045       type = cast<BlockPointerType>(ty)->getPointeeType();
2046       break;
2047 
2048     case Type::LValueReference:
2049     case Type::RValueReference:
2050       type = cast<ReferenceType>(ty)->getPointeeType();
2051       break;
2052 
2053     case Type::MemberPointer:
2054       type = cast<MemberPointerType>(ty)->getPointeeType();
2055       break;
2056 
2057     case Type::ConstantArray:
2058     case Type::IncompleteArray:
2059       // Losing element qualification here is fine.
2060       type = cast<ArrayType>(ty)->getElementType();
2061       break;
2062 
2063     case Type::VariableArray: {
2064       // Losing element qualification here is fine.
2065       const VariableArrayType *vat = cast<VariableArrayType>(ty);
2066 
2067       // Unknown size indication requires no size computation.
2068       // Otherwise, evaluate and record it.
2069       if (const Expr *size = vat->getSizeExpr()) {
2070         // It's possible that we might have emitted this already,
2071         // e.g. with a typedef and a pointer to it.
2072         llvm::Value *&entry = VLASizeMap[size];
2073         if (!entry) {
2074           llvm::Value *Size = EmitScalarExpr(size);
2075 
2076           // C11 6.7.6.2p5:
2077           //   If the size is an expression that is not an integer constant
2078           //   expression [...] each time it is evaluated it shall have a value
2079           //   greater than zero.
2080           if (SanOpts.has(SanitizerKind::VLABound) &&
2081               size->getType()->isSignedIntegerType()) {
2082             SanitizerScope SanScope(this);
2083             llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
2084             llvm::Constant *StaticArgs[] = {
2085                 EmitCheckSourceLocation(size->getBeginLoc()),
2086                 EmitCheckTypeDescriptor(size->getType())};
2087             EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
2088                                      SanitizerKind::VLABound),
2089                       SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
2090           }
2091 
2092           // Always zexting here would be wrong if it weren't
2093           // undefined behavior to have a negative bound.
2094           entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
2095         }
2096       }
2097       type = vat->getElementType();
2098       break;
2099     }
2100 
2101     case Type::FunctionProto:
2102     case Type::FunctionNoProto:
2103       type = cast<FunctionType>(ty)->getReturnType();
2104       break;
2105 
2106     case Type::Paren:
2107     case Type::TypeOf:
2108     case Type::UnaryTransform:
2109     case Type::Attributed:
2110     case Type::SubstTemplateTypeParm:
2111     case Type::PackExpansion:
2112     case Type::MacroQualified:
2113       // Keep walking after single level desugaring.
2114       type = type.getSingleStepDesugaredType(getContext());
2115       break;
2116 
2117     case Type::Typedef:
2118     case Type::Decltype:
2119     case Type::Auto:
2120     case Type::DeducedTemplateSpecialization:
2121       // Stop walking: nothing to do.
2122       return;
2123 
2124     case Type::TypeOfExpr:
2125       // Stop walking: emit typeof expression.
2126       EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2127       return;
2128 
2129     case Type::Atomic:
2130       type = cast<AtomicType>(ty)->getValueType();
2131       break;
2132 
2133     case Type::Pipe:
2134       type = cast<PipeType>(ty)->getElementType();
2135       break;
2136     }
2137   } while (type->isVariablyModifiedType());
2138 }
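// Example: for `void f(int n) { int (*p)[n]; ... }`, the walk descends
// Pointer -> VariableArray, evaluates `n` once, and caches the zero-extended
// size in VLASizeMap keyed by the size expression.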
2139 
2140 Address CodeGenFunction::EmitVAListRef(const Expr* E) {
2141   if (getContext().getBuiltinVaListType()->isArrayType())
2142     return EmitPointerWithAlignment(E);
2143   return EmitLValue(E).getAddress(*this);
2144 }
2145 
2146 Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2147   return EmitLValue(E).getAddress(*this);
2148 }
2149 
2150 void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2151                                               const APValue &Init) {
2152   assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2153   if (CGDebugInfo *Dbg = getDebugInfo())
2154     if (CGM.getCodeGenOpts().hasReducedDebugInfo())
2155       Dbg->EmitGlobalVariable(E->getDecl(), Init);
2156 }
2157 
2158 CodeGenFunction::PeepholeProtection
2159 CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2160   // At the moment, the only aggressive peephole we do in IR gen
2161   // is trunc(zext) folding, but if we add more, we can easily
2162   // extend this protection.
2163 
2164   if (!rvalue.isScalar()) return PeepholeProtection();
2165   llvm::Value *value = rvalue.getScalarVal();
2166   if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2167 
2168   // Just make an extra bitcast.
2169   assert(HaveInsertPoint());
2170   llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2171                                                   Builder.GetInsertBlock());
2172 
2173   PeepholeProtection protection;
2174   protection.Inst = inst;
2175   return protection;
2176 }
2177 
2178 void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2179   if (!protection.Inst) return;
2180 
2181   // In theory, we could try to duplicate the peepholes now, but whatever.
2182   protection.Inst->eraseFromParent();
2183 }
2184 
2185 void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2186                                               QualType Ty, SourceLocation Loc,
2187                                               SourceLocation AssumptionLoc,
2188                                               llvm::Value *Alignment,
2189                                               llvm::Value *OffsetValue) {
2190   llvm::Value *TheCheck;
2191   llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2192       CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
2193   if (SanOpts.has(SanitizerKind::Alignment)) {
2194     emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2195                                  OffsetValue, TheCheck, Assumption);
2196   }
2197 }
2198 
2199 void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2200                                               const Expr *E,
2201                                               SourceLocation AssumptionLoc,
2202                                               llvm::Value *Alignment,
2203                                               llvm::Value *OffsetValue) {
2204   if (auto *CE = dyn_cast<CastExpr>(E))
2205     E = CE->getSubExprAsWritten();
2206   QualType Ty = E->getType();
2207   SourceLocation Loc = E->getExprLoc();
2208 
2209   emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2210                           OffsetValue);
2211 }
2212 
2213 llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2214                                                  llvm::Value *AnnotatedVal,
2215                                                  StringRef AnnotationStr,
2216                                                  SourceLocation Location) {
2217   llvm::Value *Args[4] = {
2218     AnnotatedVal,
2219     Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
2220     Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
2221     CGM.EmitAnnotationLineNo(Location)
2222   };
2223   return Builder.CreateCall(AnnotationFn, Args);
2224 }
2225 
2226 void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2227   assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2228   // FIXME We create a new bitcast for every annotation because that's what
2229   // llvm-gcc was doing.
2230   for (const auto *I : D->specific_attrs<AnnotateAttr>())
2231     EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
2232                        Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
2233                        I->getAnnotation(), D->getLocation());
2234 }
2235 
2236 Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2237                                               Address Addr) {
2238   assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2239   llvm::Value *V = Addr.getPointer();
2240   llvm::Type *VTy = V->getType();
2241   llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2242                                     CGM.Int8PtrTy);
2243 
2244   for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2245     // FIXME Always emit the cast inst so we can differentiate between
2246     // annotation on the first field of a struct and annotation on the struct
2247     // itself.
2248     if (VTy != CGM.Int8PtrTy)
2249       V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
2250     V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
2251     V = Builder.CreateBitCast(V, VTy);
2252   }
2253 
2254   return Address(V, Addr.getAlignment());
2255 }
2256 
2257 CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2258 
2259 CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2260     : CGF(CGF) {
2261   assert(!CGF->IsSanitizerScope);
2262   CGF->IsSanitizerScope = true;
2263 }
2264 
2265 CodeGenFunction::SanitizerScope::~SanitizerScope() {
2266   CGF->IsSanitizerScope = false;
2267 }
2268 
2269 void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2270                                    const llvm::Twine &Name,
2271                                    llvm::BasicBlock *BB,
2272                                    llvm::BasicBlock::iterator InsertPt) const {
2273   LoopStack.InsertHelper(I);
2274   if (IsSanitizerScope)
2275     CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
2276 }
2277 
2278 void CGBuilderInserter::InsertHelper(
2279     llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2280     llvm::BasicBlock::iterator InsertPt) const {
2281   llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2282   if (CGF)
2283     CGF->InsertHelper(I, Name, BB, InsertPt);
2284 }
2285 
2286 static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
2287                                 CodeGenModule &CGM, const FunctionDecl *FD,
2288                                 std::string &FirstMissing) {
2289   // If there aren't any required features listed, then go ahead and return.
2290   if (ReqFeatures.empty())
2291     return false;
2292 
2293   // Now build up the set of caller features and verify that all the required
2294   // features are there.
2295   llvm::StringMap<bool> CallerFeatureMap;
2296   CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
2297 
2298   // Every required feature must be present in the caller; an entry may be
2299   // a '|'-separated list of alternatives, of which at least one suffices.
2300   return std::all_of(
2301       ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
2302         SmallVector<StringRef, 1> OrFeatures;
2303         Feature.split(OrFeatures, '|');
2304         return llvm::any_of(OrFeatures, [&](StringRef Feature) {
2305           if (!CallerFeatureMap.lookup(Feature)) {
2306             FirstMissing = Feature.str();
2307             return false;
2308           }
2309           return true;
2310         });
2311       });
2312 }
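// Example: ReqFeatures = {"sse4.2", "avx|avx2"} is satisfied only if the
// caller enables sse4.2 and at least one of avx or avx2; on failure,
// FirstMissing names a missing alternative.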
2313 
2314 // Emits an error if we don't have a valid set of target features for the
2315 // called function.
2316 void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2317                                           const FunctionDecl *TargetDecl) {
2318   return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2319 }
2320 
2321 // Emits an error if we don't have a valid set of target features for the
2322 // called function.
2323 void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2324                                           const FunctionDecl *TargetDecl) {
2325   // Early exit if this is an indirect call.
2326   if (!TargetDecl)
2327     return;
2328 
2329   // Get the current enclosing function if it exists. If it doesn't,
2330   // we can't check the target features anyhow.
2331   const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2332   if (!FD)
2333     return;
2334 
2335   // Grab the required features for the call. For a builtin this is listed
2336   // in the .td file with the default CPU; for an always_inline function
2337   // this is any listed CPU and any listed features.
2338   unsigned BuiltinID = TargetDecl->getBuiltinID();
2339   std::string MissingFeature;
2340   if (BuiltinID) {
2341     SmallVector<StringRef, 1> ReqFeatures;
2342     const char *FeatureList =
2343         CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2344     // Return if the builtin doesn't have any required features.
2345     if (!FeatureList || StringRef(FeatureList) == "")
2346       return;
2347     StringRef(FeatureList).split(ReqFeatures, ',');
2348     if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2349       CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2350           << TargetDecl->getDeclName()
2351           << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2352 
2353   } else if (!TargetDecl->isMultiVersion() &&
2354              TargetDecl->hasAttr<TargetAttr>()) {
2355     // Get the required features for the callee.
2356 
2357     const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2358     ParsedTargetAttr ParsedAttr =
2359         CGM.getContext().filterFunctionTargetAttrs(TD);
2360 
2361     SmallVector<StringRef, 1> ReqFeatures;
2362     llvm::StringMap<bool> CalleeFeatureMap;
2363     CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap,
2364                                            GlobalDecl(TargetDecl));
2365 
2366     for (const auto &F : ParsedAttr.Features) {
2367       if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2368         ReqFeatures.push_back(StringRef(F).substr(1));
2369     }
2370 
2371     for (const auto &F : CalleeFeatureMap) {
2372       // Only positive features are "required".
2373       if (F.getValue())
2374         ReqFeatures.push_back(F.getKey());
2375     }
2376     if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2377       CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2378           << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2379   }
2380 }
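// For instance, calling an AVX builtin from a function compiled without the
// 'avx' feature (and without a suitable target attribute) is diagnosed here
// with err_builtin_needs_feature rather than silently miscompiled.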
2381 
2382 void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2383   if (!CGM.getCodeGenOpts().SanitizeStats)
2384     return;
2385 
2386   llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2387   IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2388   CGM.getSanStats().create(IRB, SSK);
2389 }
2390 
2391 llvm::Value *
2392 CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
2393   llvm::Value *Condition = nullptr;
2394 
2395   if (!RO.Conditions.Architecture.empty())
2396     Condition = EmitX86CpuIs(RO.Conditions.Architecture);
2397 
2398   if (!RO.Conditions.Features.empty()) {
2399     llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2400     Condition =
2401         Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2402   }
2403   return Condition;
2404 }
2405 
2406 static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2407                                              llvm::Function *Resolver,
2408                                              CGBuilderTy &Builder,
2409                                              llvm::Function *FuncToReturn,
2410                                              bool SupportsIFunc) {
2411   if (SupportsIFunc) {
2412     Builder.CreateRet(FuncToReturn);
2413     return;
2414   }
2415 
2416   llvm::SmallVector<llvm::Value *, 10> Args;
2417   llvm::for_each(Resolver->args(),
2418                  [&](llvm::Argument &Arg) { Args.push_back(&Arg); });
2419 
2420   llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2421   Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2422 
2423   if (Resolver->getReturnType()->isVoidTy())
2424     Builder.CreateRetVoid();
2425   else
2426     Builder.CreateRet(Result);
2427 }
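// With ifunc support the resolver simply returns the chosen function pointer;
// without it, the resolver body forwards its own arguments to the chosen
// version via a musttail call, so dispatch adds no extra stack frame.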
2428 
2429 void CodeGenFunction::EmitMultiVersionResolver(
2430     llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2431   assert(getContext().getTargetInfo().getTriple().isX86() &&
2432          "Only implemented for x86 targets");
2433 
2434   bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2435 
2436   // The resolver function's entry basic block.
2437   llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2438   Builder.SetInsertPoint(CurBlock);
2439   EmitX86CpuInit();
2440 
2441   for (const MultiVersionResolverOption &RO : Options) {
2442     Builder.SetInsertPoint(CurBlock);
2443     llvm::Value *Condition = FormResolverCondition(RO);
2444 
2445     // The 'default' or 'generic' case.
2446     if (!Condition) {
2447       assert(&RO == Options.end() - 1 &&
2448              "Default or Generic case must be last");
2449       CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2450                                        SupportsIFunc);
2451       return;
2452     }
2453 
2454     llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2455     CGBuilderTy RetBuilder(*this, RetBlock);
2456     CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2457                                      SupportsIFunc);
2458     CurBlock = createBasicBlock("resolver_else", Resolver);
2459     Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2460   }
2461 
2462   // If no generic/default, emit an unreachable.
2463   Builder.SetInsertPoint(CurBlock);
2464   llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2465   TrapCall->setDoesNotReturn();
2466   TrapCall->setDoesNotThrow();
2467   Builder.CreateUnreachable();
2468   Builder.ClearInsertionPoint();
2469 }
2470 
2471 // Loc - where the diagnostic will point: the place in the source code where
2472 //  this alignment assumption failed.
2473 // SecondaryLoc - if present (it will be, if sufficiently different from
2474 //  Loc), the diagnostic will additionally point a "Note:" to this location.
2475 //  It should be the location where, e.g., the __attribute__((assume_aligned))
2476 //  was written.
2477 void CodeGenFunction::emitAlignmentAssumptionCheck(
2478     llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2479     SourceLocation SecondaryLoc, llvm::Value *Alignment,
2480     llvm::Value *OffsetValue, llvm::Value *TheCheck,
2481     llvm::Instruction *Assumption) {
2482   assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2483          cast<llvm::CallInst>(Assumption)->getCalledValue() ==
2484              llvm::Intrinsic::getDeclaration(
2485                  Builder.GetInsertBlock()->getParent()->getParent(),
2486                  llvm::Intrinsic::assume) &&
2487          "Assumption should be a call to llvm.assume().");
2488   assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2489          "Assumption should be the last instruction of the basic block, "
2490          "since the basic block is still being generated.");
2491 
2492   if (!SanOpts.has(SanitizerKind::Alignment))
2493     return;
2494 
2495   // Don't check pointers to volatile data. The behavior here is implementation-
2496   // defined.
2497   if (Ty->getPointeeType().isVolatileQualified())
2498     return;
2499 
2500   // We need to temporarily remove the assumption so we can insert the
2501   // sanitizer check before it, or the check will be dropped by optimizations.
2502   Assumption->removeFromParent();
2503 
2504   {
2505     SanitizerScope SanScope(this);
2506 
2507     if (!OffsetValue)
2508       OffsetValue = Builder.getInt1(0); // no offset.
2509 
2510     llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2511                                     EmitCheckSourceLocation(SecondaryLoc),
2512                                     EmitCheckTypeDescriptor(Ty)};
2513     llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2514                                   EmitCheckValue(Alignment),
2515                                   EmitCheckValue(OffsetValue)};
2516     EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2517               SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2518   }
2519 
2520   // We are now in the (new, empty) "cont" basic block.
2521   // Reintroduce the assumption.
2522   Builder.Insert(Assumption);
2523   // FIXME: Assumption still has its original basic block as its Parent.
2524 }
2525 
2526 llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
2527   if (CGDebugInfo *DI = getDebugInfo())
2528     return DI->SourceLocToDebugLoc(Location);
2529 
2530   return llvm::DebugLoc();
2531 }
2532