//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGDebugInfo.h"
#include "CGException.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
  : BlockFunction(cgm, *this, Builder), CGM(cgm),
    Target(CGM.getContext().Target),
    Builder(cgm.getModule().getContext()),
    ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
    SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
    DidCallStackSave(false), UnreachableBlock(0),
    CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
    ConditionalBranchLevel(0), TerminateLandingPad(0), TerminateHandler(0),
    TrapBB(0) {

  // Get some frequently used types.
  LLVMPointerWidth = Target.getPointerWidth(0);
  llvm::LLVMContext &LLVMContext = CGM.getLLVMContext();
  IntPtrTy = llvm::IntegerType::get(LLVMContext, LLVMPointerWidth);
  Int32Ty  = llvm::Type::getInt32Ty(LLVMContext);
  Int64Ty  = llvm::Type::getInt64Ty(LLVMContext);

  Exceptions = getContext().getLangOptions().Exceptions;
  CatchUndefined = getContext().getLangOptions().CatchUndefined;
  CGM.getMangleContext().startNewFunction();
}

ASTContext &CodeGenFunction::getContext() const {
  return CGM.getContext();
}

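/// GetAddrOfLocalVar - Return the address of a local variable that has
/// already been emitted in the current function.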
llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD) {
  llvm::Value *Res = LocalDeclMap[VD];
  assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
  return Res;
}

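/// GetAddrOfStaticLocalVar - Return the address of a local variable with
/// static storage duration, which is always a constant (presumably the
/// global that backs it).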
llvm::Constant *
CodeGenFunction::GetAddrOfStaticLocalVar(const VarDecl *BVD) {
  return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
}

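/// ConvertTypeForMem - Convert the clang type to its in-memory LLVM
/// representation; this can differ from the scalar representation produced
/// by ConvertType below (e.g. bool is i8 in memory but i1 as a scalar
/// value).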
const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

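/// hasAggregateLLVMType - Return true if the given type is lowered as an
/// aggregate rather than as a scalar in LLVM IR: records, arrays, complex
/// types, and member function pointers.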
bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
  return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
    T->isMemberFunctionPointerType();
}

void CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point; reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.Block->use_empty()) {
      ReturnBlock.Block->replaceAllUsesWith(CurBB);
      delete ReturnBlock.Block;
    } else
      EmitBlock(ReturnBlock.Block);
    return;
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.Block->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.Block->use_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.Block) {
      // Reset insertion point and delete the branch.
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.Block;
      return;
    }
  }

  // FIXME: We are at an unreachable point; there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.Block);
}

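/// EmitIfUsed - Append the given block to the current function if anything
/// branches to it; otherwise just delete it.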
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

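/// FinishFunction - Complete IR generation of the current function: emit
/// the return block and the function epilog, and clean up the lazily
/// created helper blocks (indirect goto, terminate handlers, etc.).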
void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  // Emit function epilog (to return).
  EmitReturnBlock();

  EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(EndLoc);
    DI->EmitRegionEnd(Builder);
  }

  EmitFunctionEpilog(*CurFnInfo);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = 0;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero-entry PHI node, which is illegal; zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  if (!ShouldInstrumentFunction())
    return;

  const llvm::PointerType *PointerTy;
  const llvm::FunctionType *FunctionTy;
  std::vector<const llvm::Type*> ProfileFuncArgs;

  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  PointerTy = llvm::Type::getInt8PtrTy(VMContext);
  ProfileFuncArgs.push_back(PointerTy);
  ProfileFuncArgs.push_back(PointerTy);
  FunctionTy = llvm::FunctionType::get(
    llvm::Type::getVoidTy(VMContext),
    ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
    CGM.getIntrinsic(llvm::Intrinsic::returnaddress, 0, 0),
    llvm::ConstantInt::get(Int32Ty, 0),
    "callsite");

  Builder.CreateCall2(F,
                      llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
                      CallSite);
}

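/// StartFunction - Emit the standard prologue for a function body: set up
/// the per-function state, create the entry block and the alloca insertion
/// point, emit debug and instrumentation hooks, and emit the prolog for the
/// arguments and the return value slot.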
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // Pass the inline keyword to the optimizer if it appears explicitly on any
  // declaration.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
           RE = FD->redecls_end(); RI != RE; ++RI)
      if (RI->isInlineSpecified()) {
        Fn->addFnAttr(llvm::Attribute::InlineHint);
        break;
      }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0,
                                                 false, false, 0, 0,
                                                 /*FIXME?*/
                                                 FunctionType::ExtInfo());

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  EmitFunctionInstrumentation("__cyg_profile_func_enter");

  // FIXME: Leaked.
  // CC info is ignored, hopefully?
  CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
                                              FunctionType::ExtInfo());

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");
  }

  EmitStartEHSpec(CurCodeDecl);
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (CXXThisDecl)
    CXXThisValue = Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this");
  if (CXXVTTDecl)
    CXXVTTValue = Builder.CreateLoad(LocalDeclMap[CXXVTTDecl], "vtt");

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = i->second;

    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  }
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
  assert(FD->getBody());
  EmitStmt(FD->getBody());
}

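/// GenerateCode - Emit the body of the given function declaration into Fn,
/// synthesizing the implicit 'this' and VTT parameters for C++ instance
/// methods and dispatching to the constructor/destructor emitters where
/// appropriate.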
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (CGM.getDebugInfo() && !FD->hasAttr<NoDebugAttr>())
    DebugInfo = CGM.getDebugInfo();

  FunctionArgList Args;

  CurGD = GD;
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
    if (MD->isInstance()) {
      // Create the implicit 'this' decl.
      // FIXME: I'm not entirely sure I like using a fake decl just for code
      // generation. Maybe we can come up with a better way?
      CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0,
                                              FD->getLocation(),
                                              &getContext().Idents.get("this"),
                                              MD->getThisType(getContext()));
      Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));

      // Check if we need a VTT parameter as well.
      if (CodeGenVTables::needsVTTParameter(GD)) {
        // FIXME: The comment about using a fake decl above applies here too.
        QualType T = getContext().getPointerType(getContext().VoidPtrTy);
        CXXVTTDecl =
          ImplicitParamDecl::Create(getContext(), 0, FD->getLocation(),
                                    &getContext().Idents.get("vtt"), T);
        Args.push_back(std::make_pair(CXXVTTDecl, CXXVTTDecl->getType()));
      }
    }
  }

  if (FD->getNumParams()) {
    const FunctionProtoType* FProto = FD->getType()->getAs<FunctionProtoType>();
    assert(FProto && "Function def must have prototype!");

    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
      Args.push_back(std::make_pair(FD->getParamDecl(i),
                                    FProto->getArgType(i)));
  }

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();

  // Emit the standard function prologue.
  StartFunction(GD, FD->getResultType(), Fn, Args, BodyRange.getBegin());

  // Generate the body of the function.
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else
    EmitFunctionBody(Args);

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // Destroy the 'this' declaration.
  if (CXXThisDecl)
    CXXThisDecl->Destroy(getContext());

  // Destroy the VTT declaration.
  if (CXXVTTDecl)
    CXXVTTDecl->Destroy(getContext());
}

/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a label, we have to emit the code; consider something like:
  // if (0) {  ...  foo:  bar(); }  goto foo;
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
       I != E; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}


/// ConstantFoldsToSimpleInteger - If the specified expression does not fold to
/// a constant, or if it does but contains a label, return 0.  If it constant
/// folds to 'true' and does not contain a label, return 1; if it constant
/// folds to 'false' and does not contain a label, return -1.
int CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult Result;
  if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
      Result.HasSideEffects)
    return 0;  // Not foldable, not an integer, or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return 0;  // Contains a label.

  return Result.Val.getInt().getBoolValue() ? 1 : -1;
}


/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks.  Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
///
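/// For example, "if (X && Y)" is emitted roughly as follows (a sketch of
/// the intended block structure, not verbatim IR):
///   br(X, land.lhs.true, FalseBlock)
///   land.lhs.true: br(Y, TrueBlock, FalseBlock)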
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock) {
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond))
    return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BinaryOperator::LAnd) {
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == 1) {
        // br(1 && X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == 1) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
      EmitBlock(LHSTrue);

      // Any temporaries created here are conditional.
      BeginConditionalBranch();
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      EndConditionalBranch();

      return;
    } else if (CondBOp->getOpcode() == BinaryOperator::LOr) {
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == -1) {
        // br(0 || X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == -1) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
      EmitBlock(LHSFalse);

      // Any temporaries created here are conditional.
      BeginConditionalBranch();
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      EndConditionalBranch();

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UnaryOperator::LNot)
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // Handle ?: operator.

    // Just ignore GNU ?: extension.
    if (CondOp->getLHS()) {
      // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
      llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
      llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
      EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
      EmitBlock(LHSBlock);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
      EmitBlock(RHSBlock);
      EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
      return;
    }
  }

  // Emit the code with the fully general case.
  llvm::Value *CondV = EvaluateExprAsBool(Cond);
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
                                       bool OmitOnError) {
  CGM.ErrorUnsupported(S, Type, OmitOnError);
}

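/// EmitNullInitialization - Emit a "null" initialization of an object of
/// the given type at the given address.  Null is not always an all-zero bit
/// pattern: in the Itanium C++ ABI, for example, a null pointer to data
/// member is represented as -1, which is why types containing one cannot
/// simply be memset to zero.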
void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  if (CGM.getTypes().ContainsPointerToDataMember(Ty)) {
    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, llvm::Twine());
    EmitAggregateCopy(DestPtr, NullVariable, Ty, /*isVolatile=*/false);
    return;
  }

  // Ignore empty classes in C++.
  if (getContext().getLangOptions().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");

  // Get size and alignment info for this aggregate.
  std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);

  // Don't bother emitting a zero-byte memset.
  if (TypeInfo.first == 0)
    return;

  // FIXME: Handle variable sized types.
  Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtrTy), DestPtr,
                 llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
                      // TypeInfo.first describes size in bits.
                      llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
                      llvm::ConstantInt::get(Int32Ty, TypeInfo.second/8),
                      llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
                                             0));
}

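/// GetAddrOfLabel - Return the address of the given label as an i8*, for
/// use with the address-of-label extension (&&label) and indirect goto.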
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
  // Make sure that there is a block for the indirect goto.
  if (IndirectBranch == 0)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).Block;

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

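/// GetIndirectGotoBlock - Return the block shared by all indirect gotos in
/// the function, creating its PHI node and indirectbr terminator on first
/// use.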
llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));

  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

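/// GetVLASize - Return the previously emitted size, in bytes, of the given
/// variable-length array type.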
llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
  llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];

  assert(SizeEntry && "Did not emit size for type");
  return SizeEntry;
}

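/// EmitVLASize - Compute and cache the runtime size of a variably modified
/// type.  For a variable-length array this returns its size in bytes; for
/// other variably modified types (arrays of or pointers to VLAs) it emits
/// the sizes of the nested VLA types and returns 0.  For example, for
///   int a[n][m];
/// the size is computed roughly as n * (m * sizeof(int)), with the inner
/// dimension handled by a recursive call.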
llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
  assert(Ty->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASize!");

  EnsureInsertPoint();

  if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
    llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];

    if (!SizeEntry) {
      const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // Get the element size.
      QualType ElemTy = VAT->getElementType();
      llvm::Value *ElemSize;
      if (ElemTy->isVariableArrayType())
        ElemSize = EmitVLASize(ElemTy);
      else
        ElemSize = llvm::ConstantInt::get(SizeTy,
            getContext().getTypeSizeInChars(ElemTy).getQuantity());

      llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
      NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");

      SizeEntry = Builder.CreateMul(ElemSize, NumElements);
    }

    return SizeEntry;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    EmitVLASize(AT->getElementType());
    return 0;
  }

  const PointerType *PT = Ty->getAs<PointerType>();
  assert(PT && "unknown VM type!");
  EmitVLASize(PT->getPointeeType());
  return 0;
}

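/// EmitVAListRef - Emit a reference to a va_list expression: a decayed
/// pointer when the target's va_list is an array type, and the lvalue
/// address otherwise.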
llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (CGM.getContext().getBuiltinVaListType()->isArrayType())
    return EmitScalarExpr(E);
  return EmitLValue(E).getAddress();
}

/// Pops cleanup blocks until the given savepoint is reached.
void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
  assert(Old.isValid());

  EHScopeStack::iterator E = EHStack.find(Old);
  while (EHStack.begin() != E)
    PopCleanupBlock();
}

/// Creates a switch instruction to thread branches out of the given
/// block (which is the exit block of a cleanup).
static void CreateCleanupSwitch(CodeGenFunction &CGF,
                                llvm::BasicBlock *Block) {
  if (Block->getTerminator()) {
    assert(isa<llvm::SwitchInst>(Block->getTerminator()) &&
           "cleanup block already has a terminator, but it isn't a switch");
    return;
  }

  llvm::Value *DestCodePtr
    = CGF.CreateTempAlloca(CGF.Builder.getInt32Ty(), "cleanup.dst");
  CGBuilderTy Builder(Block);
  llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");

  // Create a switch instruction to determine where to jump next.
  Builder.CreateSwitch(DestCode, CGF.getUnreachableBlock());
}

/// Attempts to reduce a cleanup's entry block to a fallthrough.  This
/// is basically llvm::MergeBlockIntoPredecessor, except
/// simplified/optimized for the tighter constraints on cleanup
/// blocks.
static void SimplifyCleanupEntry(CodeGenFunction &CGF,
                                 llvm::BasicBlock *Entry) {
  llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
  if (!Pred) return;

  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
  if (!Br || Br->isConditional()) return;
  assert(Br->getSuccessor(0) == Entry);

  // If we were previously inserting at the end of the cleanup entry
  // block, we'll need to continue inserting at the end of the
  // predecessor.
  bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
  assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());

  // Kill the branch.
  Br->eraseFromParent();

  // Merge the blocks.
  Pred->getInstList().splice(Pred->end(), Entry->getInstList());

  // Kill the entry block.
  Entry->eraseFromParent();

  if (WasInsertBlock)
    CGF.Builder.SetInsertPoint(Pred);
}

/// Attempts to reduce a cleanup's exit switch to an unconditional
/// branch.
static void SimplifyCleanupExit(llvm::BasicBlock *Exit) {
  llvm::TerminatorInst *Terminator = Exit->getTerminator();
  assert(Terminator && "completed cleanup exit has no terminator");

  llvm::SwitchInst *Switch = dyn_cast<llvm::SwitchInst>(Terminator);
  if (!Switch) return;
  if (Switch->getNumCases() != 2) return; // default + 1

  llvm::LoadInst *Cond = cast<llvm::LoadInst>(Switch->getCondition());
  llvm::AllocaInst *CondVar = cast<llvm::AllocaInst>(Cond->getPointerOperand());

  // Replace the switch instruction with an unconditional branch.
  llvm::BasicBlock *Dest = Switch->getSuccessor(1); // default is 0
  Switch->eraseFromParent();
  llvm::BranchInst::Create(Dest, Exit);

  // Delete all uses of the condition variable.
  Cond->eraseFromParent();
  while (!CondVar->use_empty())
    cast<llvm::StoreInst>(*CondVar->use_begin())->eraseFromParent();

  // Delete the condition variable itself.
  CondVar->eraseFromParent();
}

/// Threads a branch fixup through a cleanup block.
static void ThreadFixupThroughCleanup(CodeGenFunction &CGF,
                                      BranchFixup &Fixup,
                                      llvm::BasicBlock *Entry,
                                      llvm::BasicBlock *Exit) {
  if (!Exit->getTerminator())
    CreateCleanupSwitch(CGF, Exit);

  // Find the switch and its destination index alloca.
  llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Exit->getTerminator());
  llvm::Value *DestCodePtr =
    cast<llvm::LoadInst>(Switch->getCondition())->getPointerOperand();

  // Compute the index of the new case we're adding to the switch.
  unsigned Index = Switch->getNumCases();

  const llvm::IntegerType *i32 = llvm::Type::getInt32Ty(CGF.getLLVMContext());
  llvm::ConstantInt *IndexV = llvm::ConstantInt::get(i32, Index);

  // Set the index in the origin block.
  new llvm::StoreInst(IndexV, DestCodePtr, Fixup.Origin);

  // Add a case to the switch.
  Switch->addCase(IndexV, Fixup.Destination);

  // Change the last branch to point to the cleanup entry block.
  Fixup.LatestBranch->setSuccessor(Fixup.LatestBranchIndex, Entry);

  // And finally, update the fixup.
  Fixup.LatestBranch = Switch;
  Fixup.LatestBranchIndex = Index;
}

/// Try to simplify both the entry and exit edges of a cleanup.
static void SimplifyCleanupEdges(CodeGenFunction &CGF,
                                 llvm::BasicBlock *Entry,
                                 llvm::BasicBlock *Exit) {

  // Given their current implementations, it's important to run these
  // in this order: SimplifyCleanupEntry will delete Entry if it can
  // be merged into its predecessor, which will then break
  // SimplifyCleanupExit if (as is common) Entry == Exit.

  SimplifyCleanupExit(Exit);
  SimplifyCleanupEntry(CGF, Entry);
}

static void EmitLazyCleanup(CodeGenFunction &CGF,
                            EHScopeStack::LazyCleanup *Fn,
                            bool ForEH) {
  if (ForEH) CGF.EHStack.pushTerminate();
  Fn->Emit(CGF, ForEH);
  if (ForEH) CGF.EHStack.popTerminate();
  assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
}

static void SplitAndEmitLazyCleanup(CodeGenFunction &CGF,
                                    EHScopeStack::LazyCleanup *Fn,
                                    bool ForEH,
                                    llvm::BasicBlock *Entry) {
  assert(Entry && "no entry block for cleanup");

  // Remove the switch and load from the end of the entry block.
  llvm::Instruction *Switch = &Entry->getInstList().back();
  Entry->getInstList().remove(Switch);
  assert(isa<llvm::SwitchInst>(Switch));
  llvm::Instruction *Load = &Entry->getInstList().back();
  Entry->getInstList().remove(Load);
  assert(isa<llvm::LoadInst>(Load));

  assert(Entry->getInstList().empty() &&
         "lazy cleanup block not empty after removing load/switch pair?");

  // Emit the actual cleanup at the end of the entry block.
  CGF.Builder.SetInsertPoint(Entry);
  EmitLazyCleanup(CGF, Fn, ForEH);

  // Put the load and switch at the end of the exit block.
  llvm::BasicBlock *Exit = CGF.Builder.GetInsertBlock();
  Exit->getInstList().push_back(Load);
  Exit->getInstList().push_back(Switch);

  // Clean up the edges if possible.
  SimplifyCleanupEdges(CGF, Entry, Exit);

  CGF.Builder.ClearInsertionPoint();
}

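/// PopLazyCleanupBlock - Pop the innermost cleanup scope off the stack and
/// emit its cleanup, threading the fallthrough edge and any pending branch
/// fixups through the cleanup block when control can leave it in more than
/// one way.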
static void PopLazyCleanupBlock(CodeGenFunction &CGF) {
  assert(isa<EHLazyCleanupScope>(*CGF.EHStack.begin()) && "top not a cleanup!");
  EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*CGF.EHStack.begin());
  assert(Scope.getFixupDepth() <= CGF.EHStack.getNumBranchFixups());

  // Check whether we need an EH cleanup.  This is only true if we've
  // generated a lazy EH cleanup block.
  llvm::BasicBlock *EHEntry = Scope.getEHBlock();
  bool RequiresEHCleanup = (EHEntry != 0);

  // Check the three conditions which might require a normal cleanup:

  // - whether there are branch fix-ups through this cleanup
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = CGF.EHStack.getNumBranchFixups() != FixupDepth;

  // - whether control has already been threaded through this cleanup
  llvm::BasicBlock *NormalEntry = Scope.getNormalBlock();
  bool HasExistingBranches = (NormalEntry != 0);

  // - whether there's a fallthrough
  llvm::BasicBlock *FallthroughSource = CGF.Builder.GetInsertBlock();
  bool HasFallthrough = (FallthroughSource != 0);

  bool RequiresNormalCleanup = false;
  if (Scope.isNormalCleanup() &&
      (HasFixups || HasExistingBranches || HasFallthrough)) {
    RequiresNormalCleanup = true;
  }

  // If we don't need the cleanup at all, we're done.
  if (!RequiresNormalCleanup && !RequiresEHCleanup) {
    CGF.EHStack.popCleanup();
    assert(CGF.EHStack.getNumBranchFixups() == 0 ||
           CGF.EHStack.hasNormalCleanups());
    return;
  }

  // Copy the cleanup emission data out.  Note that SmallVector
  // guarantees maximal alignment for its buffer regardless of its
  // type parameter.
  llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
  CleanupBuffer.reserve(Scope.getCleanupSize());
  memcpy(CleanupBuffer.data(),
         Scope.getCleanupBuffer(), Scope.getCleanupSize());
  CleanupBuffer.set_size(Scope.getCleanupSize());
  EHScopeStack::LazyCleanup *Fn =
    reinterpret_cast<EHScopeStack::LazyCleanup*>(CleanupBuffer.data());

  // We're done with the scope; pop it off so we can emit the cleanups.
  CGF.EHStack.popCleanup();

  if (RequiresNormalCleanup) {
    // If we have a fallthrough and no other need for the cleanup,
    // emit it directly.
    if (HasFallthrough && !HasFixups && !HasExistingBranches) {
      EmitLazyCleanup(CGF, Fn, /*ForEH*/ false);

    // Otherwise, the best approach is to thread everything through
    // the cleanup block and then try to clean up after ourselves.
    } else {
      // Force the entry block to exist.
      if (!HasExistingBranches) {
        NormalEntry = CGF.createBasicBlock("cleanup");
        CreateCleanupSwitch(CGF, NormalEntry);
      }

      CGF.EmitBlock(NormalEntry);

      // Thread the fallthrough edge through the (momentarily trivial)
      // cleanup.
      llvm::BasicBlock *FallthroughDestination = 0;
      if (HasFallthrough) {
        assert(isa<llvm::BranchInst>(FallthroughSource->getTerminator()));
        FallthroughDestination = CGF.createBasicBlock("cleanup.cont");

        BranchFixup Fix;
        Fix.Destination = FallthroughDestination;
        Fix.LatestBranch = FallthroughSource->getTerminator();
        Fix.LatestBranchIndex = 0;
        Fix.Origin = Fix.LatestBranch;

        // Restore fixup invariant.  EmitBlock added a branch to the
        // cleanup which we need to redirect to the destination.
        cast<llvm::BranchInst>(Fix.LatestBranch)
          ->setSuccessor(0, Fix.Destination);

        ThreadFixupThroughCleanup(CGF, Fix, NormalEntry, NormalEntry);
      }

      // Thread any "real" fixups we need to thread.
      for (unsigned I = FixupDepth, E = CGF.EHStack.getNumBranchFixups();
           I != E; ++I)
        if (CGF.EHStack.getBranchFixup(I).Destination)
          ThreadFixupThroughCleanup(CGF, CGF.EHStack.getBranchFixup(I),
                                    NormalEntry, NormalEntry);

      SplitAndEmitLazyCleanup(CGF, Fn, /*ForEH*/ false, NormalEntry);

      if (HasFallthrough)
        CGF.EmitBlock(FallthroughDestination);
    }
  }

  // Emit the EH cleanup if required.
  if (RequiresEHCleanup) {
    CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
    CGF.EmitBlock(EHEntry);
    SplitAndEmitLazyCleanup(CGF, Fn, /*ForEH*/ true, EHEntry);
    CGF.Builder.restoreIP(SavedIP);
  }
}

/// Pops a cleanup block.  If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CodeGenFunction::PopCleanupBlock() {
  assert(!EHStack.empty() && "cleanup stack is empty!");
  assert(isa<EHLazyCleanupScope>(*EHStack.begin()));
  return PopLazyCleanupBlock(*this);
}

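/// EmitBranchThroughCleanup - Terminate the current block with a branch to
/// the given destination, threading the branch through every enclosing
/// normal cleanup scope as a branch fixup.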
void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);

  // If we're not in a cleanup scope, we don't need to worry about
  // fixups.
  if (!EHStack.hasNormalCleanups()) {
    Builder.ClearInsertionPoint();
    return;
  }

  // Initialize a fixup.
  BranchFixup Fixup;
  Fixup.Destination = Dest.Block;
  Fixup.Origin = BI;
  Fixup.LatestBranch = BI;
  Fixup.LatestBranchIndex = 0;

  // If we can't resolve the destination cleanup scope, just add this
  // to the current cleanup scope.
  if (!Dest.ScopeDepth.isValid()) {
    EHStack.addBranchFixup() = Fixup;
    Builder.ClearInsertionPoint();
    return;
  }

  for (EHScopeStack::iterator I = EHStack.begin(),
         E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
    if (isa<EHLazyCleanupScope>(*I)) {
      EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*I);
      if (Scope.isNormalCleanup()) {
        llvm::BasicBlock *Block = Scope.getNormalBlock();
        if (!Block) {
          Block = createBasicBlock("cleanup");
          Scope.setNormalBlock(Block);
        }
        ThreadFixupThroughCleanup(*this, Fixup, Block, Block);
      }
    }
  }

  Builder.ClearInsertionPoint();
}

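/// EmitBranchThroughEHCleanup - As EmitBranchThroughCleanup, but threads
/// the branch through the enclosing EH cleanup scopes instead.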
void CodeGenFunction::EmitBranchThroughEHCleanup(JumpDest Dest) {
  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);

  // If we're not in a cleanup scope, we don't need to worry about
  // fixups.
  if (!EHStack.hasEHCleanups()) {
    Builder.ClearInsertionPoint();
    return;
  }

  // Initialize a fixup.
  BranchFixup Fixup;
  Fixup.Destination = Dest.Block;
  Fixup.Origin = BI;
  Fixup.LatestBranch = BI;
  Fixup.LatestBranchIndex = 0;

  // We should never get invalid scope depths for these: invalid scope
  // depths only arise for as-yet-unemitted labels, and we can't do an
  // EH-unwind to one of those.
  assert(Dest.ScopeDepth.isValid() && "invalid scope depth on EH dest?");

  for (EHScopeStack::iterator I = EHStack.begin(),
         E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
    if (isa<EHLazyCleanupScope>(*I)) {
      EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*I);
      if (Scope.isEHCleanup()) {
        llvm::BasicBlock *Block = Scope.getEHBlock();
        if (!Block) {
          Block = createBasicBlock("eh.cleanup");
          Scope.setEHBlock(Block);
        }
        ThreadFixupThroughCleanup(*this, Fixup, Block, Block);
      }
    }
  }

  Builder.ClearInsertionPoint();
}