//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;

namespace {
struct MemberCallInfo {
  RequiredArgs ReqArgs;
  // Number of prefix arguments for the call. Ignores the `this` pointer.
  unsigned PrefixSize;
};
}

static MemberCallInfo
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
                                  llvm::Value *This, llvm::Value *ImplicitParam,
                                  QualType ImplicitParamTy, const CallExpr *CE,
                                  CallArgList &Args, CallArgList *RtlArgs) {
  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
         isa<CXXOperatorCallExpr>(CE));
  assert(MD->isInstance() &&
         "Trying to emit a member or operator call expr on a static method!");

  // Push the this ptr.
  const CXXRecordDecl *RD =
      CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
  Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
  unsigned PrefixSize = Args.size() - 1;

  // And the rest of the call args.
  if (RtlArgs) {
    // Special case: if the caller emitted the arguments right-to-left already
    // (prior to emitting the *this argument), we're done. This happens for
    // assignment operators.
    Args.addFrom(*RtlArgs);
  } else if (CE) {
    // Special case: skip first argument of CXXOperatorCall (it is "this").
    unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
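    // For a member operator such as 'a += b', the CXXOperatorCallExpr's
    // argument list is (a, b): argument 0 is the object itself and was
    // already pushed as 'this' above, so only 'b' is emitted here.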
    CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
                     CE->getDirectCallee());
  } else {
    assert(
        FPT->getNumParams() == 0 &&
        "No CallExpr specified for function with non-zero number of arguments");
  }
  return {required, PrefixSize};
}

RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
    const CXXMethodDecl *MD, const CGCallee &Callee,
    ReturnValueSlot ReturnValue,
    llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
    const CallExpr *CE, CallArgList *RtlArgs) {
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  CallArgList Args;
  MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall(
      *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
  auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
      Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
  return EmitCall(FnInfo, Callee, ReturnValue, Args, nullptr,
                  CE && CE == MustTailCall,
                  CE ? CE->getExprLoc() : SourceLocation());
}

RValue CodeGenFunction::EmitCXXDestructorCall(
    GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy,
    llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE) {
  const CXXMethodDecl *DtorDecl = cast<CXXMethodDecl>(Dtor.getDecl());

  assert(!ThisTy.isNull());
  assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() &&
         "Pointer/Object mixup");

  LangAS SrcAS = ThisTy.getAddressSpace();
  LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace();
  if (SrcAS != DstAS) {
    QualType DstTy = DtorDecl->getThisType();
    llvm::Type *NewType = CGM.getTypes().ConvertType(DstTy);
    This = getTargetHooks().performAddrSpaceCast(*this, This, SrcAS, DstAS,
                                                 NewType);
  }

  CallArgList Args;
  commonEmitCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam,
                                    ImplicitParamTy, CE, Args, nullptr);
  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
                  ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall,
                  CE ? CE->getExprLoc() : SourceLocation{});
}

RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
                                            const CXXPseudoDestructorExpr *E) {
  QualType DestroyedType = E->getDestroyedType();
  if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
    // Automatic Reference Counting:
    //   If the pseudo-expression names a retainable object with weak or
    //   strong lifetime, the object shall be released.
    Expr *BaseExpr = E->getBase();
    Address BaseValue = Address::invalid();
    Qualifiers BaseQuals;

    // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
    if (E->isArrow()) {
      BaseValue = EmitPointerWithAlignment(BaseExpr);
      const auto *PTy = BaseExpr->getType()->castAs<PointerType>();
      BaseQuals = PTy->getPointeeType().getQualifiers();
    } else {
      LValue BaseLV = EmitLValue(BaseExpr);
      BaseValue = BaseLV.getAddress(*this);
      QualType BaseTy = BaseExpr->getType();
      BaseQuals = BaseTy.getQualifiers();
    }

    switch (DestroyedType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      EmitARCRelease(Builder.CreateLoad(BaseValue,
                        DestroyedType.isVolatileQualified()),
                     ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      EmitARCDestroyWeak(BaseValue);
      break;
    }
  } else {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
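    //
    // For example, given 'typedef int T; int *p;', the pseudo-destructor
    // call 'p->~T()' destroys nothing; only the evaluation of 'p' itself can
    // have side effects, so that is all we emit.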
    EmitIgnoredExpr(E->getBase());
  }

  return RValue::get(nullptr);
}

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support the MSVC
// extension that allows explicit constructor function calls.
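// Under that extension, 'p->S::S(1)' re-runs S's constructor on the storage
// that 'p' points to; see the CXXConstructorDecl callee handling below.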
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    CGCallee callee =
        CGCallee::forDirect(CGM.GetAddrOfFunction(MD), GlobalDecl(MD));
    return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
                    ReturnValue);
  }

  bool HasQualifier = ME->hasQualifier();
  NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
  bool IsArrow = ME->isArrow();
  const Expr *Base = ME->getBase();

  return EmitCXXMemberOrOperatorMemberCallExpr(
      CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
}

RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
    const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
    bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
    const Expr *Base) {
  assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));

  // Compute the object pointer.
  bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;

  const CXXMethodDecl *DevirtualizedMethod = nullptr;
  if (CanUseVirtualCall &&
      MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->IgnoreParenBaseCasts();
    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
        MD->getReturnType().getCanonicalType())
      // If the return types are not the same, this might be a case where more
      // code needs to run to compensate for it. For example, the derived
      // method might return a type that inherits from the return
      // type of MD and has a prefix.
      // For now we just avoid devirtualizing these covariant cases.
      DevirtualizedMethod = nullptr;
    else if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = nullptr;
    }
  }

  bool TrivialForCodegen =
      MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion());
  bool TrivialAssignment =
      TrivialForCodegen &&
      (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
      !MD->getParent()->mayInsertExtraPadding();

  // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
  // operator before the LHS.
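  // For example, in 'a = f()' where 'a' has class type, C++17 requires f()
  // to be evaluated before the lvalue 'a', even though the emitted call
  // passes 'a' as the first (this) argument.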
  CallArgList RtlArgStorage;
  CallArgList *RtlArgs = nullptr;
  LValue TrivialAssignmentRHS;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
    if (OCE->isAssignmentOp()) {
      if (TrivialAssignment) {
        TrivialAssignmentRHS = EmitLValue(CE->getArg(1));
      } else {
        RtlArgs = &RtlArgStorage;
        EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
                     drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
                     /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft);
      }
    }
  }

  LValue This;
  if (IsArrow) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
    This = MakeAddrLValue(ThisValue, Base->getType(), BaseInfo, TBAAInfo);
  } else {
    This = EmitLValue(Base);
  }

  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
    // constructing a new complete object of type Ctor.
    assert(!RtlArgs);
    assert(ReturnValue.isNull() && "Constructor shouldn't have return value");
    CallArgList Args;
    commonEmitCXXMemberOrOperatorCall(
        *this, Ctor, This.getPointer(*this), /*ImplicitParam=*/nullptr,
        /*ImplicitParamTy=*/QualType(), CE, Args, nullptr);

    EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                           /*Delegating=*/false, This.getAddress(*this), Args,
                           AggValueSlot::DoesNotOverlap, CE->getExprLoc(),
                           /*NewPointerIsChecked=*/false);
    return RValue::get(nullptr);
  }

  if (TrivialForCodegen) {
    if (isa<CXXDestructorDecl>(MD))
      return RValue::get(nullptr);

    if (TrivialAssignment) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
      // It's important that we use the result of EmitLValue here rather than
      // emitting call arguments, in order to preserve TBAA information from
      // the RHS.
      LValue RHS = isa<CXXOperatorCallExpr>(CE)
                       ? TrivialAssignmentRHS
                       : EmitLValue(*CE->arg_begin());
      EmitAggregateAssign(This, RHS, CE->getType());
      return RValue::get(This.getPointer(*this));
    }

    assert(MD->getParent()->mayInsertExtraPadding() &&
           "unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl =
      DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = nullptr;
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        GlobalDecl(Dtor, Dtor_Complete));
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object that
  //   is not of type X, or of a type derived from X, the behavior is undefined.
  SourceLocation CallLoc;
  ASTContext &C = getContext();
  if (CE)
    CallLoc = CE->getExprLoc();

  SanitizerSet SkippedChecks;
  if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    auto *IOA = CMCE->getImplicitObjectArgument();
    bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA);
    if (IsImplicitObjectCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
      SkippedChecks.set(SanitizerKind::Null, true);
  }
  EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc,
                This.getPointer(*this),
                C.getRecordType(CalleeDecl->getParent()),
                /*Alignment=*/CharUnits::Zero(), SkippedChecks);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
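  // For example, 'd.Base::f()' must call Base::f directly even if f is
  // virtual, and for a call on an object 'd' (rather than a pointer or
  // reference) the dynamic type is statically known, so the call can be
  // devirtualized.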
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;

  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
                                                This.getAddress(*this),
                                                cast<CXXMemberCallExpr>(CE));
    } else {
      GlobalDecl GD(Dtor, Dtor_Complete);
      CGCallee Callee;
      if (getLangOpts().AppleKext && Dtor->isVirtual() && HasQualifier)
        Callee = BuildAppleKextVirtualCall(Dtor, Qualifier, Ty);
      else if (!DevirtualizedMethod)
        Callee =
            CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD, FInfo, Ty), GD);
      else {
        Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(GD, Ty), GD);
      }

      QualType ThisTy =
          IsArrow ? Base->getType()->getPointeeType() : Base->getType();
      EmitCXXDestructorCall(GD, Callee, This.getPointer(*this), ThisTy,
                            /*ImplicitParam=*/nullptr,
                            /*ImplicitParamTy=*/QualType(), CE);
    }
    return RValue::get(nullptr);
  }

  // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
  // 'CalleeDecl' instead.

  CGCallee Callee;
  if (UseVirtualCall) {
    Callee = CGCallee::forVirtual(CE, MD, This.getAddress(*this), Ty);
  } else {
    if (SanOpts.has(SanitizerKind::CFINVCall) &&
        MD->getParent()->isDynamicClass()) {
      llvm::Value *VTable;
      const CXXRecordDecl *RD;
      std::tie(VTable, RD) = CGM.getCXXABI().LoadVTablePtr(
          *this, This.getAddress(*this), CalleeDecl->getParent());
      EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc());
    }

    if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
      Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
    else if (!DevirtualizedMethod)
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), GlobalDecl(MD));
    else {
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
                              GlobalDecl(DevirtualizedMethod));
    }
  }

  if (MD->isVirtual()) {
    Address NewThisAddr =
        CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
            *this, CalleeDecl, This.getAddress(*this), UseVirtualCall);
    This.setAddress(NewThisAddr);
  }

  return EmitCXXMemberOrOperatorCall(
      CalleeDecl, Callee, ReturnValue, This.getPointer(*this),
      /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const auto *MPT = MemFnExpr->getType()->castAs<MemberPointerType>();
  const auto *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>();
  const auto *RD =
      cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());

  // Emit the 'this' pointer.
  Address This = Address::invalid();
  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitPointerWithAlignment(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress(*this);

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
                QualType(MPT->getClass(), 0));

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *ThisPtrForCall = nullptr;
  CGCallee Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
                                             ThisPtrForCall, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(ThisPtrForCall), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arguments());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
                                                      /*PrefixSize=*/0),
                  Callee, ReturnValue, Args, nullptr, E == MustTailCall,
                  E->getExprLoc());
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  return EmitCXXMemberOrOperatorMemberCallExpr(
      E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
      /*IsArrow=*/false, E->getArg(0));
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}

static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            Address DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits NVSize = Layout.getNonVirtualSize();

  // We cannot simply zero-initialize the entire base sub-object if vbptrs are
  // present; they are initialized by the most derived class before the
  // constructor is called.
  SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
  Stores.emplace_back(CharUnits::Zero(), NVSize);

  // Each store is split by the existence of a vbptr.
  CharUnits VBPtrWidth = CGF.getPointerSize();
  std::vector<CharUnits> VBPtrOffsets =
      CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
  for (CharUnits VBPtrOffset : VBPtrOffsets) {
    // Stop before we hit any virtual base pointers located in virtual bases.
    if (VBPtrOffset >= NVSize)
      break;
    std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
    CharUnits LastStoreOffset = LastStore.first;
    CharUnits LastStoreSize = LastStore.second;

    CharUnits SplitBeforeOffset = LastStoreOffset;
    CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
    assert(!SplitBeforeSize.isNegative() && "negative store size!");
    if (!SplitBeforeSize.isZero())
      Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);

    CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
    CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
    assert(!SplitAfterSize.isNegative() && "negative store size!");
    if (!SplitAfterSize.isZero())
      Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
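  // (Under the Itanium C++ ABI, for instance, a null pointer to data member
  // is represented as -1 rather than 0, which is why an all-zero memset
  // would produce the wrong value here.)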
  llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
  if (!NullConstantForBase->isNullValue()) {
    llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
        CGF.CGM.getModule(), NullConstantForBase->getType(),
        /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
        NullConstantForBase, Twine());

    CharUnits Align = std::max(Layout.getNonVirtualAlignment(),
                               DestPtr.getAlignment());
    NullVariable->setAlignment(Align.getAsAlign());

    Address SrcPtr = Address(CGF.EmitCastToVoidPtr(NullVariable), Align);

    // Get and call the appropriate llvm.memcpy overload.
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemCpy(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
          StoreSizeVal);
    }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  } else {
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemSet(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.getInt8(0), StoreSizeVal);
    }
  }
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddress(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddress(),
                                      CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
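  // For example, in 'X x = X(...)' the copy from the materialized temporary
  // can be elided by constructing directly into 'x'; an elidable NRVO copy,
  // by contrast, does not construct from a temporary, hence the extra check
  // below.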
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  if (const ArrayType *arrayType
        = getContext().getAsArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
                               Dest.isSanitizerChecked());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
     case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

     case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

     case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      LLVM_FALLTHROUGH;

     case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
     }

     // Call the constructor.
     EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
  }
}

void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
                                                 const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
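  // For example, 'new (buf) T[n]' using the library's reserved placement
  // form adds no cookie: the cookie exists so that delete[] can recover the
  // element count, and the matching placement operator delete[] never needs
  // it.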
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements =
    ConstantEmitter(CGF).tryEmitAbstract(*e->getArraySize(), e->getType());
  if (!numElements)
    numElements = CGF.EmitScalarExpr(*e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
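  // For example, 'new int[n]' with n == -1 must fail outright: a negative
  // count is an error in its own right, and must not be reinterpreted
  // through whatever the wrapped unsigned arithmetic happens to produce.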
  bool isSigned
    = (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //   numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that.
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = nullptr;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
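      // (A negative count sign-extended to size_t is at least
      // 2^(sizeWidth - 1), so multiplying it by any element size >= 2 is
      // guaranteed to trip umul.with.overflow; only a multiplier of 1
      // needs the explicit signed comparison below.)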
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Function *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
          CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Function *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
          CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, Address NewPtr,
                                    AggValueSlot::Overlap_t MayOverlap) {
  // FIXME: Refactor with EmitExprAsInit.
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, nullptr,
                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased,
                              MayOverlap, AggValueSlot::IsNotZeroed,
                              AggValueSlot::IsSanitizerChecked);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::EmitNewArrayInitializer(
    const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
    Address BeginPtr, llvm::Value *NumElements,
    llvm::Value *AllocSizeWithoutCookie) {
  // If we have a type with trivial initialization and no initializer,
  // there's nothing to do.
  if (!E->hasInitializer())
    return;

  Address CurPtr = BeginPtr;

  unsigned InitListElements = 0;

  const Expr *Init = E->getInitializer();
  Address EndOfInit = Address::invalid();
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
  EHScopeStack::stable_iterator Cleanup;
  llvm::Instruction *CleanupDominator = nullptr;

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
  CharUnits ElementAlign =
    BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);

  // Attempt to perform zero-initialization using memset.
  auto TryMemsetInitialization = [&]() -> bool {
    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
    // we can initialize with a memset to -1.
    if (!CGM.getTypes().isZeroInitializable(ElementType))
      return false;

    // Optimization: since zero initialization will just set the memory
    // to all zeroes, generate a single memset to do it in one shot.

    // Subtract out the size of any elements we've already initialized.
    auto *RemainingSize = AllocSizeWithoutCookie;
    if (InitListElements) {
      // We know this can't overflow; we check this when doing the allocation.
      auto *InitializedSize = llvm::ConstantInt::get(
          RemainingSize->getType(),
          getContext().getTypeSizeInChars(ElementType).getQuantity() *
              InitListElements);
      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
    }

    // Create the memset.
    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
    return true;
  };

  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    // Initializing from a (braced) string literal is a special case; the init
    // list element does not initialize a (single) array element.
    if (ILE->isStringLiteralInit()) {
      // Initialize the initial portion of length equal to that of the string
      // literal. The allocation must be for at least this much; we emitted a
      // check for that earlier.
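      // For example, in 'new char[n]{"abc"}', roughly speaking the four
      // bytes of the literal (including the terminating NUL) are copied
      // here, and the memset further below zeroes any remaining elements.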
      AggValueSlot Slot =
          AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased,
                                AggValueSlot::DoesNotOverlap,
                                AggValueSlot::IsNotZeroed,
                                AggValueSlot::IsSanitizerChecked);
      EmitAggExpr(ILE->getInit(0), Slot);

      // Move past these elements.
      InitListElements =
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
              ->getSize().getZExtValue();
      CurPtr =
          Address(Builder.CreateInBoundsGEP(CurPtr.getElementType(),
                                            CurPtr.getPointer(),
                                            Builder.getSize(InitListElements),
                                            "string.init.end"),
                  CurPtr.getAlignment().alignmentAtOffset(InitListElements *
                                                          ElementSize));

      // Zero out the rest, if any remain.
      llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
      if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
        bool OK = TryMemsetInitialization();
        (void)OK;
        assert(OK && "couldn't memset character type?");
      }
      return;
    }

    InitListElements = ILE->getNumInits();

    // If this is a multi-dimensional array new, we will initialize multiple
    // elements with each init list element.
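    // For example, in 'new int[n][3]{{1, 2, 3}, {4, 5, 6}}' each init list
    // element initializes one int[3], i.e. three base elements, which is
    // why InitListElements is scaled by the constant array element count
    // below.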
    QualType AllocType = E->getAllocatedType();
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
            AllocType->getAsArrayTypeUnsafe())) {
      ElementTy = ConvertTypeForMem(AllocType);
      CurPtr = Builder.CreateElementBitCast(CurPtr, ElementTy);
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
    }

    // Enter a partial-destruction Cleanup if necessary.
    if (needsEHCleanup(DtorKind)) {
      // In principle we could tell the Cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
                                   "array.init.end");
      CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit);
      pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit,
                                       ElementType, ElementAlign,
                                       getDestroyer(DtorKind));
      Cleanup = EHStack.stable_begin();
    }

    CharUnits StartAlign = CurPtr.getAlignment();
    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (EndOfInit.isValid()) {
        auto FinishedPtr =
          Builder.CreateBitCast(CurPtr.getPointer(), BeginPtr.getType());
        Builder.CreateStore(FinishedPtr, EndOfInit);
      }
      // FIXME: If the last initializer is an incomplete initializer list for
      // an array, and we have an array filler, we can fold together the two
      // initialization loops.
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
                              ILE->getInit(i)->getType(), CurPtr,
                              AggValueSlot::DoesNotOverlap);
      CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getElementType(),
                                                 CurPtr.getPointer(),
                                                 Builder.getSize(1),
                                                 "array.exp.next"),
                       StartAlign.alignmentAtOffset((i + 1) * ElementSize));
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();

    // Extract the initializer for the individual array elements by pulling
    // out the array filler from all the nested initializer lists. This avoids
    // generating a nested loop for the initialization.
    while (Init && Init->getType()->isConstantArrayType()) {
      auto *SubILE = dyn_cast<InitListExpr>(Init);
      if (!SubILE)
        break;
      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
      Init = SubILE->getArrayFiller();
    }

    // Switch back to initializing one base element at a time.
    CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
  }

  // If all elements have already been initialized, skip any further
  // initialization.
  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
    // If there was a Cleanup, deactivate it.
    if (CleanupDominator)
      DeactivateCleanupBlock(Cleanup, CleanupDominator);
    return;
  }

  assert(Init && "have trailing elements to initialize but no initializer");

  // If this is a constructor call, try to optimize it out, and failing that
  // emit a single loop to initialize all remaining elements.
  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
    CXXConstructorDecl *Ctor = CCE->getConstructor();
    if (Ctor->isTrivial()) {
      // If the new expression did not specify value-initialization, then there
1156       // is no initialization.
1157       if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
1158         return;
1159 
1160       if (TryMemsetInitialization())
1161         return;
1162     }
1163 
1164     // Store the new Cleanup position for irregular Cleanups.
1165     //
1166     // FIXME: Share this cleanup with the constructor call emission rather than
1167     // having it create a cleanup of its own.
1168     if (EndOfInit.isValid())
1169       Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
1170 
1171     // Emit a constructor call loop to initialize the remaining elements.
1172     if (InitListElements)
1173       NumElements = Builder.CreateSub(
1174           NumElements,
1175           llvm::ConstantInt::get(NumElements->getType(), InitListElements));
1176     EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
1177                                /*NewPointerIsChecked*/true,
1178                                CCE->requiresZeroInitialization());
1179     return;
1180   }
1181 
1182   // If this is value-initialization, we can usually use memset.
1183   ImplicitValueInitExpr IVIE(ElementType);
1184   if (isa<ImplicitValueInitExpr>(Init)) {
1185     if (TryMemsetInitialization())
1186       return;
1187 
1188     // Switch to an ImplicitValueInitExpr for the element type. This handles
1189     // only one case: multidimensional array new of pointers to members. In
1190     // all other cases, we already have an initializer for the array element.
1191     Init = &IVIE;
1192   }
1193 
1194   // At this point we should have found an initializer for the individual
1195   // elements of the array.
1196   assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
1197          "got wrong type of element to initialize");
1198 
1199   // If we have an empty initializer list, we can usually use memset.
1200   if (auto *ILE = dyn_cast<InitListExpr>(Init))
1201     if (ILE->getNumInits() == 0 && TryMemsetInitialization())
1202       return;
1203 
1204   // If we have a struct whose every field is value-initialized, we can
1205   // usually use memset.
1206   if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
1207     if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
1208       if (RType->getDecl()->isStruct()) {
1209         unsigned NumElements = 0;
1210         if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
1211           NumElements = CXXRD->getNumBases();
1212         for (auto *Field : RType->getDecl()->fields())
1213           if (!Field->isUnnamedBitfield())
1214             ++NumElements;
1215         // FIXME: Recurse into nested InitListExprs.
1216         if (ILE->getNumInits() == NumElements)
1217           for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
1218             if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
1219               --NumElements;
1220         if (ILE->getNumInits() == NumElements && TryMemsetInitialization())
1221           return;
1222       }
1223     }
1224   }
1225 
1226   // Create the loop blocks.
1227   llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
1228   llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
1229   llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");
1230 
1231   // Find the end of the array, hoisted out of the loop.
1232   llvm::Value *EndPtr =
1233     Builder.CreateInBoundsGEP(BeginPtr.getElementType(), BeginPtr.getPointer(),
1234                               NumElements, "array.end");
1235 
1236   // If the number of elements isn't constant, we now have to check whether
1237   // there is anything left to initialize.
1238   if (!ConstNum) {
1239     llvm::Value *IsEmpty =
1240       Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty");
1241     Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
1242   }
1243 
1244   // Enter the loop.
1245   EmitBlock(LoopBB);
1246 
1247   // Set up the current-element phi.
1248   llvm::PHINode *CurPtrPhi =
1249     Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
1250   CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);
1251 
1252   CurPtr = Address(CurPtrPhi, ElementAlign);
1253 
1254   // Store the new Cleanup position for irregular Cleanups.
1255   if (EndOfInit.isValid())
1256     Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
1257 
1258   // Enter a partial-destruction Cleanup if necessary.
1259   if (!CleanupDominator && needsEHCleanup(DtorKind)) {
1260     pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(),
1261                                    ElementType, ElementAlign,
1262                                    getDestroyer(DtorKind));
1263     Cleanup = EHStack.stable_begin();
1264     CleanupDominator = Builder.CreateUnreachable();
1265   }
1266 
1267   // Emit the initializer into this element.
1268   StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr,
1269                           AggValueSlot::DoesNotOverlap);
1270 
1271   // Leave the Cleanup if we entered one.
1272   if (CleanupDominator) {
1273     DeactivateCleanupBlock(Cleanup, CleanupDominator);
1274     CleanupDominator->eraseFromParent();
1275   }
1276 
1277   // Advance to the next element by adjusting the pointer type as necessary.
1278   llvm::Value *NextPtr =
1279     Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1,
1280                                        "array.next");
1281 
1282   // Check whether we've gotten to the end of the array and, if so,
1283   // exit the loop.
1284   llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
1285   Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
1286   CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());
1287 
1288   EmitBlock(ContBB);
1289 }
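// Editor's sketch (illustrative, not from the tree): one source form that
// reaches the generic element loop above is value-initialization of an array
// of pointers to data members, whose null value is all-ones under the
// Itanium ABI, so the memset-of-zero fast path cannot apply:
//
//   struct S { int a; };
//   using PM = int S::*;
//   PM *make(unsigned n) { return new PM[n]{}; }  // hypothetical helper
//
// The trailing elements are then written one per iteration of "new.loop",
// with "array.cur" advancing until it compares equal to "array.end".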
1290 
1291 static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
1292                                QualType ElementType, llvm::Type *ElementTy,
1293                                Address NewPtr, llvm::Value *NumElements,
1294                                llvm::Value *AllocSizeWithoutCookie) {
1295   ApplyDebugLocation DL(CGF, E);
1296   if (E->isArray())
1297     CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
1298                                 AllocSizeWithoutCookie);
1299   else if (const Expr *Init = E->getInitializer())
1300     StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr,
1301                             AggValueSlot::DoesNotOverlap);
1302 }
1303 
1304 /// Emit a call to an operator new or operator delete function, as implicitly
1305 /// created by new-expressions and delete-expressions.
1306 static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
1307                                 const FunctionDecl *CalleeDecl,
1308                                 const FunctionProtoType *CalleeType,
1309                                 const CallArgList &Args) {
1310   llvm::CallBase *CallOrInvoke;
1311   llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
1312   CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
1313   RValue RV =
1314       CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
1315                        Args, CalleeType, /*ChainCall=*/false),
1316                    Callee, ReturnValueSlot(), Args, &CallOrInvoke);
1317 
1318   /// C++1y [expr.new]p10:
1319   ///   [In a new-expression,] an implementation is allowed to omit a call
1320   ///   to a replaceable global allocation function.
1321   ///
1322   /// We model such elidable calls with the 'builtin' attribute.
1323   llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
1324   if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
1325       Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
1326     CallOrInvoke->addAttribute(llvm::AttributeList::FunctionIndex,
1327                                llvm::Attribute::Builtin);
1328   }
1329 
1330   return RV;
1331 }
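// Editor's note (hedged sketch): for a plain use of the replaceable global
// allocator, e.g.
//
//   int *f() { return new int(42); }
//
// the declaration of 'operator new' is emitted 'nobuiltin', so the code
// above marks this particular call site 'builtin'; that is what allows LLVM
// to elide the allocation as permitted by C++14 [expr.new]p10.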
1332 
1333 RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
1334                                                  const CallExpr *TheCall,
1335                                                  bool IsDelete) {
1336   CallArgList Args;
1337   EmitCallArgs(Args, Type, TheCall->arguments());
1338   // Find the allocation or deallocation function that we're calling.
1339   ASTContext &Ctx = getContext();
1340   DeclarationName Name = Ctx.DeclarationNames
1341       .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);
1342 
1343   for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
1344     if (auto *FD = dyn_cast<FunctionDecl>(Decl))
1345       if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
1346         return EmitNewDeleteCall(*this, FD, Type, Args);
1347   llvm_unreachable("predeclared global operator new/delete is missing");
1348 }
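// Example (editor's sketch): this entry point backs the clang builtins that
// call the usual global allocator directly, bypassing any class-specific
// overloads:
//
//   void *p = __builtin_operator_new(32);
//   __builtin_operator_delete(p);
//
// The loop above scans the translation unit's 'operator new' / 'operator
// delete' declarations for the one whose type matches exactly.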
1349 
1350 namespace {
1351 /// The parameters to pass to a usual operator delete.
1352 struct UsualDeleteParams {
1353   bool DestroyingDelete = false;
1354   bool Size = false;
1355   bool Alignment = false;
1356 };
1357 }
1358 
1359 static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) {
1360   UsualDeleteParams Params;
1361 
1362   const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
1363   auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();
1364 
1365   // The first argument is always a void*.
1366   ++AI;
1367 
1368   // The next parameter may be a std::destroying_delete_t.
1369   if (FD->isDestroyingOperatorDelete()) {
1370     Params.DestroyingDelete = true;
1371     assert(AI != AE);
1372     ++AI;
1373   }
1374 
1375   // Figure out what other parameters we should be implicitly passing.
1376   if (AI != AE && (*AI)->isIntegerType()) {
1377     Params.Size = true;
1378     ++AI;
1379   }
1380 
1381   if (AI != AE && (*AI)->isAlignValT()) {
1382     Params.Alignment = true;
1383     ++AI;
1384   }
1385 
1386   assert(AI == AE && "unexpected usual deallocation function parameter");
1387   return Params;
1388 }
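// Illustrative mapping (editor's sketch) from usual deallocation signatures
// to the flags computed above, assuming some class C:
//
//   void operator delete(void *);                                // (none)
//   void operator delete(void *, std::size_t);                   // Size
//   void operator delete(void *, std::align_val_t);              // Alignment
//   void operator delete(void *, std::size_t, std::align_val_t); // Size,
//                                                                // Alignment
//   void operator delete(C *, std::destroying_delete_t);         // Destroying
//   void operator delete(C *, std::destroying_delete_t,
//                        std::size_t);                     // Destroying, Size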
1389 
1390 namespace {
1391   /// A cleanup to call the given 'operator delete' function upon abnormal
1392   /// exit from a new expression. Templated on a traits type that deals with
1393   /// ensuring that the arguments dominate the cleanup if necessary.
1394   template<typename Traits>
1395   class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
1396     /// Type used to hold llvm::Value*s.
1397     typedef typename Traits::ValueTy ValueTy;
1398     /// Type used to hold RValues.
1399     typedef typename Traits::RValueTy RValueTy;
1400     struct PlacementArg {
1401       RValueTy ArgValue;
1402       QualType ArgType;
1403     };
1404 
1405     unsigned NumPlacementArgs : 31;
1406     unsigned PassAlignmentToPlacementDelete : 1;
1407     const FunctionDecl *OperatorDelete;
1408     ValueTy Ptr;
1409     ValueTy AllocSize;
1410     CharUnits AllocAlign;
1411 
1412     PlacementArg *getPlacementArgs() {
1413       return reinterpret_cast<PlacementArg *>(this + 1);
1414     }
1415 
1416   public:
1417     static size_t getExtraSize(size_t NumPlacementArgs) {
1418       return NumPlacementArgs * sizeof(PlacementArg);
1419     }
1420 
1421     CallDeleteDuringNew(size_t NumPlacementArgs,
1422                         const FunctionDecl *OperatorDelete, ValueTy Ptr,
1423                         ValueTy AllocSize, bool PassAlignmentToPlacementDelete,
1424                         CharUnits AllocAlign)
1425       : NumPlacementArgs(NumPlacementArgs),
1426         PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete),
1427         OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize),
1428         AllocAlign(AllocAlign) {}
1429 
1430     void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
1431       assert(I < NumPlacementArgs && "index out of range");
1432       getPlacementArgs()[I] = {Arg, Type};
1433     }
1434 
1435     void Emit(CodeGenFunction &CGF, Flags flags) override {
1436       const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>();
1437       CallArgList DeleteArgs;
1438 
1439       // The first argument is always a void* (or C* for a destroying operator
1440       // delete for class type C).
1441       DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));
1442 
1443       // Figure out what other parameters we should be implicitly passing.
1444       UsualDeleteParams Params;
1445       if (NumPlacementArgs) {
1446         // A placement deallocation function is implicitly passed an alignment
1447         // if the placement allocation function was, but is never passed a size.
1448         Params.Alignment = PassAlignmentToPlacementDelete;
1449       } else {
1450         // For a non-placement new-expression, 'operator delete' can take a
1451         // size and/or an alignment if it has the right parameters.
1452         Params = getUsualDeleteParams(OperatorDelete);
1453       }
1454 
1455       assert(!Params.DestroyingDelete &&
1456              "should not call destroying delete in a new-expression");
1457 
1458       // The second argument can be a std::size_t (for non-placement delete).
1459       if (Params.Size)
1460         DeleteArgs.add(Traits::get(CGF, AllocSize),
1461                        CGF.getContext().getSizeType());
1462 
1463       // The next (second or third) argument can be a std::align_val_t, which
1464       // is an enum whose underlying type is std::size_t.
1465       // FIXME: Use the right type as the parameter type. Note that in a call
1466       // to operator delete(size_t, ...), we may not have it available.
1467       if (Params.Alignment)
1468         DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
1469                            CGF.SizeTy, AllocAlign.getQuantity())),
1470                        CGF.getContext().getSizeType());
1471 
1472       // Pass the rest of the arguments, which must match exactly.
1473       for (unsigned I = 0; I != NumPlacementArgs; ++I) {
1474         auto Arg = getPlacementArgs()[I];
1475         DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
1476       }
1477 
1478       // Call 'operator delete'.
1479       EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
1480     }
1481   };
1482 }
1483 
1484 /// Enter a cleanup to call 'operator delete' if the initializer in a
1485 /// new-expression throws.
1486 static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
1487                                   const CXXNewExpr *E,
1488                                   Address NewPtr,
1489                                   llvm::Value *AllocSize,
1490                                   CharUnits AllocAlign,
1491                                   const CallArgList &NewArgs) {
1492   unsigned NumNonPlacementArgs = E->passAlignment() ? 2 : 1;
1493 
1494   // If we're not inside a conditional branch, then the cleanup will
1495   // dominate and we can do the easier (and more efficient) thing.
1496   if (!CGF.isInConditionalBranch()) {
1497     struct DirectCleanupTraits {
1498       typedef llvm::Value *ValueTy;
1499       typedef RValue RValueTy;
1500       static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); }
1501       static RValue get(CodeGenFunction &, RValueTy V) { return V; }
1502     };
1503 
1504     typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
1505 
1506     DirectCleanup *Cleanup = CGF.EHStack
1507       .pushCleanupWithExtra<DirectCleanup>(EHCleanup,
1508                                            E->getNumPlacementArgs(),
1509                                            E->getOperatorDelete(),
1510                                            NewPtr.getPointer(),
1511                                            AllocSize,
1512                                            E->passAlignment(),
1513                                            AllocAlign);
1514     for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
1515       auto &Arg = NewArgs[I + NumNonPlacementArgs];
1516       Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty);
1517     }
1518 
1519     return;
1520   }
1521 
1522   // Otherwise, we need to save all this stuff.
1523   DominatingValue<RValue>::saved_type SavedNewPtr =
1524     DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer()));
1525   DominatingValue<RValue>::saved_type SavedAllocSize =
1526     DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
1527 
1528   struct ConditionalCleanupTraits {
1529     typedef DominatingValue<RValue>::saved_type ValueTy;
1530     typedef DominatingValue<RValue>::saved_type RValueTy;
1531     static RValue get(CodeGenFunction &CGF, ValueTy V) {
1532       return V.restore(CGF);
1533     }
1534   };
1535   typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;
1536 
1537   ConditionalCleanup *Cleanup = CGF.EHStack
1538     .pushCleanupWithExtra<ConditionalCleanup>(EHCleanup,
1539                                               E->getNumPlacementArgs(),
1540                                               E->getOperatorDelete(),
1541                                               SavedNewPtr,
1542                                               SavedAllocSize,
1543                                               E->passAlignment(),
1544                                               AllocAlign);
1545   for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
1546     auto &Arg = NewArgs[I + NumNonPlacementArgs];
1547     Cleanup->setPlacementArg(
1548         I, DominatingValue<RValue>::save(CGF, Arg.getRValue(CGF)), Arg.Ty);
1549   }
1550 
1551   CGF.initFullExprCleanup();
1552 }
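// Example (editor's sketch, hypothetical types): the cleanup entered above
// is what invokes the matching placement 'operator delete' when the
// initializer throws:
//
//   struct Arena { char *alloc(std::size_t); };
//   void *operator new(std::size_t n, Arena &a);
//   void operator delete(void *p, Arena &a);   // called only on throw
//   struct T { T(); };                         // constructor may throw
//   T *make(Arena &a) { return new (a) T; }
//
// The DominatingValue-saving variant covers new-expressions emitted inside
// a conditional branch, e.g. 'ok ? new (a) T : nullptr'.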
1553 
1554 llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
1555   // The element type being allocated.
1556   QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
1557 
1558   // 1. Build a call to the allocation function.
1559   FunctionDecl *allocator = E->getOperatorNew();
1560 
1561   // If there is a brace-initializer, we cannot allocate fewer elements than inits.
1562   unsigned minElements = 0;
1563   if (E->isArray() && E->hasInitializer()) {
1564     const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer());
1565     if (ILE && ILE->isStringLiteralInit())
1566       minElements =
1567           cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
1568               ->getSize().getZExtValue();
1569     else if (ILE)
1570       minElements = ILE->getNumInits();
1571   }
1572 
1573   llvm::Value *numElements = nullptr;
1574   llvm::Value *allocSizeWithoutCookie = nullptr;
1575   llvm::Value *allocSize =
1576     EmitCXXNewAllocSize(*this, E, minElements, numElements,
1577                         allocSizeWithoutCookie);
1578   CharUnits allocAlign = getContext().getPreferredTypeAlignInChars(allocType);
1579 
1580   // Emit the allocation call.  If the allocator is a global placement
1581   // operator, just "inline" it directly.
1582   Address allocation = Address::invalid();
1583   CallArgList allocatorArgs;
1584   if (allocator->isReservedGlobalPlacementOperator()) {
1585     assert(E->getNumPlacementArgs() == 1);
1586     const Expr *arg = *E->placement_arguments().begin();
1587 
1588     LValueBaseInfo BaseInfo;
1589     allocation = EmitPointerWithAlignment(arg, &BaseInfo);
1590 
1591     // The pointer expression will, in many cases, be an opaque void*.
1592     // In these cases, discard the computed alignment and use the
1593     // formal alignment of the allocated type.
1594     if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
1595       allocation = Address(allocation.getPointer(), allocAlign);
1596 
1597     // Set up allocatorArgs for the call to operator delete if it's not
1598     // the reserved global operator.
1599     if (E->getOperatorDelete() &&
1600         !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1601       allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
1602       allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType());
1603     }
1604 
1605   } else {
1606     const FunctionProtoType *allocatorType =
1607       allocator->getType()->castAs<FunctionProtoType>();
1608     unsigned ParamsToSkip = 0;
1609 
1610     // The allocation size is the first argument.
1611     QualType sizeType = getContext().getSizeType();
1612     allocatorArgs.add(RValue::get(allocSize), sizeType);
1613     ++ParamsToSkip;
1614 
1615     if (allocSize != allocSizeWithoutCookie) {
1616       CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
1617       allocAlign = std::max(allocAlign, cookieAlign);
1618     }
1619 
1620     // The allocation alignment may be passed as the second argument.
1621     if (E->passAlignment()) {
1622       QualType AlignValT = sizeType;
1623       if (allocatorType->getNumParams() > 1) {
1624         AlignValT = allocatorType->getParamType(1);
1625         assert(getContext().hasSameUnqualifiedType(
1626                    AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
1627                    sizeType) &&
1628                "wrong type for alignment parameter");
1629         ++ParamsToSkip;
1630       } else {
1631         // Corner case, passing alignment to 'operator new(size_t, ...)'.
1632         assert(allocator->isVariadic() && "can't pass alignment to allocator");
1633       }
1634       allocatorArgs.add(
1635           RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())),
1636           AlignValT);
1637     }
1638 
1639     // FIXME: Why do we not pass a CalleeDecl here?
1640     EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
1641                  /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip);
1642 
1643     RValue RV =
1644       EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
1645 
1646     // Set !heapallocsite metadata on the call to operator new.
1647     if (getDebugInfo())
1648       if (auto *newCall = dyn_cast<llvm::CallBase>(RV.getScalarVal()))
1649         getDebugInfo()->addHeapAllocSiteMetadata(newCall, allocType,
1650                                                  E->getExprLoc());
1651 
1652     // If this was a call to a global replaceable allocation function that does
1653     // not take an alignment argument, the allocator is known to produce
1654     // storage that's suitably aligned for any object that fits, up to a known
1655     // threshold. Otherwise assume it's suitably aligned for the allocated type.
1656     CharUnits allocationAlign = allocAlign;
1657     if (!E->passAlignment() &&
1658         allocator->isReplaceableGlobalAllocationFunction()) {
1659       unsigned AllocatorAlign = llvm::PowerOf2Floor(std::min<uint64_t>(
1660           Target.getNewAlign(), getContext().getTypeSize(allocType)));
1661       allocationAlign = std::max(
1662           allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
1663     }
1664 
1665     allocation = Address(RV.getScalarVal(), allocationAlign);
1666   }
1667 
1668   // Emit a null check on the allocation result if the allocation
1669   // function is allowed to return null (because it has a non-throwing
1670   // exception spec or is the reserved placement new) and we have an
1671   // interesting initializer or will be running sanitizers on the initialization.
1672   bool nullCheck = E->shouldNullCheckAllocation() &&
1673                    (!allocType.isPODType(getContext()) || E->hasInitializer() ||
1674                     sanitizePerformTypeCheck());
1675 
1676   llvm::BasicBlock *nullCheckBB = nullptr;
1677   llvm::BasicBlock *contBB = nullptr;
1678 
1679   // The null-check means that the initializer is conditionally
1680   // evaluated.
1681   ConditionalEvaluation conditional(*this);
1682 
1683   if (nullCheck) {
1684     conditional.begin(*this);
1685 
1686     nullCheckBB = Builder.GetInsertBlock();
1687     llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
1688     contBB = createBasicBlock("new.cont");
1689 
1690     llvm::Value *isNull =
1691       Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
1692     Builder.CreateCondBr(isNull, contBB, notNullBB);
1693     EmitBlock(notNullBB);
1694   }
1695 
1696   // If there's an operator delete, enter a cleanup to call it if an
1697   // exception is thrown.
1698   EHScopeStack::stable_iterator operatorDeleteCleanup;
1699   llvm::Instruction *cleanupDominator = nullptr;
1700   if (E->getOperatorDelete() &&
1701       !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1702     EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
1703                           allocatorArgs);
1704     operatorDeleteCleanup = EHStack.stable_begin();
1705     cleanupDominator = Builder.CreateUnreachable();
1706   }
1707 
1708   assert((allocSize == allocSizeWithoutCookie) ==
1709          CalculateCookiePadding(*this, E).isZero());
1710   if (allocSize != allocSizeWithoutCookie) {
1711     assert(E->isArray());
1712     allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
1713                                                        numElements,
1714                                                        E, allocType);
1715   }
1716 
1717   llvm::Type *elementTy = ConvertTypeForMem(allocType);
1718   Address result = Builder.CreateElementBitCast(allocation, elementTy);
1719 
1720   // Pass the pointer through launder.invariant.group to avoid propagating
1721   // vptr information that may be left over from the previous type.
1722   // To avoid breaking LTO across different optimization levels, we do this
1723   // regardless of the optimization level.
1724   if (CGM.getCodeGenOpts().StrictVTablePointers &&
1725       allocator->isReservedGlobalPlacementOperator())
1726     result = Address(Builder.CreateLaunderInvariantGroup(result.getPointer()),
1727                      result.getAlignment());
1728 
1729   // Emit sanitizer checks for the pointer value now, so that in the case of
1730   // an array it is checked only once and not at each constructor call. We
1731   // may have already checked that the pointer is non-null.
1732   // FIXME: If we have an array cookie and a potentially-throwing allocator,
1733   // we'll null check the wrong pointer here.
1734   SanitizerSet SkippedChecks;
1735   SkippedChecks.set(SanitizerKind::Null, nullCheck);
1736   EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall,
1737                 E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
1738                 result.getPointer(), allocType, result.getAlignment(),
1739                 SkippedChecks, numElements);
1740 
1741   EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
1742                      allocSizeWithoutCookie);
1743   if (E->isArray()) {
1744     // NewPtr is a pointer to the base element type.  If we're
1745     // allocating an array of arrays, we'll need to cast back to the
1746     // array pointer type.
1747     llvm::Type *resultType = ConvertTypeForMem(E->getType());
1748     if (result.getType() != resultType)
1749       result = Builder.CreateBitCast(result, resultType);
1750   }
1751 
1752   // Deactivate the 'operator delete' cleanup if we finished
1753   // initialization.
1754   if (operatorDeleteCleanup.isValid()) {
1755     DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
1756     cleanupDominator->eraseFromParent();
1757   }
1758 
1759   llvm::Value *resultPtr = result.getPointer();
1760   if (nullCheck) {
1761     conditional.end(*this);
1762 
1763     llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
1764     EmitBlock(contBB);
1765 
1766     llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
1767     PHI->addIncoming(resultPtr, notNullBB);
1768     PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
1769                      nullCheckBB);
1770 
1771     resultPtr = PHI;
1772   }
1773 
1774   return resultPtr;
1775 }
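// Editor's sketch of the null-check structure built above for an allocator
// with a non-throwing exception specification:
//
//   struct X { X(); };
//   X *f() { return new (std::nothrow) X; }
//
// emits, roughly, a branch on "new.isnull" that skips the initialization,
// with a phi in "new.cont" merging the constructed pointer and null.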
1776 
1777 void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1778                                      llvm::Value *Ptr, QualType DeleteTy,
1779                                      llvm::Value *NumElements,
1780                                      CharUnits CookieSize) {
1781   assert((!NumElements && CookieSize.isZero()) ||
1782          DeleteFD->getOverloadedOperator() == OO_Array_Delete);
1783 
1784   const auto *DeleteFTy = DeleteFD->getType()->castAs<FunctionProtoType>();
1785   CallArgList DeleteArgs;
1786 
1787   auto Params = getUsualDeleteParams(DeleteFD);
1788   auto ParamTypeIt = DeleteFTy->param_type_begin();
1789 
1790   // Pass the pointer itself.
1791   QualType ArgTy = *ParamTypeIt++;
1792   llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1793   DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
1794 
1795   // Pass the std::destroying_delete tag if present.
1796   llvm::AllocaInst *DestroyingDeleteTag = nullptr;
1797   if (Params.DestroyingDelete) {
1798     QualType DDTag = *ParamTypeIt++;
1799     llvm::Type *Ty = getTypes().ConvertType(DDTag);
1800     CharUnits Align = CGM.getNaturalTypeAlignment(DDTag);
1801     DestroyingDeleteTag = CreateTempAlloca(Ty, "destroying.delete.tag");
1802     DestroyingDeleteTag->setAlignment(Align.getAsAlign());
1803     DeleteArgs.add(RValue::getAggregate(Address(DestroyingDeleteTag, Align)), DDTag);
1804   }
1805 
1806   // Pass the size if the delete function has a size_t parameter.
1807   if (Params.Size) {
1808     QualType SizeType = *ParamTypeIt++;
1809     CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1810     llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType),
1811                                                DeleteTypeSize.getQuantity());
1812 
1813     // For array new, multiply by the number of elements.
1814     if (NumElements)
1815       Size = Builder.CreateMul(Size, NumElements);
1816 
1817     // If there is a cookie, add the cookie size.
1818     if (!CookieSize.isZero())
1819       Size = Builder.CreateAdd(
1820           Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()));
1821 
1822     DeleteArgs.add(RValue::get(Size), SizeType);
1823   }
1824 
1825   // Pass the alignment if the delete function has an align_val_t parameter.
1826   if (Params.Alignment) {
1827     QualType AlignValType = *ParamTypeIt++;
1828     CharUnits DeleteTypeAlign =
1829         getContext().toCharUnitsFromBits(getContext().getTypeAlignIfKnown(
1830             DeleteTy, true /* NeedsPreferredAlignment */));
1831     llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
1832                                                 DeleteTypeAlign.getQuantity());
1833     DeleteArgs.add(RValue::get(Align), AlignValType);
1834   }
1835 
1836   assert(ParamTypeIt == DeleteFTy->param_type_end() &&
1837          "unknown parameter to usual delete function");
1838 
1839   // Emit the call to delete.
1840   EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
1841 
1842   // If call argument lowering didn't use the destroying_delete_t alloca,
1843   // remove it again.
1844   if (DestroyingDeleteTag && DestroyingDeleteTag->use_empty())
1845     DestroyingDeleteTag->eraseFromParent();
1846 }
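// Example (hedged): for a sized, over-aligned usual delete such as
//
//   struct alignas(32) V { char bytes[64]; };
//   void destroy(V *p) { delete p; }
//
// the code above passes sizeof(V) as the std::size_t argument and 32 as the
// std::align_val_t argument, assuming the selected 'operator delete'
// declares those parameters. For array deletes, the size is additionally
// scaled by the element count and any cookie size is added.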
1847 
1848 namespace {
1849   /// Calls the given 'operator delete' on a single object.
1850   struct CallObjectDelete final : EHScopeStack::Cleanup {
1851     llvm::Value *Ptr;
1852     const FunctionDecl *OperatorDelete;
1853     QualType ElementType;
1854 
1855     CallObjectDelete(llvm::Value *Ptr,
1856                      const FunctionDecl *OperatorDelete,
1857                      QualType ElementType)
1858       : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
1859 
1860     void Emit(CodeGenFunction &CGF, Flags flags) override {
1861       CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
1862     }
1863   };
1864 }
1865 
1866 void
1867 CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
1868                                              llvm::Value *CompletePtr,
1869                                              QualType ElementType) {
1870   EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
1871                                         OperatorDelete, ElementType);
1872 }
1873 
1874 /// Emit the code for deleting a single object with a destroying operator
1875 /// delete. If the element type has a non-virtual destructor, Ptr has already
1876 /// been converted to the type of the parameter of 'operator delete'. Otherwise
1877 /// Ptr points to an object of the static type.
1878 static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
1879                                        const CXXDeleteExpr *DE, Address Ptr,
1880                                        QualType ElementType) {
1881   auto *Dtor = ElementType->getAsCXXRecordDecl()->getDestructor();
1882   if (Dtor && Dtor->isVirtual())
1883     CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
1884                                                 Dtor);
1885   else
1886     CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.getPointer(), ElementType);
1887 }
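// Example (editor's sketch): a C++20 destroying operator delete takes over
// destruction entirely, so no separate destructor call is emitted here:
//
//   struct D {
//     ~D();
//     void operator delete(D *, std::destroying_delete_t);
//   };
//   void f(D *p) { delete p; }   // calls D::operator delete directly
//
// If ~D() were virtual, the ABI-specific virtual-object-delete path above
// would be taken instead.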
1888 
1889 /// Emit the code for deleting a single object.
1890 /// \return \c true if we started emitting UnconditionalDeleteBlock, \c false
1891 /// if not.
1892 static bool EmitObjectDelete(CodeGenFunction &CGF,
1893                              const CXXDeleteExpr *DE,
1894                              Address Ptr,
1895                              QualType ElementType,
1896                              llvm::BasicBlock *UnconditionalDeleteBlock) {
1897   // C++11 [expr.delete]p3:
1898   //   If the static type of the object to be deleted is different from its
1899   //   dynamic type, the static type shall be a base class of the dynamic type
1900   //   of the object to be deleted and the static type shall have a virtual
1901   //   destructor or the behavior is undefined.
1902   CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
1903                     DE->getExprLoc(), Ptr.getPointer(),
1904                     ElementType);
1905 
1906   const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
1907   assert(!OperatorDelete->isDestroyingOperatorDelete());
1908 
1909   // Find the destructor for the type, if applicable.  If the
1910   // destructor is virtual, we'll just emit the vcall and return.
1911   const CXXDestructorDecl *Dtor = nullptr;
1912   if (const RecordType *RT = ElementType->getAs<RecordType>()) {
1913     CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1914     if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
1915       Dtor = RD->getDestructor();
1916 
1917       if (Dtor->isVirtual()) {
1918         bool UseVirtualCall = true;
1919         const Expr *Base = DE->getArgument();
1920         if (auto *DevirtualizedDtor =
1921                 dyn_cast_or_null<const CXXDestructorDecl>(
1922                     Dtor->getDevirtualizedMethod(
1923                         Base, CGF.CGM.getLangOpts().AppleKext))) {
1924           UseVirtualCall = false;
1925           const CXXRecordDecl *DevirtualizedClass =
1926               DevirtualizedDtor->getParent();
1927           if (declaresSameEntity(getCXXRecord(Base), DevirtualizedClass)) {
1928             // Devirtualized to the class of the base type (the type of the
1929             // whole expression).
1930             Dtor = DevirtualizedDtor;
1931           } else {
1932             // Devirtualized to some other type. Would need to cast the this
1933             // pointer to that type but we don't have support for that yet, so
1934             // do a virtual call. FIXME: handle the case where it is
1935             // devirtualized to the derived type (the type of the inner
1936             // expression) as in EmitCXXMemberOrOperatorMemberCallExpr.
1937             UseVirtualCall = true;
1938           }
1939         }
1940         if (UseVirtualCall) {
1941           CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
1942                                                       Dtor);
1943           return false;
1944         }
1945       }
1946     }
1947   }
1948 
1949   // Make sure that we call delete even if the dtor throws.
1950   // This doesn't have to be a conditional cleanup because we're going
1951   // to pop it off in a second.
1952   CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1953                                             Ptr.getPointer(),
1954                                             OperatorDelete, ElementType);
1955 
1956   if (Dtor)
1957     CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
1958                               /*ForVirtualBase=*/false,
1959                               /*Delegating=*/false,
1960                               Ptr, ElementType);
1961   else if (auto Lifetime = ElementType.getObjCLifetime()) {
1962     switch (Lifetime) {
1963     case Qualifiers::OCL_None:
1964     case Qualifiers::OCL_ExplicitNone:
1965     case Qualifiers::OCL_Autoreleasing:
1966       break;
1967 
1968     case Qualifiers::OCL_Strong:
1969       CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
1970       break;
1971 
1972     case Qualifiers::OCL_Weak:
1973       CGF.EmitARCDestroyWeak(Ptr);
1974       break;
1975     }
1976   }
1977 
1978   // When optimizing for size, call 'operator delete' unconditionally.
1979   if (CGF.CGM.getCodeGenOpts().OptimizeSize > 1) {
1980     CGF.EmitBlock(UnconditionalDeleteBlock);
1981     CGF.PopCleanupBlock();
1982     return true;
1983   }
1984 
1985   CGF.PopCleanupBlock();
1986   return false;
1987 }
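// Example (sketch) of the devirtualization handled above: when the static
// type is final, the destructor call needs no vtable dispatch:
//
//   struct B { virtual ~B(); };
//   struct D final : B { ~D() override; };
//   void f(D *p) { delete p; }   // direct ~D() call, then operator delete
//
// whereas 'delete (B *)q' keeps the virtual call, which the Itanium ABI
// folds into the deleting destructor.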
1988 
1989 namespace {
1990   /// Calls the given 'operator delete' on an array of objects.
1991   struct CallArrayDelete final : EHScopeStack::Cleanup {
1992     llvm::Value *Ptr;
1993     const FunctionDecl *OperatorDelete;
1994     llvm::Value *NumElements;
1995     QualType ElementType;
1996     CharUnits CookieSize;
1997 
1998     CallArrayDelete(llvm::Value *Ptr,
1999                     const FunctionDecl *OperatorDelete,
2000                     llvm::Value *NumElements,
2001                     QualType ElementType,
2002                     CharUnits CookieSize)
2003       : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
2004         ElementType(ElementType), CookieSize(CookieSize) {}
2005 
2006     void Emit(CodeGenFunction &CGF, Flags flags) override {
2007       CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
2008                          CookieSize);
2009     }
2010   };
2011 }
2012 
2013 /// Emit the code for deleting an array of objects.
2014 static void EmitArrayDelete(CodeGenFunction &CGF,
2015                             const CXXDeleteExpr *E,
2016                             Address deletedPtr,
2017                             QualType elementType) {
2018   llvm::Value *numElements = nullptr;
2019   llvm::Value *allocatedPtr = nullptr;
2020   CharUnits cookieSize;
2021   CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
2022                                       numElements, allocatedPtr, cookieSize);
2023 
2024   assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
2025 
2026   // Make sure that we call delete even if one of the dtors throws.
2027   const FunctionDecl *operatorDelete = E->getOperatorDelete();
2028   CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
2029                                            allocatedPtr, operatorDelete,
2030                                            numElements, elementType,
2031                                            cookieSize);
2032 
2033   // Destroy the elements.
2034   if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
2035     assert(numElements && "no element count for a type with a destructor!");
2036 
2037     CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
2038     CharUnits elementAlign =
2039       deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);
2040 
2041     llvm::Value *arrayBegin = deletedPtr.getPointer();
2042     llvm::Value *arrayEnd = CGF.Builder.CreateInBoundsGEP(
2043       deletedPtr.getElementType(), arrayBegin, numElements, "delete.end");
2044 
2045     // Note that it is legal to allocate a zero-length array, and we
2046     // can never fold the check away because the length should always
2047     // come from a cookie.
2048     CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
2049                          CGF.getDestroyer(dtorKind),
2050                          /*checkZeroLength*/ true,
2051                          CGF.needsEHCleanup(dtorKind));
2052   }
2053 
2054   // Pop the cleanup block.
2055   CGF.PopCleanupBlock();
2056 }
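// Example (editor's sketch): for elements that need destruction,
//
//   struct S { ~S(); };
//   void f(S *p) { delete[] p; }
//
// ReadArrayCookie recovers the element count that array new stored ahead of
// the array, the elements are destroyed, and the pushed cleanup still calls
// 'operator delete[]' on the cookie-adjusted allocation if a destructor
// throws.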
2057 
2058 void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
2059   const Expr *Arg = E->getArgument();
2060   Address Ptr = EmitPointerWithAlignment(Arg);
2061 
2062   // Null check the pointer.
2063   //
2064   // We could avoid this null check if we can determine that the object
2065   // destruction is trivial and doesn't require an array cookie; we can
2066   // unconditionally perform the operator delete call in that case. For now, we
2067   // assume that deleted pointers are null rarely enough that it's better to
2068   // keep the branch. This might be worth revisiting for a -O0 code size win.
2069   llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
2070   llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
2071 
2072   llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");
2073 
2074   Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
2075   EmitBlock(DeleteNotNull);
2076 
2077   QualType DeleteTy = E->getDestroyedType();
2078 
2079   // A destroying operator delete overrides the entire operation of the
2080   // delete expression.
2081   if (E->getOperatorDelete()->isDestroyingOperatorDelete()) {
2082     EmitDestroyingObjectDelete(*this, E, Ptr, DeleteTy);
2083     EmitBlock(DeleteEnd);
2084     return;
2085   }
2086 
2087   // We might be deleting a pointer to array.  If so, GEP down to the
2088   // first non-array element.
2089   // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
2090   if (DeleteTy->isConstantArrayType()) {
2091     llvm::Value *Zero = Builder.getInt32(0);
2092     SmallVector<llvm::Value*,8> GEP;
2093 
2094     GEP.push_back(Zero); // point at the outermost array
2095 
2096     // For each layer of array type we're pointing at:
2097     while (const ConstantArrayType *Arr
2098              = getContext().getAsConstantArrayType(DeleteTy)) {
2099       // 1. Unpeel the array type.
2100       DeleteTy = Arr->getElementType();
2101 
2102       // 2. GEP to the first element of the array.
2103       GEP.push_back(Zero);
2104     }
2105 
2106     Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getElementType(),
2107                                             Ptr.getPointer(), GEP, "del.first"),
2108                   Ptr.getAlignment());
2109   }
2110 
2111   assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());
2112 
2113   if (E->isArrayForm()) {
2114     EmitArrayDelete(*this, E, Ptr, DeleteTy);
2115     EmitBlock(DeleteEnd);
2116   } else {
2117     if (!EmitObjectDelete(*this, E, Ptr, DeleteTy, DeleteEnd))
2118       EmitBlock(DeleteEnd);
2119   }
2120 }
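// Example (sketch) of the array-type unpeeling above: deleting a pointer to
// a multidimensional array GEPs down to the first scalar element first:
//
//   using A = int[3][7];
//   void f(A *p) { delete[] p; }   // "del.first" GEP yields an int*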
2121 
2122 static bool isGLValueFromPointerDeref(const Expr *E) {
2123   E = E->IgnoreParens();
2124 
2125   if (const auto *CE = dyn_cast<CastExpr>(E)) {
2126     if (!CE->getSubExpr()->isGLValue())
2127       return false;
2128     return isGLValueFromPointerDeref(CE->getSubExpr());
2129   }
2130 
2131   if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
2132     return isGLValueFromPointerDeref(OVE->getSourceExpr());
2133 
2134   if (const auto *BO = dyn_cast<BinaryOperator>(E))
2135     if (BO->getOpcode() == BO_Comma)
2136       return isGLValueFromPointerDeref(BO->getRHS());
2137 
2138   if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
2139     return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
2140            isGLValueFromPointerDeref(ACO->getFalseExpr());
2141 
2142   // C++11 [expr.sub]p1:
2143   //   The expression E1[E2] is identical (by definition) to *((E1)+(E2))
2144   if (isa<ArraySubscriptExpr>(E))
2145     return true;
2146 
2147   if (const auto *UO = dyn_cast<UnaryOperator>(E))
2148     if (UO->getOpcode() == UO_Deref)
2149       return true;
2150 
2151   return false;
2152 }
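// Illustrative cases (editor's note) for the predicate above:
//
//   typeid(*p)             // true: unary '*' on a pointer
//   typeid(p[0])           // true: subscripting is deref by definition
//   typeid(b ? *p : *q)    // true: both arms come from derefs
//   typeid((f(), *p))      // true: comma forwards to its RHS
//   typeid(obj)            // false: not obtained from a pointer deref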
2153 
2154 static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
2155                                          llvm::Type *StdTypeInfoPtrTy) {
2156   // Get the vtable pointer.
2157   Address ThisPtr = CGF.EmitLValue(E).getAddress(CGF);
2158 
2159   QualType SrcRecordTy = E->getType();
2160 
2161   // C++ [class.cdtor]p4:
2162   //   If the operand of typeid refers to the object under construction or
2163   //   destruction and the static type of the operand is neither the constructor
2164   //   or destructor’s class nor one of its bases, the behavior is undefined.
2165   CGF.EmitTypeCheck(CodeGenFunction::TCK_DynamicOperation, E->getExprLoc(),
2166                     ThisPtr.getPointer(), SrcRecordTy);
2167 
2168   // C++ [expr.typeid]p2:
2169   //   If the glvalue expression is obtained by applying the unary * operator to
2170   //   a pointer and the pointer is a null pointer value, the typeid expression
2171   //   throws the std::bad_typeid exception.
2172   //
2173   // However, this paragraph's intent is not clear.  We choose a very generous
2174   // interpretation which requires us to consider comma operators, conditional
2175   // operators, parentheses and other such constructs.
2176   if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(
2177           isGLValueFromPointerDeref(E), SrcRecordTy)) {
2178     llvm::BasicBlock *BadTypeidBlock =
2179         CGF.createBasicBlock("typeid.bad_typeid");
2180     llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");
2181 
2182     llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer());
2183     CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
2184 
2185     CGF.EmitBlock(BadTypeidBlock);
2186     CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
2187     CGF.EmitBlock(EndBlock);
2188   }
2189 
2190   return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
2191                                         StdTypeInfoPtrTy);
2192 }
2193 
2194 llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
2195   llvm::Type *StdTypeInfoPtrTy =
2196     ConvertType(E->getType())->getPointerTo();
2197 
2198   if (E->isTypeOperand()) {
2199     llvm::Constant *TypeInfo =
2200         CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
2201     return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
2202   }
2203 
2204   // C++ [expr.typeid]p2:
2205   //   When typeid is applied to a glvalue expression whose type is a
2206   //   polymorphic class type, the result refers to a std::type_info object
2207   //   representing the type of the most derived object (that is, the dynamic
2208   //   type) to which the glvalue refers.
2209   // If the operand is already the most derived object, no need to look up the vtable.
2210   if (E->isPotentiallyEvaluated() && !E->isMostDerived(getContext()))
2211     return EmitTypeidFromVTable(*this, E->getExprOperand(),
2212                                 StdTypeInfoPtrTy);
2213 
2214   QualType OperandTy = E->getExprOperand()->getType();
2215   return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
2216                                StdTypeInfoPtrTy);
2217 }
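// Example (sketch): only a potentially-evaluated typeid applied to a
// glvalue of polymorphic type takes the vtable path above:
//
//   struct P { virtual ~P(); };
//   const std::type_info &a = typeid(int);  // constant RTTI descriptor
//   const std::type_info &b = typeid(*p);   // P *p: vtable load, possibly
//                                           // null-checked -> bad_typeid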
2218 
2219 static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
2220                                           QualType DestTy) {
2221   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
2222   if (DestTy->isPointerType())
2223     return llvm::Constant::getNullValue(DestLTy);
2224 
2225   /// C++ [expr.dynamic.cast]p9:
2226   ///   A failed cast to reference type throws std::bad_cast
2227   if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
2228     return nullptr;
2229 
2230   CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
2231   return llvm::UndefValue::get(DestLTy);
2232 }
2233 
2234 llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
2235                                               const CXXDynamicCastExpr *DCE) {
2236   CGM.EmitExplicitCastExprType(DCE, this);
2237   QualType DestTy = DCE->getTypeAsWritten();
2238 
2239   QualType SrcTy = DCE->getSubExpr()->getType();
2240 
2241   // C++ [expr.dynamic.cast]p7:
2242   //   If T is "pointer to cv void," then the result is a pointer to the most
2243   //   derived object pointed to by v.
2244   const PointerType *DestPTy = DestTy->getAs<PointerType>();
2245 
2246   bool isDynamicCastToVoid;
2247   QualType SrcRecordTy;
2248   QualType DestRecordTy;
2249   if (DestPTy) {
2250     isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
2251     SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
2252     DestRecordTy = DestPTy->getPointeeType();
2253   } else {
2254     isDynamicCastToVoid = false;
2255     SrcRecordTy = SrcTy;
2256     DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
2257   }
2258 
2259   // C++ [class.cdtor]p5:
2260   //   If the operand of the dynamic_cast refers to the object under
2261   //   construction or destruction and the static type of the operand is not a
2262   //   pointer to or object of the constructor or destructor’s own class or one
2263   //   of its bases, the dynamic_cast results in undefined behavior.
2264   EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(),
2265                 SrcRecordTy);
2266 
2267   if (DCE->isAlwaysNull())
2268     if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
2269       return T;
2270 
2271   assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
2272 
2273   // C++ [expr.dynamic.cast]p4:
2274   //   If the value of v is a null pointer value in the pointer case, the result
2275   //   is the null pointer value of type T.
2276   bool ShouldNullCheckSrcValue =
2277       CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
2278                                                          SrcRecordTy);
2279 
2280   llvm::BasicBlock *CastNull = nullptr;
2281   llvm::BasicBlock *CastNotNull = nullptr;
2282   llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
2283 
2284   if (ShouldNullCheckSrcValue) {
2285     CastNull = createBasicBlock("dynamic_cast.null");
2286     CastNotNull = createBasicBlock("dynamic_cast.notnull");
2287 
2288     llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
2289     Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
2290     EmitBlock(CastNotNull);
2291   }
2292 
2293   llvm::Value *Value;
2294   if (isDynamicCastToVoid) {
2295     Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
2296                                                   DestTy);
2297   } else {
2298     assert(DestRecordTy->isRecordType() &&
2299            "destination type must be a record type!");
2300     Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
2301                                                 DestTy, DestRecordTy, CastEnd);
2302     CastNotNull = Builder.GetInsertBlock();
2303   }
2304 
2305   if (ShouldNullCheckSrcValue) {
2306     EmitBranch(CastEnd);
2307 
2308     EmitBlock(CastNull);
2309     EmitBranch(CastEnd);
2310   }
2311 
2312   EmitBlock(CastEnd);
2313 
2314   if (ShouldNullCheckSrcValue) {
2315     llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
2316     PHI->addIncoming(Value, CastNotNull);
2317     PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
2318 
2319     Value = PHI;
2320   }
2321 
2322   return Value;
2323 }
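// Example (editor's sketch) of the cases handled above:
//
//   struct B { virtual ~B(); };
//   struct D : B {};
//   D *as_d(B *b) { return dynamic_cast<D *>(b); }     // null-checked; phi
//                                                      // merges null result
//   void *whole(B *b) { return dynamic_cast<void *>(b); } // most derived
//   D &as_d_ref(B &b) { return dynamic_cast<D &>(b); } // failure throws
//                                                      // std::bad_cast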
2324