//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IsResultUnused;

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

  // Calls `Fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *E,
                           llvm::function_ref<RValue(ReturnValueSlot)> Fn);

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
    IsResultUnused(IsResultUnused) { }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//
  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents an lvalue, this method emits the address of the lvalue,
  /// then copies the result into the destination slot.
  void EmitAggLoadOfLValue(const Expr *E);

  enum ExprValueKind {
    EVK_RValue,
    EVK_NonRValue
  };

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  /// SrcIsRValue is true if source comes from an RValue.
  void EmitFinalDestCopy(QualType type, const LValue &src,
                         ExprValueKind SrcValueKind = EVK_NonRValue);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                     QualType ArrayQTy, InitListExpr *E);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  void VisitConstantExpr(ConstantExpr *E) {
    EnsureDest(E->getType());

    if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      CGF.EmitAggregateStore(Result, Dest.getAddress(),
                             E->getType().isVolatileQualified());
      return;
    }
    return Visit(E->getSubExpr());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);
  void VisitBinCmp(const BinaryOperator *E);
  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    Visit(E->getSemanticForm());
  }

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
  }

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//
/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents an lvalue, this method emits the address of the lvalue,
/// then copies the result into the destination slot.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}
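
// For illustration (hypothetical source, assumed here): a plain aggregate
// copy such as
//   struct S { int x, y; };
//   void f(struct S a) { struct S b = a; }
// reaches this path via VisitDeclRefExpr -> EmitAggLoadOfLValue, which emits
// the address of 'a' and then copies its contents into the slot for 'b'.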

/// True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}
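
// For illustration (hypothetical declaration, assumed here): under
// Objective-C garbage collection (-fobjc-gc), a record with object-pointer
// members such as
//   struct Pair { id first; id second; };   // hasObjectMember() is true
// must be copied with write barriers (via EmitGCMemmoveCollectable below)
// rather than with a plain memcpy.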

void AggExprEmitter::withReturnValueSlot(
    const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
  QualType RetTy = E->getType();
  bool RequiresDestruction =
      !Dest.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
  // its lifetime before we have the chance to emit a proper destructor call.
  bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
                 (RequiresDestruction && !Dest.getAddress().isValid());

  Address RetAddr = Address::invalid();
  Address RetAllocaAddr = Address::invalid();

  EHScopeStack::stable_iterator LifetimeEndBlock;
  llvm::Value *LifetimeSizePtr = nullptr;
  llvm::IntrinsicInst *LifetimeStartInst = nullptr;
  if (!UseTemp) {
    RetAddr = Dest.getAddress();
  } else {
    RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
    llvm::TypeSize Size =
        CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
    LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
    if (LifetimeSizePtr) {
      LifetimeStartInst =
          cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
      assert(LifetimeStartInst->getIntrinsicID() ==
                 llvm::Intrinsic::lifetime_start &&
             "Last insertion wasn't a lifetime.start?");

      CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
          NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
      LifetimeEndBlock = CGF.EHStack.stable_begin();
    }
  }

  RValue Src =
      EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
                               Dest.isExternallyDestructed()));

  if (!UseTemp)
    return;

  assert(Dest.getPointer() != Src.getAggregatePointer());
  EmitFinalDestCopy(E->getType(), Src);

  if (!RequiresDestruction && LifetimeStartInst) {
    // If there's no dtor to run, the copy was the last use of our temporary.
    // Since we're not guaranteed to be in an ExprWithCleanups, clean up
    // eagerly.
    CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
    CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
  }
}
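
// For illustration (hypothetical source, assumed here): given
//   struct S { int a[8]; };
//   struct S g(void);
//   struct S s; ...; s = g();
// the assignment's destination may alias memory visible to g(), so UseTemp is
// true: the call returns into a fresh temporary (bracketed by lifetime
// markers) and the result is then copied into 's'. When no aliasing, GC, or
// destruction concern applies, the call writes straight into Dest instead.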

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV, EVK_RValue);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
                                       ExprValueKind SrcValueKind) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  // Copy non-trivial C structs here.
  LValue DstLV = CGF.MakeAddrLValue(
      Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);

  if (SrcValueKind == EVK_RValue) {
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructMoveAssignmentOperator(DstLV, src);
      else
        CGF.callCStructMoveConstructor(DstLV, src);
      return;
    }
  } else {
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructCopyAssignmentOperator(DstLV, src);
      else
        CGF.callCStructCopyConstructor(DstLV, src);
      return;
    }
  }

  AggValueSlot srcAgg = AggValueSlot::forLValue(
      src, CGF, AggValueSlot::IsDestructed, needsGC(type),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  EmitCopy(type, Dest, srcAgg);
}

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
  LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
  CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
                        dest.isVolatile() || src.isVolatile());
}

/// Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress(CGF);

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  // Start pointer.
  if (!Field->getType()->isPointerType() ||
      !Ctx.hasSameType(Field->getType()->getPointeeType(),
                       ArrayType->getElementType())) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
  llvm::Value *IdxStart[] = { Zero, Zero };
  llvm::Value *ArrayStart = Builder.CreateInBoundsGEP(
      ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxStart, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;

  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Field->getType()->isPointerType() &&
      Ctx.hasSameType(Field->getType()->getPointeeType(),
                      ArrayType->getElementType())) {
    // End pointer.
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
        ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxEnd, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }
}
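
// For illustration (hypothetical source, assumed here): for
//   std::initializer_list<int> il = {1, 2, 3};
// the frontend materializes a temporary 'int[3]' holding {1, 2, 3}, and this
// visitor fills il's two fields with either a {begin, end} pointer pair or a
// {begin, length} pair, depending on how the library defines the second
// field.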

/// Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}
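
// For illustration (hypothetical source, assumed here): in
//   int a[8] = {1, 2};
// the remaining six elements carry an ImplicitValueInitExpr filler, which is
// trivial; if 'a' is being emitted into already-zeroed memory, no code needs
// to be generated for them at all.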

/// Emit initialization of an array from an initializer list.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType ArrayQTy, InitListExpr *E) {
  uint64_t NumInitElements = E->getNumInits();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  QualType elementType =
      CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin = Builder.CreateInBoundsGEP(
      DestPtr.getElementType(), DestPtr.getPointer(), indices,
      "arrayinit.begin");

  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
    DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
  llvm::Type *llvmElementType = begin->getType()->getPointerElementType();

  // Consider initializing the array by copying from a global. For this to be
  // more efficient than per-element initialization, the size of the elements
  // with explicit initializers should be large enough.
  if (NumInitElements * elementSize.getQuantity() > 16 &&
      elementType.isTriviallyCopyableType(CGF.getContext())) {
    CodeGen::CodeGenModule &CGM = CGF.CGM;
    ConstantEmitter Emitter(CGF);
    LangAS AS = ArrayQTy.getAddressSpace();
    if (llvm::Constant *C = Emitter.tryEmitForInitializer(E, AS, ArrayQTy)) {
      auto GV = new llvm::GlobalVariable(
          CGM.getModule(), C->getType(),
          CGM.isTypeConstant(ArrayQTy, /* ExcludeCtorDtor= */ true),
          llvm::GlobalValue::PrivateLinkage, C, "constinit",
          /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
          CGM.getContext().getTargetAddressSpace(AS));
      Emitter.finalize(GV);
      CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
      GV->setAlignment(Align.getAsAlign());
      EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GV, ArrayQTy, Align));
      return;
    }
  }

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV =
      CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
    EmitInitializationToLValue(E->getInit(i), elementLV);
  }

  // Check whether there's a non-trivial array-fill expression.
  Expr *filler = E->getArrayFiller();
  bool hasTrivialFiller = isTrivialFiller(filler);
  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, element, one, "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(
        llvmElementType, begin,
        llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
      Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    {
      // C++1z [class.temporary]p5:
      //   when a default constructor is called to initialize an element of
      //   an array with no corresponding initializer [...] the destruction of
      //   every temporary created in a default argument is sequenced before
      //   the construction of the next array element, if any
      CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
      LValue elementLV =
        CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
      if (filler)
        EmitInitializationToLValue(filler, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);
    }

    // Move on to the next element.
    llvm::Value *nextElement = Builder.CreateInBoundsGEP(
        llvmElementType, currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}
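
// For illustration (hypothetical source, assumed here): for
//   struct S { S(); ~S(); };
//   S arr[4] = { S() };
// the first element is initialized explicitly and the remaining three by the
// do/while loop above; 'arrayinit.endOfInit' tracks how far construction has
// progressed so the EH cleanup destroys only already-constructed elements if
// a later constructor throws.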

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  // If this is a unique OVE, just visit its source expression.
  if (e->isUnique())
    Visit(e->getSourceExpr());
  else
    EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  bool Destruct =
      !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
  if (Destruct)
    Slot.setExternallyDestructed();

  CGF.EmitAggExpr(E->getInitializer(), Slot);

  if (Destruct)
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
      CGF.pushLifetimeExtendedDestroy(
          CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
          CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
}
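
// For illustration (hypothetical C source, assumed here): in
//   struct S s; ...; s = (struct S){1, 2};
// the compound literal could alias 's', so for this POD type it is emitted
// as an lvalue load plus copy rather than being built directly inside 's'.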

/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
  op = op->IgnoreParenNoopCasts(ctx);
  if (auto castE = dyn_cast<CastExpr>(op)) {
    if (castE->getCastKind() == kind)
      return castE->getSubExpr();
  }
  return nullptr;
}
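
// For illustration (assumed here): the atomic cast cases below use this to
// collapse a round trip such as NonAtomicToAtomic(AtomicToNonAtomic(x)) back
// to plain 'x', since the two casts cancel out when only the padded atomic
// representation differs.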

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(CGF), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr =
      Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }
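
  // For illustration (hypothetical source, assumed here): the GCC
  // cast-to-union extension, e.g.
  //   union U { int i; float f; };
  //   union U u = (union U)42;
  // initializes the matching member in place by bitcasting the destination
  // to the operand's type ('int') and storing the operand there.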

  case CK_LValueToRValueBitCast: {
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
    Address SourceAddress =
        Builder.CreateElementBitCast(SourceLV.getAddress(CGF), CGF.Int8Ty);
    Address DestAddress =
        Builder.CreateElementBitCast(Dest.getAddress(), CGF.Int8Ty);
    llvm::Value *SizeVal = llvm::ConstantInt::get(
        CGF.SizeTy,
        CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
    Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                          atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op =
            findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
           "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::DoesNotOverlap,
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }
  case CK_AddressSpaceConversion:
     return Visit(E->getSubExpr());

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      bool Destruct =
          !Dest.isExternallyDestructed() &&
          E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
      if (Destruct)
        Dest.setExternallyDestructed();
      EnsureDest(E->getType());
      Visit(E->getSubExpr());

      if (Destruct)
        CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                        E->getType());

      return;
    }

    LLVM_FALLTHROUGH;

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLOpaqueType:
  case CK_MatrixCast:

  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitCallExpr(E, Slot);
  });
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitObjCMessageExpr(E, Slot);
  });
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

enum CompareKind {
  CK_Less,
  CK_Greater,
  CK_Equal,
};

static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                const BinaryOperator *E, llvm::Value *LHS,
                                llvm::Value *RHS, CompareKind Kind,
                                const char *NameSuffix = "") {
  QualType ArgTy = E->getLHS()->getType();
  if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
    ArgTy = CT->getElementType();

  if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
    assert(Kind == CK_Equal &&
           "member pointers may only be compared for equality");
    return CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, /*IsInequality*/ false);
  }

  // Compute the comparison instructions for the specified comparison kind.
  struct CmpInstInfo {
    const char *Name;
    llvm::CmpInst::Predicate FCmp;
    llvm::CmpInst::Predicate SCmp;
    llvm::CmpInst::Predicate UCmp;
  };
  CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
    using FI = llvm::FCmpInst;
    using II = llvm::ICmpInst;
    switch (Kind) {
    case CK_Less:
      return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
    case CK_Greater:
      return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
    case CK_Equal:
      return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
    }
    llvm_unreachable("Unrecognised CompareKind enum");
  }();

  if (ArgTy->hasFloatingRepresentation())
    return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
    auto Inst =
        ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
    return Builder.CreateICmp(Inst, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  }

  llvm_unreachable("unsupported aggregate binary expression should have "
                   "already been handled");
}

void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
  using llvm::BasicBlock;
  using llvm::PHINode;
  using llvm::Value;
  assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
                                      E->getRHS()->getType()));
  const ComparisonCategoryInfo &CmpInfo =
      CGF.getContext().CompCategories.getInfoForType(E->getType());
  assert(CmpInfo.Record->isTriviallyCopyable() &&
         "cannot copy non-trivially copyable aggregate");

  QualType ArgTy = E->getLHS()->getType();

  if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
      !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
      !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
    return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
  }
  bool IsComplex = ArgTy->isAnyComplexType();

  // Evaluate the operands to the expression and extract their values.
  auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
    RValue RV = CGF.EmitAnyExpr(E);
    if (RV.isScalar())
      return {RV.getScalarVal(), nullptr};
    if (RV.isAggregate())
      return {RV.getAggregatePointer(), nullptr};
    assert(RV.isComplex());
    return RV.getComplexVal();
  };
  auto LHSValues = EmitOperand(E->getLHS()),
       RHSValues = EmitOperand(E->getRHS());

  auto EmitCmp = [&](CompareKind K) {
    Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
                             K, IsComplex ? ".r" : "");
    if (!IsComplex)
      return Cmp;
    assert(K == CompareKind::CK_Equal);
    Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
                                 RHSValues.second, K, ".i");
    return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
  };
  auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
    return Builder.getInt(VInfo->getIntValue());
  };

  Value *Select;
  if (ArgTy->isNullPtrType()) {
    Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
  } else if (!CmpInfo.isPartial()) {
    Value *SelectOne =
        Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
                             EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
    Select = Builder.CreateSelect(EmitCmp(CK_Equal),
                                  EmitCmpRes(CmpInfo.getEqualOrEquiv()),
                                  SelectOne, "sel.eq");
  } else {
    Value *SelectEq = Builder.CreateSelect(
        EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
        EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
    Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
                                           EmitCmpRes(CmpInfo.getGreater()),
                                           SelectEq, "sel.gt");
    Select = Builder.CreateSelect(
        EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
  }
  // Create the return value in the destination slot.
  EnsureDest(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Emit the address of the first (and only) field in the comparison category
  // type, and initialize it from the constant integer value selected above.
  LValue FieldLV = CGF.EmitLValueForFieldInitialization(
      DestLV, *CmpInfo.Record->field_begin());
  CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);

  // All done! The result is in the Dest slot.
}
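
// For illustration (hypothetical source, assumed here): for
//   double a, b; ...; auto r = a <=> b;
// the result type is std::partial_ordering, so the partial branch above
// emits nested selects: less if a < b, else greater if a > b, else
// equivalent if a == b, else unordered; the chosen constant is then stored
// into r's single integer field.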

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}

/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}
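
// For illustration (hypothetical source, assumed here): in
//   __block struct S s;
//   s = makeSAndCopyBlock();   // RHS may copy a block that captures 's'
// copying the block can move 's' to the heap, so VisitBinAssign below
// evaluates the RHS first and only then emits the LHS address for the copy.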

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, CGF, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased,
                                     AggValueSlot::MayOverlap),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot = AggValueSlot::forLValue(
      LHS, CGF, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  // A non-volatile aggregate destination might have a volatile member.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);

  if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                    E->getType());
}

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();
  bool destructNonTrivialCStruct =
      !isExternallyDestructed &&
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
  isExternallyDestructed |= destructNonTrivialCStruct;
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  if (destructNonTrivialCStruct)
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                    E->getType());

  CGF.EmitBlock(ContBlock);
}
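
// For illustration (hypothetical source, assumed here): for
//   S s = cond ? f() : g();
// both arms are emitted into the same destination slot for 's', one per
// basic block, so no extra copy or phi of the aggregate is needed at the
// join point.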
1280 
1281 void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
1282   Visit(CE->getChosenSubExpr());
1283 }
1284 
1285 void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
1286   Address ArgValue = Address::invalid();
1287   Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
1288 
1289   // If EmitVAArg fails, emit an error.
1290   if (!ArgPtr.isValid()) {
1291     CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
1292     return;
1293   }
1294 
1295   EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
1296 }
1297 
1298 void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
1299   // Ensure that we have a slot, but if we already do, remember
1300   // whether it was externally destructed.
1301   bool wasExternallyDestructed = Dest.isExternallyDestructed();
1302   EnsureDest(E->getType());
1303 
1304   // We're going to push a destructor if there isn't already one.
1305   Dest.setExternallyDestructed();
1306 
1307   Visit(E->getSubExpr());
1308 
1309   // Push that destructor we promised.
1310   if (!wasExternallyDestructed)
1311     CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
1312 }
1313 
1314 void
1315 AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
1316   AggValueSlot Slot = EnsureSlot(E->getType());
1317   CGF.EmitCXXConstructExpr(E, Slot);
1318 }
1319 
1320 void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
1321     const CXXInheritedCtorInitExpr *E) {
1322   AggValueSlot Slot = EnsureSlot(E->getType());
1323   CGF.EmitInheritedCXXConstructorCall(
1324       E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
1325       E->inheritedFromVBase(), E);
1326 }
1327 
1328 void
1329 AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
1330   AggValueSlot Slot = EnsureSlot(E->getType());
1331   LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());
1332 
1333   // We'll need to enter cleanup scopes in case any of the element
1334   // initializers throws an exception.
1335   SmallVector<EHScopeStack::stable_iterator, 16> Cleanups;
1336   llvm::Instruction *CleanupDominator = nullptr;
1337 
1338   CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
1339   for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
1340                                                e = E->capture_init_end();
1341        i != e; ++i, ++CurField) {
1342     // Emit initialization
1343     LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
1344     if (CurField->hasCapturedVLAType()) {
1345       CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
1346       continue;
1347     }
1348 
1349     EmitInitializationToLValue(*i, LV);
1350 
1351     // Push a destructor if necessary.
1352     if (QualType::DestructionKind DtorKind =
1353             CurField->getType().isDestructedType()) {
1354       assert(LV.isSimple());
1355       if (CGF.needsEHCleanup(DtorKind)) {
1356         if (!CleanupDominator)
1357           CleanupDominator = CGF.Builder.CreateAlignedLoad(
1358               CGF.Int8Ty,
1359               llvm::Constant::getNullValue(CGF.Int8PtrTy),
1360               CharUnits::One()); // placeholder
1361 
1362         CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), CurField->getType(),
1363                         CGF.getDestroyer(DtorKind), false);
1364         Cleanups.push_back(CGF.EHStack.stable_begin());
1365       }
1366     }
1367   }
1368 
1369   // Deactivate all the partial cleanups in reverse order, which
1370   // generally means popping them.
1371   for (unsigned i = Cleanups.size(); i != 0; --i)
1372     CGF.DeactivateCleanupBlock(Cleanups[i-1], CleanupDominator);
1373 
1374   // Destroy the placeholder if we made one.
1375   if (CleanupDominator)
1376     CleanupDominator->eraseFromParent();
1377 }
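
// For illustration (a sketch, assuming exceptions are enabled): the loop
// above is what builds the closure object of a capturing lambda:
//
//   #include <string>
//   void g(std::string s) {
//     auto fn = [s] { return s.size(); }; // closure field copied from 's'
//   }
//
// std::string has a non-trivial destructor, so an EH-only cleanup is pushed
// right after the field is initialized and deactivated once every capture
// has been emitted successfully.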
1378 
1379 void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
1380   CodeGenFunction::RunCleanupsScope cleanups(CGF);
1381   Visit(E->getSubExpr());
1382 }
1383 
1384 void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
1385   QualType T = E->getType();
1386   AggValueSlot Slot = EnsureSlot(T);
1387   EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1388 }
1389 
1390 void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
1391   QualType T = E->getType();
1392   AggValueSlot Slot = EnsureSlot(T);
1393   EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1394 }
1395 
1396 /// Determine whether the given cast kind is known to always convert values
1397 /// with all zero bits in their value representation to values with all zero
1398 /// bits in their value representation.
1399 static bool castPreservesZero(const CastExpr *CE) {
1400   switch (CE->getCastKind()) {
1401     // No-ops.
1402   case CK_NoOp:
1403   case CK_UserDefinedConversion:
1404   case CK_ConstructorConversion:
1405   case CK_BitCast:
1406   case CK_ToUnion:
1407   case CK_ToVoid:
1408     // Conversions between (possibly-complex) integral, (possibly-complex)
1409     // floating-point, and bool.
1410   case CK_BooleanToSignedIntegral:
1411   case CK_FloatingCast:
1412   case CK_FloatingComplexCast:
1413   case CK_FloatingComplexToBoolean:
1414   case CK_FloatingComplexToIntegralComplex:
1415   case CK_FloatingComplexToReal:
1416   case CK_FloatingRealToComplex:
1417   case CK_FloatingToBoolean:
1418   case CK_FloatingToIntegral:
1419   case CK_IntegralCast:
1420   case CK_IntegralComplexCast:
1421   case CK_IntegralComplexToBoolean:
1422   case CK_IntegralComplexToFloatingComplex:
1423   case CK_IntegralComplexToReal:
1424   case CK_IntegralRealToComplex:
1425   case CK_IntegralToBoolean:
1426   case CK_IntegralToFloating:
1427     // Reinterpreting integers as pointers and vice versa.
1428   case CK_IntegralToPointer:
1429   case CK_PointerToIntegral:
1430     // Language extensions.
1431   case CK_VectorSplat:
1432   case CK_MatrixCast:
1433   case CK_NonAtomicToAtomic:
1434   case CK_AtomicToNonAtomic:
1435     return true;
1436 
1437   case CK_BaseToDerivedMemberPointer:
1438   case CK_DerivedToBaseMemberPointer:
1439   case CK_MemberPointerToBoolean:
1440   case CK_NullToMemberPointer:
1441   case CK_ReinterpretMemberPointer:
1442     // FIXME: ABI-dependent.
1443     return false;
1444 
1445   case CK_AnyPointerToBlockPointerCast:
1446   case CK_BlockPointerToObjCPointerCast:
1447   case CK_CPointerToObjCPointerCast:
1448   case CK_ObjCObjectLValueCast:
1449   case CK_IntToOCLSampler:
1450   case CK_ZeroToOCLOpaqueType:
1451     // FIXME: Check these.
1452     return false;
1453 
1454   case CK_FixedPointCast:
1455   case CK_FixedPointToBoolean:
1456   case CK_FixedPointToFloating:
1457   case CK_FixedPointToIntegral:
1458   case CK_FloatingToFixedPoint:
1459   case CK_IntegralToFixedPoint:
1460     // FIXME: Do all fixed-point types represent zero as all 0 bits?
1461     return false;
1462 
1463   case CK_AddressSpaceConversion:
1464   case CK_BaseToDerived:
1465   case CK_DerivedToBase:
1466   case CK_Dynamic:
1467   case CK_NullToPointer:
1468   case CK_PointerToBoolean:
1469     // FIXME: Preserves zeroes only if zero pointers and null pointers have the
1470     // same representation in all involved address spaces.
1471     return false;
1472 
1473   case CK_ARCConsumeObject:
1474   case CK_ARCExtendBlockObject:
1475   case CK_ARCProduceObject:
1476   case CK_ARCReclaimReturnedObject:
1477   case CK_CopyAndAutoreleaseBlockObject:
1478   case CK_ArrayToPointerDecay:
1479   case CK_FunctionToPointerDecay:
1480   case CK_BuiltinFnToFnPtr:
1481   case CK_Dependent:
1482   case CK_LValueBitCast:
1483   case CK_LValueToRValue:
1484   case CK_LValueToRValueBitCast:
1485   case CK_UncheckedDerivedToBase:
1486     return false;
1487   }
1488   llvm_unreachable("Unhandled clang::CastKind enum");
1489 }
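
// For illustration (a sketch): this predicate is what lets isSimpleZero,
// below, look through chains of value-preserving casts:
//
//   struct S { long l; float f; };
//   struct S s = { (long)0, (float)0 }; // both casts preserve all-zero bits
//
// Member-pointer casts, by contrast, are rejected: a null member pointer is
// not an all-zero-bits value on every ABI (Itanium uses -1 for null data
// member pointers).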
1490 
1491 /// isSimpleZero - If emitting this value will obviously just cause a store of
1492 /// zero to memory, return true.  This can return false if uncertain, so it just
1493 /// handles simple cases.
1494 static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
1495   E = E->IgnoreParens();
1496   while (auto *CE = dyn_cast<CastExpr>(E)) {
1497     if (!castPreservesZero(CE))
1498       break;
1499     E = CE->getSubExpr()->IgnoreParens();
1500   }
1501 
1502   // 0
1503   if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
1504     return IL->getValue() == 0;
1505   // +0.0
1506   if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
1507     return FL->getValue().isPosZero();
1508   // int()
1509   if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
1510       CGF.getTypes().isZeroInitializable(E->getType()))
1511     return true;
1512   // (int*)0 - Null pointer expressions.
1513   if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
1514     return ICE->getCastKind() == CK_NullToPointer &&
1515            CGF.getTypes().isPointerZeroInitializable(E->getType()) &&
1516            !E->HasSideEffects(CGF.getContext());
1517   // '\0'
1518   if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
1519     return CL->getValue() == 0;
1520 
1521   // Otherwise, hard case: conservatively return false.
1522   return false;
1523 }
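
// For illustration, initializers the predicate above treats as simple zeros
// (with an already-zeroed destination, their stores can be skipped):
//
//   int    a = 0;         // integer literal 0
//   double d = 0.0;       // +0.0 only; -0.0 sets the sign bit
//   char   c = '\0';      // character literal 0
//   int   *p = (int *)0;  // null pointer, when its representation is zero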
1524 
1525 
1526 void
1527 AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
1528   QualType type = LV.getType();
1529   // FIXME: Ignore result?
1530   // FIXME: Are initializers affected by volatile?
1531   if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
1532     // Storing "i32 0" to a zeroed memory location is a no-op.
1533     return;
1534   } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
1535     return EmitNullInitializationToLValue(LV);
1536   } else if (isa<NoInitExpr>(E)) {
1537     // Do nothing.
1538     return;
1539   } else if (type->isReferenceType()) {
1540     RValue RV = CGF.EmitReferenceBindingToExpr(E);
1541     return CGF.EmitStoreThroughLValue(RV, LV);
1542   }
1543 
1544   switch (CGF.getEvaluationKind(type)) {
1545   case TEK_Complex:
1546     CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
1547     return;
1548   case TEK_Aggregate:
1549     CGF.EmitAggExpr(
1550         E, AggValueSlot::forLValue(LV, CGF, AggValueSlot::IsDestructed,
1551                                    AggValueSlot::DoesNotNeedGCBarriers,
1552                                    AggValueSlot::IsNotAliased,
1553                                    AggValueSlot::MayOverlap, Dest.isZeroed()));
1554     return;
1555   case TEK_Scalar:
1556     if (LV.isSimple()) {
1557       CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
1558     } else {
1559       CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
1560     }
1561     return;
1562   }
1563   llvm_unreachable("bad evaluation kind");
1564 }
1565 
1566 void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
1567   QualType type = lv.getType();
1568 
1569   // If the destination slot is already zeroed out before the aggregate is
1570   // copied into it, we don't have to emit any zeros here.
1571   if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
1572     return;
1573 
1574   if (CGF.hasScalarEvaluationKind(type)) {
1575     // For non-aggregates, we can store the appropriate null constant.
1576     llvm::Value *null = CGF.CGM.EmitNullConstant(type);
1577     // Note that the following is not equivalent to
1578     // EmitStoreThroughBitfieldLValue for ARC types.
1579     if (lv.isBitField()) {
1580       CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
1581     } else {
1582       assert(lv.isSimple());
1583       CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
1584     }
1585   } else {
1586     // There's a potential optimization opportunity in combining
1587     // memsets; that would be easy for arrays, but relatively
1588     // difficult for structures with the current code.
1589     CGF.EmitNullInitialization(lv.getAddress(CGF), lv.getType());
1590   }
1591 }
1592 
1593 void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
1594 #if 0
1595   // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
1596   // (Length of globals? Chunks of zeroed-out space?).
1597   //
1598   // If we can, prefer a copy from a global; this is a lot less code for long
1599   // globals, and it's easier for the current optimizers to analyze.
1600   if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
1601     llvm::GlobalVariable* GV =
1602     new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
1603                              llvm::GlobalValue::InternalLinkage, C, "");
1604     EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
1605     return;
1606   }
1607 #endif
1608   if (E->hadArrayRangeDesignator())
1609     CGF.ErrorUnsupported(E, "GNU array range designator extension");
1610 
1611   if (E->isTransparent())
1612     return Visit(E->getInit(0));
1613 
1614   AggValueSlot Dest = EnsureSlot(E->getType());
1615 
1616   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1617 
1618   // Handle initialization of an array.
1619   if (E->getType()->isArrayType()) {
1620     auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
1621     EmitArrayInit(Dest.getAddress(), AType, E->getType(), E);
1622     return;
1623   }
1624 
1625   assert(E->getType()->isRecordType() && "Only support structs/unions here!");
1626 
1627   // Do struct initialization; this code just sets each individual member
1628   // to the appropriate value.  This makes bitfield support automatic;
1629   // the disadvantage is that the generated code is more difficult for
1630   // the optimizer, especially with bitfields.
1631   unsigned NumInitElements = E->getNumInits();
1632   RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
1633 
1634   // We'll need to enter cleanup scopes in case any of the element
1635   // initializers throws an exception.
1636   SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
1637   llvm::Instruction *cleanupDominator = nullptr;
1638   auto addCleanup = [&](const EHScopeStack::stable_iterator &cleanup) {
1639     cleanups.push_back(cleanup);
1640     if (!cleanupDominator) // create placeholder once needed
1641       cleanupDominator = CGF.Builder.CreateAlignedLoad(
1642           CGF.Int8Ty, llvm::Constant::getNullValue(CGF.Int8PtrTy),
1643           CharUnits::One());
1644   };
1645 
1646   unsigned curInitIndex = 0;
1647 
1648   // Emit initialization of base classes.
1649   if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
1650     assert(E->getNumInits() >= CXXRD->getNumBases() &&
1651            "missing initializer for base class");
1652     for (auto &Base : CXXRD->bases()) {
1653       assert(!Base.isVirtual() && "should not see vbases here");
1654       auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
1655       Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
1656           Dest.getAddress(), CXXRD, BaseRD,
1657           /*isBaseVirtual*/ false);
1658       AggValueSlot AggSlot = AggValueSlot::forAddr(
1659           V, Qualifiers(),
1660           AggValueSlot::IsDestructed,
1661           AggValueSlot::DoesNotNeedGCBarriers,
1662           AggValueSlot::IsNotAliased,
1663           CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
1664       CGF.EmitAggExpr(E->getInit(curInitIndex++), AggSlot);
1665 
1666       if (QualType::DestructionKind dtorKind =
1667               Base.getType().isDestructedType()) {
1668         CGF.pushDestroy(dtorKind, V, Base.getType());
1669         addCleanup(CGF.EHStack.stable_begin());
1670       }
1671     }
1672   }
1673 
1674   // Prepare a 'this' for CXXDefaultInitExprs.
1675   CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
1676 
1677   if (record->isUnion()) {
1678     // Only initialize one field of a union. The field itself is
1679     // specified by the initializer list.
1680     if (!E->getInitializedFieldInUnion()) {
1681       // Empty union; we have nothing to do.
1682 
1683 #ifndef NDEBUG
1684       // Make sure that it's really an empty union and not a failure of
1685       // semantic analysis.
1686       for (const auto *Field : record->fields())
1687         assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
1688 #endif
1689       return;
1690     }
1691 
1692     // FIXME: volatility
1693     FieldDecl *Field = E->getInitializedFieldInUnion();
1694 
1695     LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
1696     if (NumInitElements) {
1697       // Store the initializer into the field
1698       EmitInitializationToLValue(E->getInit(0), FieldLoc);
1699     } else {
1700       // Default-initialize to null.
1701       EmitNullInitializationToLValue(FieldLoc);
1702     }
1703 
1704     return;
1705   }
1706 
1707   // Here we iterate over the fields; this makes it simpler to both
1708   // default-initialize fields and skip over unnamed fields.
1709   for (const auto *field : record->fields()) {
1710     // We're done once we hit the flexible array member.
1711     if (field->getType()->isIncompleteArrayType())
1712       break;
1713 
1714     // Always skip anonymous bitfields.
1715     if (field->isUnnamedBitfield())
1716       continue;
1717 
1718     // We're done if we reach the end of the explicit initializers, we
1719     // have a zeroed object, and the rest of the fields are
1720     // zero-initializable.
1721     if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1722         CGF.getTypes().isZeroInitializable(E->getType()))
1723       break;
1724 
1725 
1726     LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
1727     // We never generate write-barriers for initialized fields.
1728     LV.setNonGC(true);
1729 
1730     if (curInitIndex < NumInitElements) {
1731       // Store the initializer into the field.
1732       EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
1733     } else {
1734       // We're out of initializers; default-initialize to null
1735       EmitNullInitializationToLValue(LV);
1736     }
1737 
1738     // Push a destructor if necessary.
1739     // FIXME: if we have an array of structures, all explicitly
1740     // initialized, we can end up pushing a linear number of cleanups.
1741     bool pushedCleanup = false;
1742     if (QualType::DestructionKind dtorKind
1743           = field->getType().isDestructedType()) {
1744       assert(LV.isSimple());
1745       if (CGF.needsEHCleanup(dtorKind)) {
1746         CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), field->getType(),
1747                         CGF.getDestroyer(dtorKind), false);
1748         addCleanup(CGF.EHStack.stable_begin());
1749         pushedCleanup = true;
1750       }
1751     }
1752 
1753     // If the GEP didn't get used because of a dead zero init or something
1754     // else, clean it up for -O0 builds and general tidiness.
1755     if (!pushedCleanup && LV.isSimple())
1756       if (llvm::GetElementPtrInst *GEP =
1757               dyn_cast<llvm::GetElementPtrInst>(LV.getPointer(CGF)))
1758         if (GEP->use_empty())
1759           GEP->eraseFromParent();
1760   }
1761 
1762   // Deactivate all the partial cleanups in reverse order, which
1763   // generally means popping them.
1764   assert((cleanupDominator || cleanups.empty()) &&
1765          "Missing cleanupDominator before deactivating cleanup blocks");
1766   for (unsigned i = cleanups.size(); i != 0; --i)
1767     CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
1768 
1769   // Destroy the placeholder if we made one.
1770   if (cleanupDominator)
1771     cleanupDominator->eraseFromParent();
1772 }
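
// For illustration (a sketch): given a struct initializer such as
//
//   #include <string>
//   struct T { std::string a; int b; };
//   T t = { "hi" };
//
// the field loop above stores the explicit initializer into 'a', pushes a
// deactivatable EH cleanup so 'a' is destroyed if a later initializer
// throws, and then null-initializes the trailing field 'b'.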
1773 
1774 void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
1775                                             llvm::Value *outerBegin) {
1776   // Emit the common subexpression.
1777   CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());
1778 
1779   Address destPtr = EnsureSlot(E->getType()).getAddress();
1780   uint64_t numElements = E->getArraySize().getZExtValue();
1781 
1782   if (!numElements)
1783     return;
1784 
1785   // destPtr is an array*. Construct an elementType* by drilling down a level.
1786   llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
1787   llvm::Value *indices[] = {zero, zero};
1788   llvm::Value *begin = Builder.CreateInBoundsGEP(
1789       destPtr.getElementType(), destPtr.getPointer(), indices,
1790       "arrayinit.begin");
1791 
1792   // Prepare to special-case multidimensional array initialization: we avoid
1793   // emitting multiple destructor loops in that case.
1794   if (!outerBegin)
1795     outerBegin = begin;
1796   ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());
1797 
1798   QualType elementType =
1799       CGF.getContext().getAsArrayType(E->getType())->getElementType();
1800   CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
1801   CharUnits elementAlign =
1802       destPtr.getAlignment().alignmentOfArrayElement(elementSize);
1803 
1804   llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
1805   llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
1806 
1807   // Jump into the body.
1808   CGF.EmitBlock(bodyBB);
1809   llvm::PHINode *index =
1810       Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
1811   index->addIncoming(zero, entryBB);
1812   llvm::Value *element = Builder.CreateInBoundsGEP(
1813       begin->getType()->getPointerElementType(), begin, index);
1814 
1815   // Prepare for a cleanup.
1816   QualType::DestructionKind dtorKind = elementType.isDestructedType();
1817   EHScopeStack::stable_iterator cleanup;
1818   if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
1819     if (outerBegin->getType() != element->getType())
1820       outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
1821     CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
1822                                        elementAlign,
1823                                        CGF.getDestroyer(dtorKind));
1824     cleanup = CGF.EHStack.stable_begin();
1825   } else {
1826     dtorKind = QualType::DK_none;
1827   }
1828 
1829   // Emit the actual filler expression.
1830   {
1831     // Temporaries created in an array initialization loop are destroyed
1832     // at the end of each iteration.
1833     CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
1834     CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
1835     LValue elementLV =
1836         CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
1837 
1838     if (InnerLoop) {
1839       // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
1840       auto elementSlot = AggValueSlot::forLValue(
1841           elementLV, CGF, AggValueSlot::IsDestructed,
1842           AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
1843           AggValueSlot::DoesNotOverlap);
1844       AggExprEmitter(CGF, elementSlot, false)
1845           .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
1846     } else
1847       EmitInitializationToLValue(E->getSubExpr(), elementLV);
1848   }
1849 
1850   // Move on to the next element.
1851   llvm::Value *nextIndex = Builder.CreateNUWAdd(
1852       index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
1853   index->addIncoming(nextIndex, Builder.GetInsertBlock());
1854 
1855   // Leave the loop if we're done.
1856   llvm::Value *done = Builder.CreateICmpEQ(
1857       nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
1858       "arrayinit.done");
1859   llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
1860   Builder.CreateCondBr(done, endBB, bodyBB);
1861 
1862   CGF.EmitBlock(endBB);
1863 
1864   // Leave the partial-array cleanup if we entered one.
1865   if (dtorKind)
1866     CGF.DeactivateCleanupBlock(cleanup, index);
1867 }
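
// For illustration (a sketch): an ArrayInitLoopExpr typically arises when an
// array is copied element by element, e.g. when a lambda captures an array
// by value:
//
//   int first() {
//     int arr[8] = {1};
//     auto fn = [arr] { return arr[0]; }; // closure field built by the loop
//     return fn();
//   }
//
// The emitted IR is a counted loop over 'arrayinit.index'; nested dimensions
// share a single partial-array destructor loop via 'outerBegin'.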
1868 
1869 void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
1870   AggValueSlot Dest = EnsureSlot(E->getType());
1871 
1872   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1873   EmitInitializationToLValue(E->getBase(), DestLV);
1874   VisitInitListExpr(E->getUpdater());
1875 }
1876 
1877 //===----------------------------------------------------------------------===//
1878 //                        Entry Points into this File
1879 //===----------------------------------------------------------------------===//
1880 
1881 /// GetNumNonZeroBytesInInit - Get an approximate count of the number of
1882 /// non-zero bytes that will be stored when outputting the initializer for the
1883 /// specified initializer expression.
1884 static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
1885   if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
1886     E = MTE->getSubExpr();
1887   E = E->IgnoreParenNoopCasts(CGF.getContext());
1888 
1889   // 0 and 0.0 won't require any non-zero stores!
1890   if (isSimpleZero(E, CGF)) return CharUnits::Zero();
1891 
1892   // If this is an initlist expr, sum up the sizes of the (present)
1893   // elements.  If this is something weird, assume the whole thing is non-zero.
1894   const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
1895   while (ILE && ILE->isTransparent())
1896     ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
1897   if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
1898     return CGF.getContext().getTypeSizeInChars(E->getType());
1899 
1900   // InitListExprs for structs have to be handled carefully.  If there are
1901   // reference members, we need to consider the size of the reference, not the
1902   // referent.  InitListExprs for unions and arrays can't have references.
1903   if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
1904     if (!RT->isUnionType()) {
1905       RecordDecl *SD = RT->getDecl();
1906       CharUnits NumNonZeroBytes = CharUnits::Zero();
1907 
1908       unsigned ILEElement = 0;
1909       if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
1910         while (ILEElement != CXXRD->getNumBases())
1911           NumNonZeroBytes +=
1912               GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
1913       for (const auto *Field : SD->fields()) {
1914         // We're done once we hit the flexible array member or run out of
1915         // InitListExpr elements.
1916         if (Field->getType()->isIncompleteArrayType() ||
1917             ILEElement == ILE->getNumInits())
1918           break;
1919         if (Field->isUnnamedBitfield())
1920           continue;
1921 
1922         const Expr *E = ILE->getInit(ILEElement++);
1923 
1924         // Reference values are always non-null and have the width of a pointer.
1925         if (Field->getType()->isReferenceType())
1926           NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
1927               CGF.getTarget().getPointerWidth(0));
1928         else
1929           NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
1930       }
1931 
1932       return NumNonZeroBytes;
1933     }
1934   }
1935 
1936   // FIXME: This overestimates the number of non-zero bytes for bit-fields.
1937   CharUnits NumNonZeroBytes = CharUnits::Zero();
1938   for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
1939     NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
1940   return NumNonZeroBytes;
1941 }
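
// For illustration, a worked example of the estimate above (assuming 4-byte
// ints):
//
//   struct Big { int a; int pad[9]; };
//   struct Big b = { 7 };
//
// Only the 4 bytes of 'a' count as non-zero; the 36 implicitly zeroed bytes
// of 'pad' contribute nothing, so the memset heuristic below will fire.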
1942 
1943 /// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
1944 /// zeros in it, emit a memset and avoid storing the individual zeros.
1945 ///
1946 static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
1947                                      CodeGenFunction &CGF) {
1948   // If the slot is already known to be zeroed, nothing to do.  Don't mess with
1949   // volatile stores.
1950   if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
1951     return;
1952 
1953   // C++ objects with a user-declared constructor don't need zeroing.
1954   if (CGF.getLangOpts().CPlusPlus)
1955     if (const RecordType *RT = CGF.getContext()
1956                        .getBaseElementType(E->getType())->getAs<RecordType>()) {
1957       const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1958       if (RD->hasUserDeclaredConstructor())
1959         return;
1960     }
1961 
1962   // If the type is 16 bytes or smaller, prefer individual stores over a memset.
1963   CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
1964   if (Size <= CharUnits::fromQuantity(16))
1965     return;
1966 
1967   // Check to see if over 3/4 of the initializer is known to be zero.  If so,
1968   // we prefer to emit memset + individual stores for the rest.
1969   CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
1970   if (NumNonZeroBytes*4 > Size)
1971     return;
1972 
1973   // Okay, it seems like a good idea to use an initial memset; emit the call.
1974   llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
1975 
1976   Address Loc = Slot.getAddress();
1977   Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
1978   CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
1979 
1980   // Tell the AggExprEmitter that the slot is known zero.
1981   Slot.setZeroed();
1982 }
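
// Continuing the worked example above: 'struct Big' occupies 40 bytes
// (> 16) of which only 4 are non-zero, and 4 * 4 = 16 <= 40, so the emitter
// issues a 40-byte memset of zero and then stores just the one non-zero
// field on top of the zeroed memory.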
1983 
1984 
1985 
1986 
1987 /// EmitAggExpr - Emit the computation of the specified expression of aggregate
1988 /// type.  The result is computed into the given slot; if the slot is ignored,
1989 /// the value of the aggregate expression is not needed.  A slot that is not
1990 /// ignored must have a valid address.
1991 void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
1992   assert(E && hasAggregateEvaluationKind(E->getType()) &&
1993          "Invalid aggregate expression to emit");
1994   assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
1995          "slot has bits but no address");
1996 
1997   // Optimize the slot if possible.
1998   CheckAggExprForMemSetUse(Slot, E, *this);
1999 
2000   AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
2001 }
2002 
2003 LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
2004   assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
2005   Address Temp = CreateMemTemp(E->getType());
2006   LValue LV = MakeAddrLValue(Temp, E->getType());
2007   EmitAggExpr(E, AggValueSlot::forLValue(
2008                      LV, *this, AggValueSlot::IsNotDestructed,
2009                      AggValueSlot::DoesNotNeedGCBarriers,
2010                      AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap));
2011   return LV;
2012 }
2013 
2014 AggValueSlot::Overlap_t
2015 CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
2016   if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
2017     return AggValueSlot::DoesNotOverlap;
2018 
2019   // If the field lies entirely within the enclosing class's nvsize, its tail
2020   // padding cannot overlap any already-initialized object. (The only subobjects
2021   // with greater addresses that might already be initialized are vbases.)
2022   const RecordDecl *ClassRD = FD->getParent();
2023   const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
2024   if (Layout.getFieldOffset(FD->getFieldIndex()) +
2025           getContext().getTypeSize(FD->getType()) <=
2026       (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
2027     return AggValueSlot::DoesNotOverlap;
2028 
2029   // The tail padding may contain values we need to preserve.
2030   return AggValueSlot::MayOverlap;
2031 }
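
// For illustration (a sketch, assuming the Itanium ABI): a potentially-
// overlapping field looks like
//
//   struct B { int i; char c; B(); }; // non-POD: sizeof 8, data size 5
//   struct D {
//     [[no_unique_address]] B b; // 'x' may live in b's tail padding
//     char x;
//   };
//
// Initializing 'b' must leave its tail padding untouched, so the slot is
// marked MayOverlap; ordinary fields can always be stored at full width.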
2032 
2033 AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
2034     const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
2035   // If the most-derived object is a field declared with [[no_unique_address]],
2036   // the tail padding of any virtual base could be reused for other subobjects
2037   // of that field's class.
2038   if (IsVirtual)
2039     return AggValueSlot::MayOverlap;
2040 
2041   // If the base class is laid out entirely within the nvsize of the derived
2042   // class, its tail padding cannot yet be initialized, so we can issue
2043   // stores at the full width of the base class.
2044   const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2045   if (Layout.getBaseClassOffset(BaseRD) +
2046           getContext().getASTRecordLayout(BaseRD).getSize() <=
2047       Layout.getNonVirtualSize())
2048     return AggValueSlot::DoesNotOverlap;
2049 
2050   // The tail padding may contain values we need to preserve.
2051   return AggValueSlot::MayOverlap;
2052 }
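
// The same reasoning, for base subobjects (a sketch, Itanium ABI):
//
//   struct B { int i; char c; B(); }; // non-POD: sizeof 8, data size 5
//   struct D : B { char x; };         // 'x' placed in B's tail padding
//
// When constructing the B subobject of a D, a full-width 8-byte store would
// clobber 'x'; only bases that end within the derived class's nvsize may be
// written at full width.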
2053 
2054 void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
2055                                         AggValueSlot::Overlap_t MayOverlap,
2056                                         bool isVolatile) {
2057   assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
2058 
2059   Address DestPtr = Dest.getAddress(*this);
2060   Address SrcPtr = Src.getAddress(*this);
2061 
2062   if (getLangOpts().CPlusPlus) {
2063     if (const RecordType *RT = Ty->getAs<RecordType>()) {
2064       CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
2065       assert((Record->hasTrivialCopyConstructor() ||
2066               Record->hasTrivialCopyAssignment() ||
2067               Record->hasTrivialMoveConstructor() ||
2068               Record->hasTrivialMoveAssignment() ||
2069               Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
2070              "Trying to aggregate-copy a type without a trivial copy/move "
2071              "constructor or assignment operator");
2072       // Ignore empty classes in C++.
2073       if (Record->isEmpty())
2074         return;
2075     }
2076   }
2077 
2078   if (getLangOpts().CUDAIsDevice) {
2079     if (Ty->isCUDADeviceBuiltinSurfaceType()) {
2080       if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
2081                                                                   Src))
2082         return;
2083     } else if (Ty->isCUDADeviceBuiltinTextureType()) {
2084       if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
2085                                                                   Src))
2086         return;
2087     }
2088   }
2089 
2090   // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
2091   // C99 6.5.16.1p3, which states "If the value being stored in an object is
2092   // read from another object that overlaps in any way the storage of the first
2093   // object, then the overlap shall be exact and the two objects shall have
2094   // qualified or unqualified versions of a compatible type."
2095   //
2096   // memcpy is not defined if the source and destination pointers are exactly
2097   // equal, but other compilers do this optimization, and almost every memcpy
2098   // implementation handles this case safely.  If there is a libc that does not
2099   // safely handle this, we can add a target hook.
2100 
2101   // Get data size info for this aggregate. Don't copy the tail padding if this
2102   // might be a potentially-overlapping subobject, since the tail padding might
2103   // be occupied by a different object. Otherwise, copying it is fine.
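  //
  // For example (a sketch): for a non-POD type with sizeof == 8 but data
  // size == 5 (an int followed by a char), a MayOverlap copy emits a 5-byte
  // memcpy, so anything living in the 3 bytes of tail padding survives.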
2104   TypeInfoChars TypeInfo;
2105   if (MayOverlap)
2106     TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
2107   else
2108     TypeInfo = getContext().getTypeInfoInChars(Ty);
2109 
2110   llvm::Value *SizeVal = nullptr;
2111   if (TypeInfo.Width.isZero()) {
2112     // But note that getTypeInfo returns 0 for a VLA.
2113     if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
2114             getContext().getAsArrayType(Ty))) {
2115       QualType BaseEltTy;
2116       SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
2117       TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
2118       assert(!TypeInfo.Width.isZero());
2119       SizeVal = Builder.CreateNUWMul(
2120           SizeVal,
2121           llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
2122     }
2123   }
2124   if (!SizeVal) {
2125     SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
2126   }
2127 
2128   // FIXME: If we have a volatile struct, the optimizer can remove what might
2129   // appear to be `extra' memory ops:
2130   //
2131   // volatile struct { int i; } a, b;
2132   //
2133   // int main() {
2134   //   a = b;
2135   //   a = b;
2136   // }
2137   //
2138   // we need to use a different call here.  We use isVolatile to indicate when
2139   // either the source or the destination is volatile.
2140 
2141   DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
2142   SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);
2143 
2144   // Don't do any of the memmove_collectable tests if GC isn't set.
2145   if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
2146     // fall through
2147   } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
2148     RecordDecl *Record = RecordTy->getDecl();
2149     if (Record->hasObjectMember()) {
2150       CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2151                                                     SizeVal);
2152       return;
2153     }
2154   } else if (Ty->isArrayType()) {
2155     QualType BaseType = getContext().getBaseElementType(Ty);
2156     if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
2157       if (RecordTy->getDecl()->hasObjectMember()) {
2158         CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2159                                                       SizeVal);
2160         return;
2161       }
2162     }
2163   }
2164 
2165   auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
2166 
2167   // Determine the metadata to describe the position of any padding in this
2168   // memcpy, as well as the TBAA tags for the members of the struct, in case
2169   // the optimizer wishes to expand it into scalar memory operations.
2170   if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
2171     Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
2172 
2173   if (CGM.getCodeGenOpts().NewStructPathTBAA) {
2174     TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
2175         Dest.getTBAAInfo(), Src.getTBAAInfo());
2176     CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
2177   }
2178 }
2179