//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

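// A minimal sketch of how this mapping is consumed (hypothetical caller;
// `CGT` is assumed to be a CodeGenTypes instance):
//   unsigned llvmCC = CGT.ClangCallConvToLLVMCallConv(CC_X86StdCall);
//   assert(llvmCC == llvm::CallingConv::X86_StdCall);
// Conventions LLVM cannot represent (e.g. __pascal above) deliberately fall
// back to llvm::CallingConv::C.
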
/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification. Either or both of RD and MD may be null. A null RD indicates
/// that there is no meaningful 'this' type, and a null MD can occur when
/// calling a method pointer.
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
                                         const CXXMethodDecl *MD) {
  QualType RecTy;
  if (RD)
    RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  else
    RecTy = Context.VoidTy;

  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getMethodQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
    llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}

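// A minimal sketch of the pass_object_size expansion performed above
// (hypothetical source declaration, not from this file):
//   void fill(void *buf __attribute__((pass_object_size(0))));
// lowers as if it were declared
//   void fill(void *buf, size_t buf_object_size);
// appendParameterTypes() pushes the extra size_t, and
// addExtParameterInfosForCall() pads the info array so the two IR-level
// parameters stay aligned with their ExtParameterInfo entries.
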
/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

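// Illustrative mapping (hypothetical declarations, not from this file):
//   __attribute__((fastcall)) void f();  // -> CC_X86FastCall
//   __attribute__((ms_abi)) void g();    // -> CC_Win64 (CC_C on Windows,
//                                        //    where it is already the default)
//   void h();                            // no attribute -> CC_C
// Note the ms_abi/sysv_abi cases return CC_C when the attribute matches the
// platform default, since no translation is needed there.
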
/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
///  so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(DeriveThisType(RD, MD));

  return ::arrangeLLVMFunctionInfo(
      *this, /*instanceMethod=*/true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

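// A sketch of the case this guards (hypothetical types):
//   struct A { A(int); };
//   struct B : virtual A { using A::A; };  // inheriting constructor
// For Ctor_Base, B's inherited constructor never constructs the virtual base
// A, so A's parameters are dropped; the Ctor_Complete variant does construct
// A and keeps them (assuming an ABI with constructor variants).
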
const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(DeriveThisType(MD->getParent(), MD));

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgs AddedArgs =
      TheCXXABI.buildStructorSignature(GD, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ constructor, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = PassProtoArgs
                              ? RequiredArgs::forPrototypePlus(
                                    FPT, TotalPrefixArgs + ExtraSuffixArgs)
                              : RequiredArgs::All;

  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type.  The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}

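// Sketch of the lowered send signature (assuming a hypothetical method):
//   - (int)addX:(int)x;   // receiver type 'Foo *'
// arranges roughly as the C function
//   int f(Foo *self, SEL _cmd, int x);
// which is why two leading ExtParameterInfo slots are reserved above before
// the method's own parameters are appended.
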
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))
    return arrangeCXXStructorDeclaration(GD);

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs.  Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(DeriveThisType(RD, CD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                .isNoProtoCallVariadic(args,
                                       cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

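// Worked example of the 'required' computation (hypothetical call):
//   int printf(const char *, ...);
//   printf("%d: %s\n", 1, "x");   // args.size() == 3
// The prototype is variadic with one formal parameter and there are no extra
// required prefix args, so required becomes RequiredArgs(1): one required
// argument, two optional ones passed through the variadic convention.
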
/// Figure out the rules for calling a function with the given formal
/// type using the given arguments.  The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 /*instanceMethod*/ false, /*chainCall*/ false,
                                 argTypes, proto->getExtInfo(), paramInfos,
                                 RequiredArgs::forPrototypePlus(proto, 1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/ {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                              ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with fewer args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
    getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type.  This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info.  We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
  assert(!required.allowsOptionalArgs() ||
         required.getNumRequiredArgs() <= argTypes.size());

  void *buffer =
    operator new(totalSizeToAlloc<ArgInfo,             ExtParameterInfo>(
                                  argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

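// Memory layout note: CGFunctionInfo uses trailing objects, so the single
// allocation above is laid out roughly as
//   [CGFunctionInfo][ArgInfo x (numArgs + 1)][ExtParameterInfo x N]
// where slot 0 of the ArgInfo array holds the return type/info and slots
// 1..numArgs hold the parameters, matching the getArgsBuffer() indexing.
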
/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
}  // namespace

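// Worked example of the expansion scheme (hypothetical type):
//   struct S { int a; _Complex float c; };
// expands as TEK_Record -> { TEK_None(int), TEK_Complex(float) }, i.e. three
// IR-level values: i32, float, float (so getExpansionSize(S) == 3). A union
// member would instead contribute only its largest field.
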
static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return std::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases: all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return std::make_unique<RecordExpansion>(std::move(Bases),
                                             std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return std::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return std::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
    BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
      CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(
        *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
          LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
          ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress(*this);
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    // Emit a primitive store via EmitStoreOfScalar, except when the lvalue
    // is a bitfield, which must go through EmitStoreThroughLValue.
    assert(isa<NoExpansion>(Exp.get()));
    if (LV.isBitField())
      EmitStoreThroughLValue(RValue::get(*AI++), LV);
    else
      EmitStoreOfScalar(*AI++, LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness.  Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers.  This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

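// Worked example on a big-endian target (values are illustrative):
//   coercing i64 0xAABBCCDD11223344 to i32 emits
//     %hi = lshr i64 %val, 32        ; keep the high bits
//     %t  = trunc i64 %hi to i32     ; yields 0xAABBCCDD
// whereas a little-endian target simply truncates to 0x11223344, matching
// what a store-then-narrower-load through memory would produce.
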
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src,
                                    Ty->getPointerTo(Src.getAddressSpace()));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateElementBitCast(Tmp, CGF.Int8Ty);
  Address SrcCasted = CGF.Builder.CreateElementBitCast(Src, CGF.Int8Ty);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
      false);
  return CGF.Builder.CreateLoad(Tmp);
}

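// A sketch of the memory path above (assuming a 12-byte source struct being
// coerced to a 16-byte destination type): the 12 source bytes are memcpy'd
// into a fresh 16-byte temporary and the load is done from there, so the
// extra destination bits are whatever the alloca held, i.e. undefined, as
// the function comment promises.
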
1271 // Function to store a first-class aggregate into memory.  We prefer to
1272 // store the elements rather than the aggregate to be more friendly to
1273 // fast-isel.
1274 // FIXME: Do we need to recurse here?
1275 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
1276                           Address Dest, bool DestIsVolatile) {
1277   // Prefer scalar stores to first-class aggregate stores.
1278   if (llvm::StructType *STy =
1279         dyn_cast<llvm::StructType>(Val->getType())) {
1280     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1281       Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i);
1282       llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
1283       CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1284     }
1285   } else {
1286     CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
1287   }
1288 }
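
// For instance (illustrative): storing a value of type {i32, double}
// becomes two scalar stores through struct GEPs rather than a single
// first-class-aggregate store.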
1289 
1290 /// CreateCoercedStore - Create a store to \arg Dst from \arg Src,
1291 /// where the source and destination may have different types; \arg Dst
1292 /// carries its own alignment information.
1293 ///
1294 /// This safely handles the case when the src type is larger than the
1295 /// destination type; the upper bits of the src will be lost.
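///
/// For example (illustrative only): storing an i64 into a destination of
/// type {i32, i32} bitcasts the destination pointer and stores directly;
/// a source larger than the destination is instead spilled to a
/// temporary and memcpy'd over.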
1296 static void CreateCoercedStore(llvm::Value *Src,
1297                                Address Dst,
1298                                bool DstIsVolatile,
1299                                CodeGenFunction &CGF) {
1300   llvm::Type *SrcTy = Src->getType();
1301   llvm::Type *DstTy = Dst.getType()->getElementType();
1302   if (SrcTy == DstTy) {
1303     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1304     return;
1305   }
1306 
1307   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1308 
1309   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1310     Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
1311     DstTy = Dst.getType()->getElementType();
1312   }
1313 
1314   llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
1315   llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
1316   if (SrcPtrTy && DstPtrTy &&
1317       SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
1318     Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
1319     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1320     return;
1321   }
1322 
1323   // If the source and destination are integer or pointer types, just do an
1324   // extension or truncation to the desired type.
1325   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1326       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1327     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1328     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1329     return;
1330   }
1331 
1332   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1333 
1334   // If store is legal, just bitcast the src pointer.
1335   if (SrcSize <= DstSize) {
1336     Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1337     BuildAggStore(CGF, Src, Dst, DstIsVolatile);
1338   } else {
1339     // Otherwise do coercion through memory. This is stupid, but
1340     // simple.
1341 
1342     // Generally SrcSize is never greater than DstSize, since this means we are
1343     // losing bits. However, this can happen in cases where the structure has
1344     // additional padding, for example due to a user specified alignment.
1345     //
1346     // FIXME: Assert that we aren't truncating non-padding bits when we
1347     // have access to that information.
1348     Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1349     CGF.Builder.CreateStore(Src, Tmp);
1350     Address Casted = CGF.Builder.CreateElementBitCast(Tmp, CGF.Int8Ty);
1351     Address DstCasted = CGF.Builder.CreateElementBitCast(Dst, CGF.Int8Ty);
1352     CGF.Builder.CreateMemCpy(DstCasted, Casted,
1353         llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
1354         false);
1355   }
1356 }
1357 
1358 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1359                                    const ABIArgInfo &info) {
1360   if (unsigned offset = info.getDirectOffset()) {
1361     addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1362     addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1363                                              CharUnits::fromQuantity(offset));
1364     addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1365   }
1366   return addr;
1367 }
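
// E.g. (illustrative): with a direct offset of 8, the address is advanced
// 8 bytes via an i8 GEP and then recast to the ABI coercion type.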
1368 
1369 namespace {
1370 
1371 /// Encapsulates information about the way function arguments from
1372 /// CGFunctionInfo should be passed to the actual LLVM IR function.
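///
/// For example (illustrative): if the return is lowered indirectly and a
/// single Clang argument expands to two IR values, IR arg 0 is the sret
/// pointer and the Clang argument maps to IR args 1 and 2.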
1373 class ClangToLLVMArgMapping {
1374   static const unsigned InvalidIndex = ~0U;
1375   unsigned InallocaArgNo;
1376   unsigned SRetArgNo;
1377   unsigned TotalIRArgs;
1378 
1379   /// Arguments of the LLVM IR function corresponding to a single Clang argument.
1380   struct IRArgs {
1381     unsigned PaddingArgIndex;
1382     // Argument is expanded to IR arguments at positions
1383     // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1384     unsigned FirstArgIndex;
1385     unsigned NumberOfArgs;
1386 
1387     IRArgs()
1388         : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1389           NumberOfArgs(0) {}
1390   };
1391 
1392   SmallVector<IRArgs, 8> ArgInfo;
1393 
1394 public:
1395   ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1396                         bool OnlyRequiredArgs = false)
1397       : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1398         ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1399     construct(Context, FI, OnlyRequiredArgs);
1400   }
1401 
1402   bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1403   unsigned getInallocaArgNo() const {
1404     assert(hasInallocaArg());
1405     return InallocaArgNo;
1406   }
1407 
1408   bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1409   unsigned getSRetArgNo() const {
1410     assert(hasSRetArg());
1411     return SRetArgNo;
1412   }
1413 
1414   unsigned totalIRArgs() const { return TotalIRArgs; }
1415 
1416   bool hasPaddingArg(unsigned ArgNo) const {
1417     assert(ArgNo < ArgInfo.size());
1418     return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1419   }
1420   unsigned getPaddingArgNo(unsigned ArgNo) const {
1421     assert(hasPaddingArg(ArgNo));
1422     return ArgInfo[ArgNo].PaddingArgIndex;
1423   }
1424 
1425   /// Returns the index of the first IR argument corresponding to ArgNo,
1426   /// and the number of IR arguments it expands to.
1427   std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1428     assert(ArgNo < ArgInfo.size());
1429     return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1430                           ArgInfo[ArgNo].NumberOfArgs);
1431   }
1432 
1433 private:
1434   void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1435                  bool OnlyRequiredArgs);
1436 };
1437 
1438 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1439                                       const CGFunctionInfo &FI,
1440                                       bool OnlyRequiredArgs) {
1441   unsigned IRArgNo = 0;
1442   bool SwapThisWithSRet = false;
1443   const ABIArgInfo &RetAI = FI.getReturnInfo();
1444 
1445   if (RetAI.getKind() == ABIArgInfo::Indirect) {
1446     SwapThisWithSRet = RetAI.isSRetAfterThis();
1447     SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1448   }
1449 
1450   unsigned ArgNo = 0;
1451   unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1452   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1453        ++I, ++ArgNo) {
1454     assert(I != FI.arg_end());
1455     QualType ArgType = I->type;
1456     const ABIArgInfo &AI = I->info;
1457     // Collect data about IR arguments corresponding to Clang argument ArgNo.
1458     auto &IRArgs = ArgInfo[ArgNo];
1459 
1460     if (AI.getPaddingType())
1461       IRArgs.PaddingArgIndex = IRArgNo++;
1462 
1463     switch (AI.getKind()) {
1464     case ABIArgInfo::Extend:
1465     case ABIArgInfo::Direct: {
1466       // FIXME: handle sseregparm someday...
1467       llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1468       if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1469         IRArgs.NumberOfArgs = STy->getNumElements();
1470       } else {
1471         IRArgs.NumberOfArgs = 1;
1472       }
1473       break;
1474     }
1475     case ABIArgInfo::Indirect:
1476       IRArgs.NumberOfArgs = 1;
1477       break;
1478     case ABIArgInfo::Ignore:
1479     case ABIArgInfo::InAlloca:
1480       // ignore and inalloca don't have matching LLVM parameters.
1481       IRArgs.NumberOfArgs = 0;
1482       break;
1483     case ABIArgInfo::CoerceAndExpand:
1484       IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1485       break;
1486     case ABIArgInfo::Expand:
1487       IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1488       break;
1489     }
1490 
1491     if (IRArgs.NumberOfArgs > 0) {
1492       IRArgs.FirstArgIndex = IRArgNo;
1493       IRArgNo += IRArgs.NumberOfArgs;
1494     }
1495 
1496     // Skip over the sret parameter when it comes second.  We already handled it
1497     // above.
1498     if (IRArgNo == 1 && SwapThisWithSRet)
1499       IRArgNo++;
1500   }
1501   assert(ArgNo == ArgInfo.size());
1502 
1503   if (FI.usesInAlloca())
1504     InallocaArgNo = IRArgNo++;
1505 
1506   TotalIRArgs = IRArgNo;
1507 }
1508 }  // namespace
1509 
1510 /***/
1511 
1512 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1513   const auto &RI = FI.getReturnInfo();
1514   return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1515 }
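
// Illustratively: on common targets a large struct returned by value is
// lowered indirectly, so a call to `struct Big f(void)` passes a hidden
// sret pointer for the result.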
1516 
1517 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1518   return ReturnTypeUsesSRet(FI) &&
1519          getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1520 }
1521 
1522 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1523   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1524     switch (BT->getKind()) {
1525     default:
1526       return false;
1527     case BuiltinType::Float:
1528       return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1529     case BuiltinType::Double:
1530       return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1531     case BuiltinType::LongDouble:
1532       return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1533     }
1534   }
1535 
1536   return false;
1537 }
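
// This matters for Objective-C message sends: e.g. on i386 a message
// returning 'double' is typically dispatched through objc_msgSend_fpret
// rather than plain objc_msgSend.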
1538 
1539 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1540   if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1541     if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1542       if (BT->getKind() == BuiltinType::LongDouble)
1543         return getTarget().useObjCFP2RetForComplexLongDouble();
1544     }
1545   }
1546 
1547   return false;
1548 }
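
// Similarly (illustrative): on x86-64, a message returning
// '_Complex long double' may require the objc_msgSend_fp2ret entry point.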
1549 
1550 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1551   const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1552   return GetFunctionType(FI);
1553 }
1554 
1555 llvm::FunctionType *
1556 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1557 
1558   bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1559   (void)Inserted;
1560   assert(Inserted && "Recursively being processed?");
1561 
1562   llvm::Type *resultType = nullptr;
1563   const ABIArgInfo &retAI = FI.getReturnInfo();
1564   switch (retAI.getKind()) {
1565   case ABIArgInfo::Expand:
1566     llvm_unreachable("Invalid ABI kind for return argument");
1567 
1568   case ABIArgInfo::Extend:
1569   case ABIArgInfo::Direct:
1570     resultType = retAI.getCoerceToType();
1571     break;
1572 
1573   case ABIArgInfo::InAlloca:
1574     if (retAI.getInAllocaSRet()) {
1575       // sret things on win32 aren't void; they return the sret pointer.
1576       QualType ret = FI.getReturnType();
1577       llvm::Type *ty = ConvertType(ret);
1578       unsigned addressSpace = Context.getTargetAddressSpace(ret);
1579       resultType = llvm::PointerType::get(ty, addressSpace);
1580     } else {
1581       resultType = llvm::Type::getVoidTy(getLLVMContext());
1582     }
1583     break;
1584 
1585   case ABIArgInfo::Indirect:
1586   case ABIArgInfo::Ignore:
1587     resultType = llvm::Type::getVoidTy(getLLVMContext());
1588     break;
1589 
1590   case ABIArgInfo::CoerceAndExpand:
1591     resultType = retAI.getUnpaddedCoerceAndExpandType();
1592     break;
1593   }
1594 
1595   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1596   SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1597 
1598   // Add type for sret argument.
1599   if (IRFunctionArgs.hasSRetArg()) {
1600     QualType Ret = FI.getReturnType();
1601     llvm::Type *Ty = ConvertType(Ret);
1602     unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1603     ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1604         llvm::PointerType::get(Ty, AddressSpace);
1605   }
1606 
1607   // Add type for inalloca argument.
1608   if (IRFunctionArgs.hasInallocaArg()) {
1609     auto ArgStruct = FI.getArgStruct();
1610     assert(ArgStruct);
1611     ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1612   }
1613 
1614   // Add in all of the required arguments.
1615   unsigned ArgNo = 0;
1616   CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1617                                      ie = it + FI.getNumRequiredArgs();
1618   for (; it != ie; ++it, ++ArgNo) {
1619     const ABIArgInfo &ArgInfo = it->info;
1620 
1621     // Insert a padding type to ensure proper alignment.
1622     if (IRFunctionArgs.hasPaddingArg(ArgNo))
1623       ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1624           ArgInfo.getPaddingType();
1625 
1626     unsigned FirstIRArg, NumIRArgs;
1627     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1628 
1629     switch (ArgInfo.getKind()) {
1630     case ABIArgInfo::Ignore:
1631     case ABIArgInfo::InAlloca:
1632       assert(NumIRArgs == 0);
1633       break;
1634 
1635     case ABIArgInfo::Indirect: {
1636       assert(NumIRArgs == 1);
1637       // Indirect arguments are always on the stack, which is in the alloca addr space.
1638       llvm::Type *LTy = ConvertTypeForMem(it->type);
1639       ArgTypes[FirstIRArg] = LTy->getPointerTo(
1640           CGM.getDataLayout().getAllocaAddrSpace());
1641       break;
1642     }
1643 
1644     case ABIArgInfo::Extend:
1645     case ABIArgInfo::Direct: {
1646       // Fast-isel and the optimizer generally like scalar values better than
1647       // FCAs, so we flatten them if this is safe to do for this argument.
1648       llvm::Type *argType = ArgInfo.getCoerceToType();
1649       llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1650       if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1651         assert(NumIRArgs == st->getNumElements());
1652         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1653           ArgTypes[FirstIRArg + i] = st->getElementType(i);
1654       } else {
1655         assert(NumIRArgs == 1);
1656         ArgTypes[FirstIRArg] = argType;
1657       }
1658       break;
1659     }
1660 
1661     case ABIArgInfo::CoerceAndExpand: {
1662       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1663       for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1664         *ArgTypesIter++ = EltTy;
1665       }
1666       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1667       break;
1668     }
1669 
1670     case ABIArgInfo::Expand:
1671       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1672       getExpandedTypes(it->type, ArgTypesIter);
1673       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1674       break;
1675     }
1676   }
1677 
1678   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1679   assert(Erased && "Not in set?");
1680 
1681   return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1682 }
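
// Example (illustrative only): given `struct S { float x, y; };` and
// `struct S f(struct S a, int b);`, a target that flattens S might build
//   { float, float } @f(float, float, i32)
// while an sret-style lowering would instead produce something like
//   void @f(%struct.S* sret, %struct.S* byval, i32)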
1683 
1684 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1685   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1686   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1687 
1688   if (!isFuncTypeConvertible(FPT))
1689     return llvm::StructType::get(getLLVMContext());
1690 
1691   return GetFunctionType(GD);
1692 }
1693 
1694 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1695                                                llvm::AttrBuilder &FuncAttrs,
1696                                                const FunctionProtoType *FPT) {
1697   if (!FPT)
1698     return;
1699 
1700   if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1701       FPT->isNothrow())
1702     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1703 }
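
// E.g. (illustrative): a callee declared `void f() noexcept` has a
// resolved exception specification and is marked 'nounwind' here.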
1704 
1705 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1706                                                bool AttrOnCallSite,
1707                                                llvm::AttrBuilder &FuncAttrs) {
1708   // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1709   if (!HasOptnone) {
1710     if (CodeGenOpts.OptimizeSize)
1711       FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1712     if (CodeGenOpts.OptimizeSize == 2)
1713       FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1714   }
1715 
1716   if (CodeGenOpts.DisableRedZone)
1717     FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1718   if (CodeGenOpts.IndirectTlsSegRefs)
1719     FuncAttrs.addAttribute("indirect-tls-seg-refs");
1720   if (CodeGenOpts.NoImplicitFloat)
1721     FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1722 
1723   if (AttrOnCallSite) {
1724     // Attributes that should go on the call site only.
1725     if (!CodeGenOpts.SimplifyLibCalls ||
1726         CodeGenOpts.isNoBuiltinFunc(Name.data()))
1727       FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1728     if (!CodeGenOpts.TrapFuncName.empty())
1729       FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1730   } else {
1731     StringRef FpKind;
1732     switch (CodeGenOpts.getFramePointer()) {
1733     case CodeGenOptions::FramePointerKind::None:
1734       FpKind = "none";
1735       break;
1736     case CodeGenOptions::FramePointerKind::NonLeaf:
1737       FpKind = "non-leaf";
1738       break;
1739     case CodeGenOptions::FramePointerKind::All:
1740       FpKind = "all";
1741       break;
1742     }
1743     FuncAttrs.addAttribute("frame-pointer", FpKind);
1744 
1745     FuncAttrs.addAttribute("less-precise-fpmad",
1746                            llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1747 
1748     if (CodeGenOpts.NullPointerIsValid)
1749       FuncAttrs.addAttribute("null-pointer-is-valid", "true");
1750     if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::Invalid)
1751       FuncAttrs.addAttribute("denormal-fp-math",
1752                              llvm::denormalModeName(CodeGenOpts.FPDenormalMode));
1753 
1754     FuncAttrs.addAttribute("no-trapping-math",
1755                            llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1756 
1757     // Strict (compliant) code is the default, so only add this attribute to
1758     // indicate that we are trying to work around a problem case.
1759     if (!CodeGenOpts.StrictFloatCastOverflow)
1760       FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1761 
1762     // TODO: Are these all needed?
1763     // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1764     FuncAttrs.addAttribute("no-infs-fp-math",
1765                            llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1766     FuncAttrs.addAttribute("no-nans-fp-math",
1767                            llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1768     FuncAttrs.addAttribute("unsafe-fp-math",
1769                            llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1770     FuncAttrs.addAttribute("use-soft-float",
1771                            llvm::toStringRef(CodeGenOpts.SoftFloat));
1772     FuncAttrs.addAttribute("stack-protector-buffer-size",
1773                            llvm::utostr(CodeGenOpts.SSPBufferSize));
1774     FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1775                            llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1776     FuncAttrs.addAttribute(
1777         "correctly-rounded-divide-sqrt-fp-math",
1778         llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1779 
1780     if (getLangOpts().OpenCL)
1781       FuncAttrs.addAttribute("denorms-are-zero",
1782                              llvm::toStringRef(CodeGenOpts.FlushDenorm));
1783 
1784     // TODO: Reciprocal estimate codegen options should apply to instructions?
1785     const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1786     if (!Recips.empty())
1787       FuncAttrs.addAttribute("reciprocal-estimates",
1788                              llvm::join(Recips, ","));
1789 
1790     if (!CodeGenOpts.PreferVectorWidth.empty() &&
1791         CodeGenOpts.PreferVectorWidth != "none")
1792       FuncAttrs.addAttribute("prefer-vector-width",
1793                              CodeGenOpts.PreferVectorWidth);
1794 
1795     if (CodeGenOpts.StackRealignment)
1796       FuncAttrs.addAttribute("stackrealign");
1797     if (CodeGenOpts.Backchain)
1798       FuncAttrs.addAttribute("backchain");
1799 
1800     if (CodeGenOpts.SpeculativeLoadHardening)
1801       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1802   }
1803 
1804   if (getLangOpts().assumeFunctionsAreConvergent()) {
1805     // Conservatively, mark all functions and calls in CUDA and OpenCL as
1806     // convergent (meaning, they may call an intrinsically convergent op, such
1807     // as __syncthreads() / barrier(), and so can't have certain optimizations
1808     // applied around them).  LLVM will remove this attribute where it safely
1809     // can.
1810     FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1811   }
1812 
1813   if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1814     // Exceptions aren't supported in CUDA device code.
1815     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1816 
1817     // Respect -fcuda-flush-denormals-to-zero.
1818     if (CodeGenOpts.FlushDenorm)
1819       FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1820   }
1821 
1822   for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1823     StringRef Var, Value;
1824     std::tie(Var, Value) = Attr.split('=');
1825     FuncAttrs.addAttribute(Var, Value);
1826   }
1827 }
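
// For the final loop above (illustrative): a default attribute string of
// "foo=bar" is split at the first '=' and attached as the string
// attribute "foo"="bar"; a bare "foo" gets an empty value.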
1828 
1829 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1830   llvm::AttrBuilder FuncAttrs;
1831   ConstructDefaultFnAttrList(F.getName(), F.hasOptNone(),
1832                              /* AttrOnCallSite = */ false, FuncAttrs);
1833   F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1834 }
1835 
1836 void CodeGenModule::ConstructAttributeList(
1837     StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1838     llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1839   llvm::AttrBuilder FuncAttrs;
1840   llvm::AttrBuilder RetAttrs;
1841 
1842   CallingConv = FI.getEffectiveCallingConvention();
1843   if (FI.isNoReturn())
1844     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1845 
1846   // If we have information about the function prototype, we can learn
1847   // attributes from there.
1848   AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1849                                      CalleeInfo.getCalleeFunctionProtoType());
1850 
1851   const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
1852 
1853   bool HasOptnone = false;
1854   // FIXME: handle sseregparm someday...
1855   if (TargetDecl) {
1856     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1857       FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1858     if (TargetDecl->hasAttr<NoThrowAttr>())
1859       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1860     if (TargetDecl->hasAttr<NoReturnAttr>())
1861       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1862     if (TargetDecl->hasAttr<ColdAttr>())
1863       FuncAttrs.addAttribute(llvm::Attribute::Cold);
1864     if (TargetDecl->hasAttr<NoDuplicateAttr>())
1865       FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1866     if (TargetDecl->hasAttr<ConvergentAttr>())
1867       FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1868 
1869     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1870       AddAttributesFromFunctionProtoType(
1871           getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1872       const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1873       const bool IsVirtualCall = MD && MD->isVirtual();
1874       // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
1875       // virtual function. These attributes are not inherited by overloads.
1876       if (!(AttrOnCallSite && IsVirtualCall)) {
1877         if (Fn->isNoReturn())
1878           FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1879 
1880         const auto *NBA = Fn->getAttr<NoBuiltinAttr>();
1881         bool HasWildcard = NBA && llvm::is_contained(NBA->builtinNames(), "*");
1882         if (getLangOpts().NoBuiltin || HasWildcard)
1883           FuncAttrs.addAttribute("no-builtins");
1884         else {
1885           auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
1886             SmallString<32> AttributeName;
1887             AttributeName += "no-builtin-";
1888             AttributeName += BuiltinName;
1889             FuncAttrs.addAttribute(AttributeName);
1890           };
1891           llvm::for_each(getLangOpts().NoBuiltinFuncs, AddNoBuiltinAttr);
1892           if (NBA)
1893             llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
1894         }
1895       }
1896     }
1897 
1898     // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1899     if (TargetDecl->hasAttr<ConstAttr>()) {
1900       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1901       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1902     } else if (TargetDecl->hasAttr<PureAttr>()) {
1903       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1904       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1905     } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1906       FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1907       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1908     }
1909     if (TargetDecl->hasAttr<RestrictAttr>())
1910       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1911     if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1912         !CodeGenOpts.NullPointerIsValid)
1913       RetAttrs.addAttribute(llvm::Attribute::NonNull);
1914     if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1915       FuncAttrs.addAttribute("no_caller_saved_registers");
1916     if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1917       FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1918 
1919     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1920     if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1921       Optional<unsigned> NumElemsParam;
1922       if (AllocSize->getNumElemsParam().isValid())
1923         NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1924       FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1925                                  NumElemsParam);
1926     }
1927   }
1928 
1929   ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1930 
1931   // This must run after constructing the default function attribute list
1932   // to ensure that the speculative load hardening attribute is removed
1933   // in the case where the -mspeculative-load-hardening flag was passed.
1934   if (TargetDecl) {
1935     if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
1936       FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
1937     if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
1938       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1939   }
1940 
1941   if (CodeGenOpts.EnableSegmentedStacks &&
1942       !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1943     FuncAttrs.addAttribute("split-stack");
1944 
1945   // Add NonLazyBind attribute to function declarations when -fno-plt
1946   // is used.
1947   if (TargetDecl && CodeGenOpts.NoPLT) {
1948     if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1949       if (!Fn->isDefined() && !AttrOnCallSite) {
1950         FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1951       }
1952     }
1953   }
1954 
1955   if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1956     if (getLangOpts().OpenCLVersion <= 120) {
1957       // In OpenCL v1.2, work groups are always uniform.
1958       FuncAttrs.addAttribute("uniform-work-group-size", "true");
1959     } else {
1960       // In OpenCL v2.0, work groups may or may not be uniform. The
1961       // '-cl-uniform-work-group-size' compile option hints to the
1962       // compiler that the global work-size is a multiple of the
1963       // work-group size specified to clEnqueueNDRangeKernel
1964       // (i.e. work groups are uniform).
1965       FuncAttrs.addAttribute("uniform-work-group-size",
1966                              llvm::toStringRef(CodeGenOpts.UniformWGSize));
1967     }
1968   }
1969 
1970   if (!AttrOnCallSite) {
1971     bool DisableTailCalls = false;
1972 
1973     if (CodeGenOpts.DisableTailCalls)
1974       DisableTailCalls = true;
1975     else if (TargetDecl) {
1976       if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1977           TargetDecl->hasAttr<AnyX86InterruptAttr>())
1978         DisableTailCalls = true;
1979       else if (CodeGenOpts.NoEscapingBlockTailCalls) {
1980         if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
1981           if (!BD->doesNotEscape())
1982             DisableTailCalls = true;
1983       }
1984     }
1985 
1986     FuncAttrs.addAttribute("disable-tail-calls",
1987                            llvm::toStringRef(DisableTailCalls));
1988     GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
1989 
1990     if (CodeGenOpts.ReturnProtector)
1991       FuncAttrs.addAttribute("ret-protector");
1992   }
1993 
1994   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1995 
1996   QualType RetTy = FI.getReturnType();
1997   const ABIArgInfo &RetAI = FI.getReturnInfo();
1998   switch (RetAI.getKind()) {
1999   case ABIArgInfo::Extend:
2000     if (RetAI.isSignExt())
2001       RetAttrs.addAttribute(llvm::Attribute::SExt);
2002     else
2003       RetAttrs.addAttribute(llvm::Attribute::ZExt);
2004     LLVM_FALLTHROUGH;
2005   case ABIArgInfo::Direct:
2006     if (RetAI.getInReg())
2007       RetAttrs.addAttribute(llvm::Attribute::InReg);
2008     break;
2009   case ABIArgInfo::Ignore:
2010     break;
2011 
2012   case ABIArgInfo::InAlloca:
2013   case ABIArgInfo::Indirect: {
2014     // inalloca and sret disable readnone and readonly
2015     FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2016       .removeAttribute(llvm::Attribute::ReadNone);
2017     break;
2018   }
2019 
2020   case ABIArgInfo::CoerceAndExpand:
2021     break;
2022 
2023   case ABIArgInfo::Expand:
2024     llvm_unreachable("Invalid ABI kind for return argument");
2025   }
2026 
2027   if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2028     QualType PTy = RefTy->getPointeeType();
2029     if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2030       RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2031                                         .getQuantity());
2032     else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2033              !CodeGenOpts.NullPointerIsValid)
2034       RetAttrs.addAttribute(llvm::Attribute::NonNull);
2035   }
2036 
2037   bool hasUsedSRet = false;
2038   SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2039 
2040   // Attach attributes to sret.
2041   if (IRFunctionArgs.hasSRetArg()) {
2042     llvm::AttrBuilder SRETAttrs;
2043     SRETAttrs.addAttribute(llvm::Attribute::StructRet);
2044     hasUsedSRet = true;
2045     if (RetAI.getInReg())
2046       SRETAttrs.addAttribute(llvm::Attribute::InReg);
2047     ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2048         llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2049   }
2050 
2051   // Attach attributes to inalloca argument.
2052   if (IRFunctionArgs.hasInallocaArg()) {
2053     llvm::AttrBuilder Attrs;
2054     Attrs.addAttribute(llvm::Attribute::InAlloca);
2055     ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2056         llvm::AttributeSet::get(getLLVMContext(), Attrs);
2057   }
2058 
2059   unsigned ArgNo = 0;
2060   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2061                                           E = FI.arg_end();
2062        I != E; ++I, ++ArgNo) {
2063     QualType ParamType = I->type;
2064     const ABIArgInfo &AI = I->info;
2065     llvm::AttrBuilder Attrs;
2066 
2067     // Add attribute for padding argument, if necessary.
2068     if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2069       if (AI.getPaddingInReg()) {
2070         ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2071             llvm::AttributeSet::get(
2072                 getLLVMContext(),
2073                 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2074       }
2075     }
2076 
2077     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2078     // have the corresponding parameter variable.  It doesn't make
2079     // sense to do it here because parameters are so messed up.
2080     switch (AI.getKind()) {
2081     case ABIArgInfo::Extend:
2082       if (AI.isSignExt())
2083         Attrs.addAttribute(llvm::Attribute::SExt);
2084       else
2085         Attrs.addAttribute(llvm::Attribute::ZExt);
2086       LLVM_FALLTHROUGH;
2087     case ABIArgInfo::Direct:
2088       if (ArgNo == 0 && FI.isChainCall())
2089         Attrs.addAttribute(llvm::Attribute::Nest);
2090       else if (AI.getInReg())
2091         Attrs.addAttribute(llvm::Attribute::InReg);
2092       break;
2093 
2094     case ABIArgInfo::Indirect: {
2095       if (AI.getInReg())
2096         Attrs.addAttribute(llvm::Attribute::InReg);
2097 
2098       if (AI.getIndirectByVal())
2099         Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
2100 
2101       CharUnits Align = AI.getIndirectAlign();
2102 
2103       // In a byval argument, it is important that the required
2104       // alignment of the type is honored, as LLVM might be creating a
2105       // *new* stack object, and needs to know what alignment to give
2106       // it. (Sometimes it can deduce a sensible alignment on its own,
2107       // but not if clang decides it must emit a packed struct, or the
2108       // user specifies increased alignment requirements.)
2109       //
2110       // This is different from indirect *not* byval, where the object
2111       // exists already, and the align attribute is purely
2112       // informative.
2113       assert(!Align.isZero());
2114 
2115       // For now, only add this when we have a byval argument.
2116       // TODO: be less lazy about updating test cases.
2117       if (AI.getIndirectByVal())
2118         Attrs.addAlignmentAttr(Align.getQuantity());
2119 
2120       // byval disables readnone and readonly.
2121       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2122         .removeAttribute(llvm::Attribute::ReadNone);
2123       break;
2124     }
2125     case ABIArgInfo::Ignore:
2126     case ABIArgInfo::Expand:
2127     case ABIArgInfo::CoerceAndExpand:
2128       break;
2129 
2130     case ABIArgInfo::InAlloca:
2131       // inalloca disables readnone and readonly.
2132       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2133           .removeAttribute(llvm::Attribute::ReadNone);
2134       continue;
2135     }
2136 
2137     if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2138       QualType PTy = RefTy->getPointeeType();
2139       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2140         Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2141                                        .getQuantity());
2142       else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2143                !CodeGenOpts.NullPointerIsValid)
2144         Attrs.addAttribute(llvm::Attribute::NonNull);
2145     }
2146 
2147     switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2148     case ParameterABI::Ordinary:
2149       break;
2150 
2151     case ParameterABI::SwiftIndirectResult: {
2152       // Add 'sret' if we haven't already used it for something, but
2153       // only if the result is void.
2154       if (!hasUsedSRet && RetTy->isVoidType()) {
2155         Attrs.addAttribute(llvm::Attribute::StructRet);
2156         hasUsedSRet = true;
2157       }
2158 
2159       // Add 'noalias' in either case.
2160       Attrs.addAttribute(llvm::Attribute::NoAlias);
2161 
2162       // Add 'dereferenceable' and 'alignment'.
2163       auto PTy = ParamType->getPointeeType();
2164       if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2165         auto info = getContext().getTypeInfoInChars(PTy);
2166         Attrs.addDereferenceableAttr(info.first.getQuantity());
2167         Attrs.addAttribute(llvm::Attribute::getWithAlignment(
2168             getLLVMContext(), info.second.getAsAlign()));
2169       }
2170       break;
2171     }
2172 
2173     case ParameterABI::SwiftErrorResult:
2174       Attrs.addAttribute(llvm::Attribute::SwiftError);
2175       break;
2176 
2177     case ParameterABI::SwiftContext:
2178       Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2179       break;
2180     }
2181 
2182     if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2183       Attrs.addAttribute(llvm::Attribute::NoCapture);
2184 
2185     if (Attrs.hasAttributes()) {
2186       unsigned FirstIRArg, NumIRArgs;
2187       std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2188       for (unsigned i = 0; i < NumIRArgs; i++)
2189         ArgAttrs[FirstIRArg + i] =
2190             llvm::AttributeSet::get(getLLVMContext(), Attrs);
2191     }
2192   }
2193   assert(ArgNo == FI.arg_size());
2194 
2195   AttrList = llvm::AttributeList::get(
2196       getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2197       llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2198 }
2199 
2200 /// An argument came in as a promoted argument; demote it back to its
2201 /// declared type.
2202 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2203                                          const VarDecl *var,
2204                                          llvm::Value *value) {
2205   llvm::Type *varType = CGF.ConvertType(var->getType());
2206 
2207   // This can happen with promotions that actually don't change the
2208   // underlying type, like the enum promotions.
2209   if (value->getType() == varType) return value;
2210 
2211   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2212          && "unexpected promotion type");
2213 
2214   if (isa<llvm::IntegerType>(varType))
2215     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2216 
2217   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2218 }
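
// E.g. (illustrative): a K&R-promoted 'float' parameter arrives as a
// double and is truncated back to float here ("arg.unpromote").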
2219 
2220 /// Returns the attribute (either parameter attribute, or function
2221 /// attribute), which declares argument ArgNo to be non-null.
2222 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2223                                          QualType ArgType, unsigned ArgNo) {
2224   // FIXME: __attribute__((nonnull)) can also be applied to:
2225   //   - references to pointers, where the pointee is known to be
2226   //     nonnull (apparently a Clang extension)
2227   //   - transparent unions containing pointers
2228   // In the former case, LLVM IR cannot represent the constraint. In
2229   // the latter case, we have no guarantee that the transparent union
2230   // is in fact passed as a pointer.
2231   if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2232     return nullptr;
2233   // First, check attribute on parameter itself.
2234   if (PVD) {
2235     if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2236       return ParmNNAttr;
2237   }
2238   // Check function attributes.
2239   if (!FD)
2240     return nullptr;
2241   for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2242     if (NNAttr->isNonNull(ArgNo))
2243       return NNAttr;
2244   }
2245   return nullptr;
2246 }
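
// E.g. (illustrative): for `void f(int *p) __attribute__((nonnull(1)));`
// the attribute is found on the function declaration, while an attribute
// written directly on the parameter is found by the PVD check first.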
2247 
2248 namespace {
2249   struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2250     Address Temp;
2251     Address Arg;
2252     CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2253     void Emit(CodeGenFunction &CGF, Flags flags) override {
2254       llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2255       CGF.Builder.CreateStore(errorValue, Arg);
2256     }
2257   };
2258 }
2259 
2260 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2261                                          llvm::Function *Fn,
2262                                          const FunctionArgList &Args) {
2263   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2264     // Naked functions don't have prologues.
2265     return;
2266 
2267   // If this is an implicit-return-zero function, go ahead and
2268   // initialize the return value.  TODO: it might be nice to have
2269   // a more general mechanism for this that didn't require synthesized
2270   // return statements.
2271   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2272     if (FD->hasImplicitReturnZero()) {
2273       QualType RetTy = FD->getReturnType().getUnqualifiedType();
2274       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2275       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2276       Builder.CreateStore(Zero, ReturnValue);
2277     }
2278   }
2279 
2280   // FIXME: We no longer need the types from FunctionArgList; lift up and
2281   // simplify.
2282 
2283   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2284   // Flattened function arguments.
2285   SmallVector<llvm::Value *, 16> FnArgs;
2286   FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2287   for (auto &Arg : Fn->args()) {
2288     FnArgs.push_back(&Arg);
2289   }
2290   assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2291 
2292   // If we're using inalloca, all the memory arguments are GEPs off of the last
2293   // parameter, which is a pointer to the complete memory area.
2294   Address ArgStruct = Address::invalid();
2295   if (IRFunctionArgs.hasInallocaArg()) {
2296     ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2297                         FI.getArgStructAlignment());
2298 
2299     assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2300   }
2301 
2302   // Name the struct return parameter.
2303   if (IRFunctionArgs.hasSRetArg()) {
2304     auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2305     AI->setName("agg.result");
2306     AI->addAttr(llvm::Attribute::NoAlias);
2307   }
2308 
2309   // Track if we received the parameter as a pointer (indirect, byval, or
2310   // inalloca).  If we already have a pointer, EmitParmDecl doesn't need to
2311   // copy it into a local alloca for us.
2312   SmallVector<ParamValue, 16> ArgVals;
2313   ArgVals.reserve(Args.size());
2314 
2315   // Create a pointer value for every parameter declaration.  This usually
2316   // entails copying one or more LLVM IR arguments into an alloca.  Don't push
2317   // any cleanups or do anything that might unwind.  We do that separately, so
2318   // we can push the cleanups in the correct order for the ABI.
2319   assert(FI.arg_size() == Args.size() &&
2320          "Mismatch between function signature & arguments.");
2321   unsigned ArgNo = 0;
2322   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2323   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2324        i != e; ++i, ++info_it, ++ArgNo) {
2325     const VarDecl *Arg = *i;
2326     const ABIArgInfo &ArgI = info_it->info;
2327 
2328     bool isPromoted =
2329       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2330     // We are converting from ABIArgInfo type to VarDecl type directly, unless
2331     // the parameter is promoted. In this case we convert to
2332     // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2333     QualType Ty = isPromoted ? info_it->type : Arg->getType();
2334     assert(hasScalarEvaluationKind(Ty) ==
2335            hasScalarEvaluationKind(Arg->getType()));
2336 
2337     unsigned FirstIRArg, NumIRArgs;
2338     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2339 
2340     switch (ArgI.getKind()) {
2341     case ABIArgInfo::InAlloca: {
2342       assert(NumIRArgs == 0);
2343       auto FieldIndex = ArgI.getInAllocaFieldIndex();
2344       Address V =
2345           Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2346       ArgVals.push_back(ParamValue::forIndirect(V));
2347       break;
2348     }
2349 
2350     case ABIArgInfo::Indirect: {
2351       assert(NumIRArgs == 1);
2352       Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2353 
2354       if (!hasScalarEvaluationKind(Ty)) {
2355         // Aggregates and complex variables are accessed by reference.  All we
2356         // need to do is realign the value, if requested.
2357         Address V = ParamAddr;
2358         if (ArgI.getIndirectRealign()) {
2359           Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2360 
2361           // Copy from the incoming argument pointer to the temporary with the
2362           // appropriate alignment.
2363           //
2364           // FIXME: We should have a common utility for generating an aggregate
2365           // copy.
2366           CharUnits Size = getContext().getTypeSizeInChars(Ty);
2367           auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2368           Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2369           Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2370           Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2371           V = AlignedTemp;
2372         }
2373         ArgVals.push_back(ParamValue::forIndirect(V));
2374       } else {
2375         // Load scalar value from indirect argument.
2376         llvm::Value *V =
2377             EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2378 
2379         if (isPromoted)
2380           V = emitArgumentDemotion(*this, Arg, V);
2381         ArgVals.push_back(ParamValue::forDirect(V));
2382       }
2383       break;
2384     }
2385 
2386     case ABIArgInfo::Extend:
2387     case ABIArgInfo::Direct: {
2388 
2389       // If we have the trivial case, handle it with no muss and fuss.
2390       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2391           ArgI.getCoerceToType() == ConvertType(Ty) &&
2392           ArgI.getDirectOffset() == 0) {
2393         assert(NumIRArgs == 1);
2394         llvm::Value *V = FnArgs[FirstIRArg];
2395         auto AI = cast<llvm::Argument>(V);
2396 
2397         if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2398           if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2399                              PVD->getFunctionScopeIndex()) &&
2400               !CGM.getCodeGenOpts().NullPointerIsValid)
2401             AI->addAttr(llvm::Attribute::NonNull);
2402 
2403           QualType OTy = PVD->getOriginalType();
2404           if (const auto *ArrTy =
2405               getContext().getAsConstantArrayType(OTy)) {
2406             // A C99 array parameter declaration with the static keyword also
2407             // indicates dereferenceability, and if the size is constant we can
2408             // use the dereferenceable attribute (which requires the size in
2409             // bytes).
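            // E.g. (illustrative): `void f(int a[static 4])` can yield
            // dereferenceable(16) on the argument when sizeof(int) == 4.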
2410             if (ArrTy->getSizeModifier() == ArrayType::Static) {
2411               QualType ETy = ArrTy->getElementType();
2412               uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2413               if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2414                   ArrSize) {
2415                 llvm::AttrBuilder Attrs;
2416                 Attrs.addDereferenceableAttr(
2417                   getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2418                 AI->addAttrs(Attrs);
2419               } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
2420                          !CGM.getCodeGenOpts().NullPointerIsValid) {
2421                 AI->addAttr(llvm::Attribute::NonNull);
2422               }
2423             }
2424           } else if (const auto *ArrTy =
2425                      getContext().getAsVariableArrayType(OTy)) {
2426             // For C99 VLAs with the static keyword, we don't know the size so
2427             // we can't use the dereferenceable attribute, but in addrspace(0)
2428             // we know that it must be nonnull.
2429             if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2430                 !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
2431                 !CGM.getCodeGenOpts().NullPointerIsValid)
2432               AI->addAttr(llvm::Attribute::NonNull);
2433           }
2434 
2435           const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2436           if (!AVAttr)
2437             if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2438               AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2439           if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2440             // If the alignment-assumption sanitizer is enabled, we do *not*
2441             // add the alignment attribute here, but instead emit a normal
2442             // alignment assumption so that the UBSan check can fire.
2443             llvm::Value *AlignmentValue =
2444               EmitScalarExpr(AVAttr->getAlignment());
2445             llvm::ConstantInt *AlignmentCI =
2446               cast<llvm::ConstantInt>(AlignmentValue);
2447             unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2448                                           +llvm::Value::MaximumAlignment);
2449             AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2450           }
2451         }
2452 
2453         if (Arg->getType().isRestrictQualified())
2454           AI->addAttr(llvm::Attribute::NoAlias);
2455 
2456         // LLVM expects swifterror parameters to be used in very restricted
2457         // ways.  Copy the value into a less-restricted temporary.
2458         if (FI.getExtParameterInfo(ArgNo).getABI()
2459               == ParameterABI::SwiftErrorResult) {
2460           QualType pointeeTy = Ty->getPointeeType();
2461           assert(pointeeTy->isPointerType());
2462           Address temp =
2463             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2464           Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2465           llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2466           Builder.CreateStore(incomingErrorValue, temp);
2467           V = temp.getPointer();
2468 
2469           // Push a cleanup to copy the value back at the end of the function.
2470           // The convention does not guarantee that the value will be written
2471           // back if the function exits with an unwind exception.
2472           EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2473         }
2474 
2475         // Ensure the argument is the correct type.
2476         if (V->getType() != ArgI.getCoerceToType())
2477           V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2478 
2479         if (isPromoted)
2480           V = emitArgumentDemotion(*this, Arg, V);
2481 
2482         // Because of merging of function types from multiple decls it is
2483         // possible for the type of an argument to not match the corresponding
2484         // type in the function type. Since we are codegening the callee
2485         // in here, add a cast to the argument type.
2486         llvm::Type *LTy = ConvertType(Arg->getType());
2487         if (V->getType() != LTy)
2488           V = Builder.CreateBitCast(V, LTy);
2489 
2490         ArgVals.push_back(ParamValue::forDirect(V));
2491         break;
2492       }
2493 
2494       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2495                                      Arg->getName());
2496 
2497       // Pointer to store into.
2498       Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2499 
2500       // Fast-isel and the optimizer generally like scalar values better than
2501       // FCAs, so we flatten them if this is safe to do for this argument.
2502       llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2503       if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2504           STy->getNumElements() > 1) {
2505         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2506         llvm::Type *DstTy = Ptr.getElementType();
2507         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2508 
2509         Address AddrToStoreInto = Address::invalid();
2510         if (SrcSize <= DstSize) {
2511           AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2512         } else {
2513           AddrToStoreInto =
2514             CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2515         }
2516 
2517         assert(STy->getNumElements() == NumIRArgs);
2518         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2519           auto AI = FnArgs[FirstIRArg + i];
2520           AI->setName(Arg->getName() + ".coerce" + Twine(i));
2521           Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
2522           Builder.CreateStore(AI, EltPtr);
2523         }
2524 
2525         if (SrcSize > DstSize) {
2526           Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2527         }
2528 
2529       } else {
2530         // Simple case, just do a coerced store of the argument into the alloca.
2531         assert(NumIRArgs == 1);
2532         auto AI = FnArgs[FirstIRArg];
2533         AI->setName(Arg->getName() + ".coerce");
2534         CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
2535       }
2536 
2537       // Match to what EmitParmDecl is expecting for this type.
2538       if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2539         llvm::Value *V =
2540             EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2541         if (isPromoted)
2542           V = emitArgumentDemotion(*this, Arg, V);
2543         ArgVals.push_back(ParamValue::forDirect(V));
2544       } else {
2545         ArgVals.push_back(ParamValue::forIndirect(Alloca));
2546       }
2547       break;
2548     }
2549 
2550     case ABIArgInfo::CoerceAndExpand: {
2551       // Reconstruct into a temporary.
2552       Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2553       ArgVals.push_back(ParamValue::forIndirect(alloca));
2554 
2555       auto coercionType = ArgI.getCoerceAndExpandType();
2556       alloca = Builder.CreateElementBitCast(alloca, coercionType);
2557 
2558       unsigned argIndex = FirstIRArg;
2559       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2560         llvm::Type *eltType = coercionType->getElementType(i);
2561         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2562           continue;
2563 
2564         auto eltAddr = Builder.CreateStructGEP(alloca, i);
2565         auto elt = FnArgs[argIndex++];
2566         Builder.CreateStore(elt, eltAddr);
2567       }
2568       assert(argIndex == FirstIRArg + NumIRArgs);
2569       break;
2570     }
2571 
2572     case ABIArgInfo::Expand: {
2573       // If this structure was expanded into multiple arguments then
2574       // we need to create a temporary and reconstruct it from the
2575       // arguments.
2576       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2577       LValue LV = MakeAddrLValue(Alloca, Ty);
2578       ArgVals.push_back(ParamValue::forIndirect(Alloca));
2579 
2580       auto FnArgIter = FnArgs.begin() + FirstIRArg;
2581       ExpandTypeFromArgs(Ty, LV, FnArgIter);
2582       assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2583       for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2584         auto AI = FnArgs[FirstIRArg + i];
2585         AI->setName(Arg->getName() + "." + Twine(i));
2586       }
2587       break;
2588     }
2589 
2590     case ABIArgInfo::Ignore:
2591       assert(NumIRArgs == 0);
2592       // Initialize the local variable appropriately.
2593       if (!hasScalarEvaluationKind(Ty)) {
2594         ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2595       } else {
2596         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2597         ArgVals.push_back(ParamValue::forDirect(U));
2598       }
2599       break;
2600     }
2601   }
2602 
2603   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2604     for (int I = Args.size() - 1; I >= 0; --I)
2605       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2606   } else {
2607     for (unsigned I = 0, E = Args.size(); I != E; ++I)
2608       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2609   }
2610 }
2611 
2612 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2613   while (insn->use_empty()) {
2614     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2615     if (!bitcast) return;
2616 
2617     // This is "safe" because we would have used a ConstantExpr otherwise.
2618     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2619     bitcast->eraseFromParent();
2620   }
2621 }
2622 
2623 /// Try to emit a fused autorelease of a return result.
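/// Editor's illustration (assumed example, not from the source): when the
/// block ends in
///   %retained = call i8* @objc_retain(i8* %v)
/// immediately before the pending autorelease-on-return, the two are fused
/// into a single call to @objc_retainAutoreleaseReturnValue.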
2624 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2625                                                     llvm::Value *result) {
2626   // We must be immediately followed by the cast.
2627   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2628   if (BB->empty()) return nullptr;
2629   if (&BB->back() != result) return nullptr;
2630 
2631   llvm::Type *resultType = result->getType();
2632 
2633   // result is in a BasicBlock and is therefore an Instruction.
2634   llvm::Instruction *generator = cast<llvm::Instruction>(result);
2635 
2636   SmallVector<llvm::Instruction *, 4> InstsToKill;
2637 
2638   // Look for:
2639   //  %generator = bitcast %type1* %generator2 to %type2*
2640   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2641     // We would have emitted this as a constant if the operand weren't
2642     // an Instruction.
2643     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2644 
2645     // Require the generator to be immediately followed by the cast.
2646     if (generator->getNextNode() != bitcast)
2647       return nullptr;
2648 
2649     InstsToKill.push_back(bitcast);
2650   }
2651 
2652   // Look for:
2653   //   %generator = call i8* @objc_retain(i8* %originalResult)
2654   // or
2655   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2656   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2657   if (!call) return nullptr;
2658 
2659   bool doRetainAutorelease;
2660 
2661   if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2662     doRetainAutorelease = true;
2663   } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2664                                           .objc_retainAutoreleasedReturnValue) {
2665     doRetainAutorelease = false;
2666 
2667     // If we emitted an assembly marker for this call (and the
2668     // ARCEntrypoints field should have been set if so), go looking
2669     // for that call.  If we can't find it, we can't do this
2670     // optimization.  But it should always be the immediately previous
2671     // instruction, unless we needed bitcasts around the call.
2672     if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2673       llvm::Instruction *prev = call->getPrevNode();
2674       assert(prev);
2675       if (isa<llvm::BitCastInst>(prev)) {
2676         prev = prev->getPrevNode();
2677         assert(prev);
2678       }
2679       assert(isa<llvm::CallInst>(prev));
2680       assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2681                CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2682       InstsToKill.push_back(prev);
2683     }
2684   } else {
2685     return nullptr;
2686   }
2687 
2688   result = call->getArgOperand(0);
2689   InstsToKill.push_back(call);
2690 
2691   // Keep killing bitcasts, for sanity.  Note that we no longer care
2692   // about precise ordering as long as there's exactly one use.
2693   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2694     if (!bitcast->hasOneUse()) break;
2695     InstsToKill.push_back(bitcast);
2696     result = bitcast->getOperand(0);
2697   }
2698 
2699   // Delete all the unnecessary instructions, from latest to earliest.
2700   for (auto *I : InstsToKill)
2701     I->eraseFromParent();
2702 
2703   // Do the fused retain/autorelease if we were asked to.
2704   if (doRetainAutorelease)
2705     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2706 
2707   // Cast back to the result type.
2708   return CGF.Builder.CreateBitCast(result, resultType);
2709 }
2710 
2711 /// If this is a +1 of the value of an immutable 'self', remove it.
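/// Editor's illustration (hypothetical ObjC source): in a method such as
///   - (id)foo { return self; }
/// ARC emits objc_retain(self) for the +1 return; because 'self' is
/// immutable here, that retain can be deleted and the plain load of 'self'
/// returned instead.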
2712 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2713                                           llvm::Value *result) {
2714   // This is only applicable to a method with an immutable 'self'.
2715   const ObjCMethodDecl *method =
2716     dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2717   if (!method) return nullptr;
2718   const VarDecl *self = method->getSelfDecl();
2719   if (!self->getType().isConstQualified()) return nullptr;
2720 
2721   // Look for a retain call.
2722   llvm::CallInst *retainCall =
2723     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2724   if (!retainCall ||
2725       retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2726     return nullptr;
2727 
2728   // Look for an ordinary load of 'self'.
2729   llvm::Value *retainedValue = retainCall->getArgOperand(0);
2730   llvm::LoadInst *load =
2731     dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2732   if (!load || load->isAtomic() || load->isVolatile() ||
2733       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2734     return nullptr;
2735 
2736   // Okay!  Burn it all down.  This relies for correctness on the
2737   // assumption that the retain is emitted as part of the return and
2738   // that thereafter everything is used "linearly".
2739   llvm::Type *resultType = result->getType();
2740   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2741   assert(retainCall->use_empty());
2742   retainCall->eraseFromParent();
2743   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2744 
2745   return CGF.Builder.CreateBitCast(load, resultType);
2746 }
2747 
2748 /// Emit an ARC autorelease of the result of a function.
2749 ///
2750 /// \return the value to actually return from the function
2751 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2752                                             llvm::Value *result) {
2753   // If we're returning 'self', kill the initial retain.  This is a
2754   // heuristic attempt to "encourage correctness" in the really unfortunate
2755   // case where we have a return of self during a dealloc and we desperately
2756   // need to avoid the possible autorelease.
2757   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2758     return self;
2759 
2760   // At -O0, try to emit a fused retain/autorelease.
2761   if (CGF.shouldUseFusedARCCalls())
2762     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2763       return fused;
2764 
2765   return CGF.EmitARCAutoreleaseReturnValue(result);
2766 }
2767 
2768 /// Heuristically search for a dominating store to the return-value slot.
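/// Editor's sketch of the IR shape this looks for (illustrative only):
///   %retval = alloca i32
///   ...
///   store i32 %v, i32* %retval   ; the dominating store
///   ; current insertion point, where the 'ret' will be emitted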
2769 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2770   // Check if a User is a store whose pointer operand is the ReturnValue.
2771   // We are looking for stores to the ReturnValue, not for stores of the
2772   // ReturnValue to some other location.
2773   auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2774     auto *SI = dyn_cast<llvm::StoreInst>(U);
2775     if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2776       return nullptr;
2777     // These aren't actually possible for non-coerced returns, and we
2778     // only care about non-coerced returns on this code path.
2779     assert(!SI->isAtomic() && !SI->isVolatile());
2780     return SI;
2781   };
2782   // If there are multiple uses of the return-value slot, just check
2783   // for something immediately preceding the IP.  Sometimes this can
2784   // happen with how we generate implicit-returns; it can also happen
2785   // with noreturn cleanups.
2786   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2787     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2788     if (IP->empty()) return nullptr;
2789     llvm::Instruction *I = &IP->back();
2790 
2791     // Skip lifetime markers
2792     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2793                                             IE = IP->rend();
2794          II != IE; ++II) {
2795       if (llvm::IntrinsicInst *Intrinsic =
2796               dyn_cast<llvm::IntrinsicInst>(&*II)) {
2797         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2798           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2799           ++II;
2800           if (II == IE)
2801             break;
2802           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2803             continue;
2804         }
2805       }
2806       I = &*II;
2807       break;
2808     }
2809 
2810     return GetStoreIfValid(I);
2811   }
2812 
2813   llvm::StoreInst *store =
2814       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2815   if (!store) return nullptr;
2816 
2817   // Now do a quick-and-dirty dominance check: just walk up the
2818   // single-predecessor chain from the current insertion point.
2819   llvm::BasicBlock *StoreBB = store->getParent();
2820   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2821   while (IP != StoreBB) {
2822     if (!(IP = IP->getSinglePredecessor()))
2823       return nullptr;
2824   }
2825 
2826   // Okay, the store's basic block dominates the insertion point; we
2827   // can do our thing.
2828   return store;
2829 }
2830 
2831 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2832                                          bool EmitRetDbgLoc,
2833                                          SourceLocation EndLoc) {
2834   if (FI.isNoReturn()) {
2835     // Noreturn functions don't return.
2836     EmitUnreachable(EndLoc);
2837     return;
2838   }
2839 
2840   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2841     // Naked functions don't have epilogues.
2842     Builder.CreateUnreachable();
2843     return;
2844   }
2845 
2846   // Functions with no result always return void.
2847   if (!ReturnValue.isValid()) {
2848     Builder.CreateRetVoid();
2849     return;
2850   }
2851 
2852   llvm::DebugLoc RetDbgLoc;
2853   llvm::Value *RV = nullptr;
2854   QualType RetTy = FI.getReturnType();
2855   const ABIArgInfo &RetAI = FI.getReturnInfo();
2856 
2857   switch (RetAI.getKind()) {
2858   case ABIArgInfo::InAlloca:
2859     // Aggregates get evaluated directly into the destination.  Sometimes we
2860     // need to return the sret value in a register, though.
2861     assert(hasAggregateEvaluationKind(RetTy));
2862     if (RetAI.getInAllocaSRet()) {
2863       llvm::Function::arg_iterator EI = CurFn->arg_end();
2864       --EI;
2865       llvm::Value *ArgStruct = &*EI;
2866       llvm::Value *SRet = Builder.CreateStructGEP(
2867           nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2868       RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2869     }
2870     break;
2871 
2872   case ABIArgInfo::Indirect: {
2873     auto AI = CurFn->arg_begin();
2874     if (RetAI.isSRetAfterThis())
2875       ++AI;
2876     switch (getEvaluationKind(RetTy)) {
2877     case TEK_Complex: {
2878       ComplexPairTy RT =
2879         EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2880       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2881                          /*isInit*/ true);
2882       break;
2883     }
2884     case TEK_Aggregate:
2885       // Do nothing; aggregates get evaluated directly into the destination.
2886       break;
2887     case TEK_Scalar:
2888       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2889                         MakeNaturalAlignAddrLValue(&*AI, RetTy),
2890                         /*isInit*/ true);
2891       break;
2892     }
2893     break;
2894   }
2895 
2896   case ABIArgInfo::Extend:
2897   case ABIArgInfo::Direct:
2898     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2899         RetAI.getDirectOffset() == 0) {
2900       // The internal return value temp will always have pointer-to-return-type
2901       // type; just do a load.
2902 
2903       // If there is a dominating store to ReturnValue, we can elide
2904       // the load, zap the store, and usually zap the alloca.
2905       if (llvm::StoreInst *SI =
2906               findDominatingStoreToReturnValue(*this)) {
2907         // Reuse the debug location from the store unless there is
2908         // cleanup code to be emitted between the store and return
2909         // instruction.
2910         if (EmitRetDbgLoc && !AutoreleaseResult)
2911           RetDbgLoc = SI->getDebugLoc();
2912         // Get the stored value and nuke the now-dead store.
2913         RV = SI->getValueOperand();
2914         SI->eraseFromParent();
2915 
2916       // Otherwise, we have to do a simple load.
2917       } else {
2918         RV = Builder.CreateLoad(ReturnValue);
2919       }
2920     } else {
2921       // If the value is offset in memory, apply the offset now.
2922       Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2923 
2924       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2925     }
2926 
2927     // In ARC, end functions that return a retainable type with a call
2928     // to objc_autoreleaseReturnValue.
2929     if (AutoreleaseResult) {
2930 #ifndef NDEBUG
2931       // Type::isObjCRetainableType has to be called on a QualType that hasn't
2932       // been stripped of the typedefs, so we cannot use RetTy here. Get the
2933       // original return type from the FunctionDecl, ObjCMethodDecl, or
2934       // BlockDecl via CurCodeDecl or BlockInfo.
2935       QualType RT;
2936 
2937       if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2938         RT = FD->getReturnType();
2939       else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2940         RT = MD->getReturnType();
2941       else if (isa<BlockDecl>(CurCodeDecl))
2942         RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2943       else
2944         llvm_unreachable("Unexpected function/method type");
2945 
2946       assert(getLangOpts().ObjCAutoRefCount &&
2947              !FI.isReturnsRetained() &&
2948              RT->isObjCRetainableType());
2949 #endif
2950       RV = emitAutoreleaseOfResult(*this, RV);
2951     }
2952 
2953     break;
2954 
2955   case ABIArgInfo::Ignore:
2956     break;
2957 
2958   case ABIArgInfo::CoerceAndExpand: {
2959     auto coercionType = RetAI.getCoerceAndExpandType();
2960 
2961     // Load all of the coerced elements out into results.
2962     llvm::SmallVector<llvm::Value*, 4> results;
2963     Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2964     for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2965       auto coercedEltType = coercionType->getElementType(i);
2966       if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2967         continue;
2968 
2969       auto eltAddr = Builder.CreateStructGEP(addr, i);
2970       auto elt = Builder.CreateLoad(eltAddr);
2971       results.push_back(elt);
2972     }
2973 
2974     // If we have one result, it's the single direct result type.
2975     if (results.size() == 1) {
2976       RV = results[0];
2977 
2978     // Otherwise, we need to make a first-class aggregate.
2979     } else {
2980       // Construct a return type that lacks padding elements.
2981       llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2982 
2983       RV = llvm::UndefValue::get(returnType);
2984       for (unsigned i = 0, e = results.size(); i != e; ++i) {
2985         RV = Builder.CreateInsertValue(RV, results[i], i);
2986       }
2987     }
2988     break;
2989   }
2990 
2991   case ABIArgInfo::Expand:
2992     llvm_unreachable("Invalid ABI kind for return argument");
2993   }
2994 
2995   llvm::Instruction *Ret;
2996   if (RV) {
2997     EmitReturnValueCheck(RV);
2998     Ret = Builder.CreateRet(RV);
2999   } else {
3000     Ret = Builder.CreateRetVoid();
3001   }
3002 
3003   if (RetDbgLoc)
3004     Ret->setDebugLoc(std::move(RetDbgLoc));
3005 }
3006 
3007 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
3008   // A current decl may not be available when emitting vtable thunks.
3009   if (!CurCodeDecl)
3010     return;
3011 
3012   ReturnsNonNullAttr *RetNNAttr = nullptr;
3013   if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
3014     RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3015 
3016   if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3017     return;
3018 
3019   // Prefer the returns_nonnull attribute if it's present.
3020   SourceLocation AttrLoc;
3021   SanitizerMask CheckKind;
3022   SanitizerHandler Handler;
3023   if (RetNNAttr) {
3024     assert(!requiresReturnValueNullabilityCheck() &&
3025            "Cannot check nullability and the nonnull attribute");
3026     AttrLoc = RetNNAttr->getLocation();
3027     CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3028     Handler = SanitizerHandler::NonnullReturn;
3029   } else {
3030     if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3031       if (auto *TSI = DD->getTypeSourceInfo())
3032         if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
3033           AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3034     CheckKind = SanitizerKind::NullabilityReturn;
3035     Handler = SanitizerHandler::NullabilityReturn;
3036   }
3037 
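  // Editor's example (hypothetical declarations): a function declared as
  //   __attribute__((returns_nonnull)) char *get_buf(void);
  // takes the ReturnsNonnullAttribute path above, while a nullability
  // annotation such as
  //   char *_Nonnull get_buf(void);
  // takes the NullabilityReturn path.
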
3038   SanitizerScope SanScope(this);
3039 
3040   // Make sure the "return" source location is valid. If we're checking a
3041   // nullability annotation, make sure the preconditions for the check are met.
3042   llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3043   llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3044   llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3045   llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3046   if (requiresReturnValueNullabilityCheck())
3047     CanNullCheck =
3048         Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3049   Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3050   EmitBlock(Check);
3051 
3052   // Now do the null check.
3053   llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3054   llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3055   llvm::Value *DynamicData[] = {SLocPtr};
3056   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3057 
3058   EmitBlock(NoCheck);
3059 
3060 #ifndef NDEBUG
3061   // The return location should not be used after the check has been emitted.
3062   ReturnLocation = Address::invalid();
3063 #endif
3064 }
3065 
3066 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3067   const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3068   return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3069 }
3070 
3071 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3072                                           QualType Ty) {
3073   // FIXME: Generate IR in one pass, rather than going back and fixing up these
3074   // placeholders.
3075   llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3076   llvm::Type *IRPtrTy = IRTy->getPointerTo();
3077   llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3078 
3079   // FIXME: When we generate this IR in one pass, we shouldn't need
3080   // this win32-specific alignment hack.
3081   CharUnits Align = CharUnits::fromQuantity(4);
3082   Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3083 
3084   return AggValueSlot::forAddr(Address(Placeholder, Align),
3085                                Ty.getQualifiers(),
3086                                AggValueSlot::IsNotDestructed,
3087                                AggValueSlot::DoesNotNeedGCBarriers,
3088                                AggValueSlot::IsNotAliased,
3089                                AggValueSlot::DoesNotOverlap);
3090 }
3091 
3092 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3093                                           const VarDecl *param,
3094                                           SourceLocation loc) {
3095   // StartFunction converted the ABI-lowered parameter(s) into a
3096   // local alloca.  We need to turn that into an r-value suitable
3097   // for EmitCall.
3098   Address local = GetAddrOfLocalVar(param);
3099 
3100   QualType type = param->getType();
3101 
3102   if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3103     CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3104   }
3105 
3106   // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3107   // but the argument needs to be the original pointer.
3108   if (type->isReferenceType()) {
3109     args.add(RValue::get(Builder.CreateLoad(local)), type);
3110 
3111   // In ARC, move out of consumed arguments so that the release cleanup
3112   // entered by StartFunction doesn't cause an over-release.  This isn't
3113   // optimal -O0 code generation, but it should get cleaned up when
3114   // optimization is enabled.  This also assumes that delegate calls are
3115   // performed exactly once for a set of arguments, but that should be safe.
3116   } else if (getLangOpts().ObjCAutoRefCount &&
3117              param->hasAttr<NSConsumedAttr>() &&
3118              type->isObjCRetainableType()) {
3119     llvm::Value *ptr = Builder.CreateLoad(local);
3120     auto null =
3121       llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3122     Builder.CreateStore(null, local);
3123     args.add(RValue::get(ptr), type);
3124 
3125   // For the most part, we just need to load the alloca, except that
3126   // aggregate r-values are actually pointers to temporaries.
3127   } else {
3128     args.add(convertTempToRValue(local, type, loc), type);
3129   }
3130 
3131   // Deactivate the cleanup for the callee-destructed param that was pushed.
3132   if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3133       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3134       param->needsDestruction(getContext())) {
3135     EHScopeStack::stable_iterator cleanup =
3136         CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3137     assert(cleanup.isValid() &&
3138            "cleanup for callee-destructed param not recorded");
3139     // This unreachable is a temporary marker which will be removed later.
3140     llvm::Instruction *isActive = Builder.CreateUnreachable();
3141     args.addArgCleanupDeactivation(cleanup, isActive);
3142   }
3143 }
3144 
3145 static bool isProvablyNull(llvm::Value *addr) {
3146   return isa<llvm::ConstantPointerNull>(addr);
3147 }
3148 
3149 /// Emit the actual writing-back of a writeback.
3150 static void emitWriteback(CodeGenFunction &CGF,
3151                           const CallArgList::Writeback &writeback) {
3152   const LValue &srcLV = writeback.Source;
3153   Address srcAddr = srcLV.getAddress(CGF);
3154   assert(!isProvablyNull(srcAddr.getPointer()) &&
3155          "shouldn't have writeback for provably null argument");
3156 
3157   llvm::BasicBlock *contBB = nullptr;
3158 
3159   // If the argument wasn't provably non-null, we need to null check
3160   // before doing the store.
3161   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3162                                               CGF.CGM.getDataLayout());
3163   if (!provablyNonNull) {
3164     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3165     contBB = CGF.createBasicBlock("icr.done");
3166 
3167     llvm::Value *isNull =
3168       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3169     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3170     CGF.EmitBlock(writebackBB);
3171   }
3172 
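  // Editor's sketch (illustrative IR): when the source address is not
  // provably non-null, the guard emitted above looks roughly like
  //   %icr.isnull = icmp eq %T** %src, null
  //   br i1 %icr.isnull, label %icr.done, label %icr.writeback
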
3173   // Load the value to writeback.
3174   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3175 
3176   // Cast it back, in case we're writing an id to a Foo* or something.
3177   value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3178                                     "icr.writeback-cast");
3179 
3180   // Perform the writeback.
3181 
3182   // If we have a "to use" value, it's something we need to emit a use
3183   // of.  This has to be carefully threaded in: if it's done after the
3184   // release it's potentially undefined behavior (and the optimizer
3185   // will ignore it), and if it happens before the retain then the
3186   // optimizer could move the release there.
3187   if (writeback.ToUse) {
3188     assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3189 
3190     // Retain the new value.  No need to block-copy here:  the block's
3191     // being passed up the stack.
3192     value = CGF.EmitARCRetainNonBlock(value);
3193 
3194     // Emit the intrinsic use here.
3195     CGF.EmitARCIntrinsicUse(writeback.ToUse);
3196 
3197     // Load the old value (primitively).
3198     llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3199 
3200     // Put the new value in place (primitively).
3201     CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3202 
3203     // Release the old value.
3204     CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3205 
3206   // Otherwise, we can just do a normal lvalue store.
3207   } else {
3208     CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3209   }
3210 
3211   // Jump to the continuation block.
3212   if (!provablyNonNull)
3213     CGF.EmitBlock(contBB);
3214 }
3215 
3216 static void emitWritebacks(CodeGenFunction &CGF,
3217                            const CallArgList &args) {
3218   for (const auto &I : args.writebacks())
3219     emitWriteback(CGF, I);
3220 }
3221 
3222 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3223                                             const CallArgList &CallArgs) {
3224   ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3225     CallArgs.getCleanupsToDeactivate();
3226   // Iterate in reverse to increase the likelihood of popping the cleanup.
3227   for (const auto &I : llvm::reverse(Cleanups)) {
3228     CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3229     I.IsActiveIP->eraseFromParent();
3230   }
3231 }
3232 
3233 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3234   if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3235     if (uop->getOpcode() == UO_AddrOf)
3236       return uop->getSubExpr();
3237   return nullptr;
3238 }
3239 
3240 /// Emit an argument that's being passed call-by-writeback.  That is,
3241 /// we are passing the address of an __autoreleased temporary; it
3242 /// might be copy-initialized with the current value of the given
3243 /// address, but it will definitely be copied out of after the call.
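/// Editor's illustration (hypothetical ObjC source): for a call such as
///   NSError *err = nil; [obj doThing:&err];
/// against a parameter of type NSError *__autoreleasing *, the callee
/// receives the address of a fresh temporary, and the temporary's value is
/// copied back into 'err' after the call.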
3244 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3245                              const ObjCIndirectCopyRestoreExpr *CRE) {
3246   LValue srcLV;
3247 
3248   // Make an optimistic effort to emit the address as an l-value.
3249   // This can fail if the argument expression is more complicated.
3250   if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3251     srcLV = CGF.EmitLValue(lvExpr);
3252 
3253   // Otherwise, just emit it as a scalar.
3254   } else {
3255     Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3256 
3257     QualType srcAddrType =
3258       CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3259     srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3260   }
3261   Address srcAddr = srcLV.getAddress(CGF);
3262 
3263   // The dest and src types don't necessarily match in LLVM terms
3264   // because of the crazy ObjC compatibility rules.
3265 
3266   llvm::PointerType *destType =
3267     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3268 
3269   // If the address is a constant null, just pass the appropriate null.
3270   if (isProvablyNull(srcAddr.getPointer())) {
3271     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3272              CRE->getType());
3273     return;
3274   }
3275 
3276   // Create the temporary.
3277   Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3278                                       CGF.getPointerAlign(),
3279                                       "icr.temp");
3280   // Loading an l-value can introduce a cleanup if the l-value is __weak,
3281   // and that cleanup will be conditional if we can't prove that the l-value
3282   // isn't null, so we need to register a dominating point so that the cleanups
3283   // system will make valid IR.
3284   CodeGenFunction::ConditionalEvaluation condEval(CGF);
3285 
3286   // Zero-initialize it if we're not doing a copy-initialization.
3287   bool shouldCopy = CRE->shouldCopy();
3288   if (!shouldCopy) {
3289     llvm::Value *null =
3290       llvm::ConstantPointerNull::get(
3291         cast<llvm::PointerType>(destType->getElementType()));
3292     CGF.Builder.CreateStore(null, temp);
3293   }
3294 
3295   llvm::BasicBlock *contBB = nullptr;
3296   llvm::BasicBlock *originBB = nullptr;
3297 
3298   // If the address is *not* known to be non-null, we need to switch.
3299   llvm::Value *finalArgument;
3300 
3301   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3302                                               CGF.CGM.getDataLayout());
3303   if (provablyNonNull) {
3304     finalArgument = temp.getPointer();
3305   } else {
3306     llvm::Value *isNull =
3307       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3308 
3309     finalArgument = CGF.Builder.CreateSelect(
3310         isNull, llvm::ConstantPointerNull::get(destType),
3311         temp.getPointer(), "icr.argument");
3312 
3313     // If we need to copy, then the load has to be conditional, which
3314     // means we need control flow.
3315     if (shouldCopy) {
3316       originBB = CGF.Builder.GetInsertBlock();
3317       contBB = CGF.createBasicBlock("icr.cont");
3318       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3319       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3320       CGF.EmitBlock(copyBB);
3321       condEval.begin(CGF);
3322     }
3323   }
3324 
3325   llvm::Value *valueToUse = nullptr;
3326 
3327   // Perform a copy if necessary.
3328   if (shouldCopy) {
3329     RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3330     assert(srcRV.isScalar());
3331 
3332     llvm::Value *src = srcRV.getScalarVal();
3333     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3334                                     "icr.cast");
3335 
3336     // Use an ordinary store, not a store-to-lvalue.
3337     CGF.Builder.CreateStore(src, temp);
3338 
3339     // If optimization is enabled, and the value was held in a
3340     // __strong variable, we need to tell the optimizer that this
3341     // value has to stay alive until we're doing the store back.
3342     // This is because the temporary is effectively unretained,
3343     // and so otherwise we can violate the high-level semantics.
3344     if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3345         srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3346       valueToUse = src;
3347     }
3348   }
3349 
3350   // Finish the control flow if we needed it.
3351   if (shouldCopy && !provablyNonNull) {
3352     llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3353     CGF.EmitBlock(contBB);
3354 
3355     // Make a phi for the value to intrinsically use.
3356     if (valueToUse) {
3357       llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3358                                                       "icr.to-use");
3359       phiToUse->addIncoming(valueToUse, copyBB);
3360       phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3361                             originBB);
3362       valueToUse = phiToUse;
3363     }
3364 
3365     condEval.end(CGF);
3366   }
3367 
3368   args.addWriteback(srcLV, temp, valueToUse);
3369   args.add(RValue::get(finalArgument), CRE->getType());
3370 }
3371 
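// Editor's sketch: allocateArgumentMemory / freeArgumentMemory bracket an
// inalloca call with a stack save/restore pair, roughly:
//   %inalloca.save = call i8* @llvm.stacksave()
//   ... inalloca argument stores and the call itself ...
//   call void @llvm.stackrestore(i8* %inalloca.save)
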
3372 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3373   assert(!StackBase);
3374 
3375   // Save the stack.
3376   llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3377   StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3378 }
3379 
3380 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3381   if (StackBase) {
3382     // Restore the stack after the call.
3383     llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3384     CGF.Builder.CreateCall(F, StackBase);
3385   }
3386 }
3387 
3388 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3389                                           SourceLocation ArgLoc,
3390                                           AbstractCallee AC,
3391                                           unsigned ParmNum) {
3392   if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3393                          SanOpts.has(SanitizerKind::NullabilityArg)))
3394     return;
3395 
3396   // The param decl may be missing in a variadic function.
3397   auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3398   unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3399 
3400   // Prefer the nonnull attribute if it's present.
3401   const NonNullAttr *NNAttr = nullptr;
3402   if (SanOpts.has(SanitizerKind::NonnullAttribute))
3403     NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3404 
3405   bool CanCheckNullability = false;
3406   if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3407     auto Nullability = PVD->getType()->getNullability(getContext());
3408     CanCheckNullability = Nullability &&
3409                           *Nullability == NullabilityKind::NonNull &&
3410                           PVD->getTypeSourceInfo();
3411   }
3412 
3413   if (!NNAttr && !CanCheckNullability)
3414     return;
3415 
3416   SourceLocation AttrLoc;
3417   SanitizerMask CheckKind;
3418   SanitizerHandler Handler;
3419   if (NNAttr) {
3420     AttrLoc = NNAttr->getLocation();
3421     CheckKind = SanitizerKind::NonnullAttribute;
3422     Handler = SanitizerHandler::NonnullArg;
3423   } else {
3424     AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3425     CheckKind = SanitizerKind::NullabilityArg;
3426     Handler = SanitizerHandler::NullabilityArg;
3427   }
3428 
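  // Editor's example (hypothetical declarations):
  //   void use(char *p) __attribute__((nonnull(1)));  // NonnullAttribute path
  //   void use2(char *_Nonnull p);                    // NullabilityArg path
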
3429   SanitizerScope SanScope(this);
3430   assert(RV.isScalar());
3431   llvm::Value *V = RV.getScalarVal();
3432   llvm::Value *Cond =
3433       Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3434   llvm::Constant *StaticData[] = {
3435       EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3436       llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3437   };
3438   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3439 }
3440 
3441 void CodeGenFunction::EmitCallArgs(
3442     CallArgList &Args, ArrayRef<QualType> ArgTypes,
3443     llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3444     AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3445   assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3446 
3447   // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3448   // because arguments are destroyed left to right in the callee. As a special
3449   // case, there are certain language constructs that require left-to-right
3450   // evaluation, and in those cases we consider the evaluation order requirement
3451   // to trump the "destruction order is reverse construction order" guarantee.
3452   bool LeftToRight =
3453       CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3454           ? Order == EvaluationOrder::ForceLeftToRight
3455           : Order != EvaluationOrder::ForceRightToLeft;
3456 
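  // Editor's example: for a call f(g(), h()) under the MS C++ ABI, h() is
  // evaluated before g() (right to left); the evaluated args are reversed
  // again at the end of this function so they line up with the IR call.
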
3457   auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3458                                          RValue EmittedArg) {
3459     if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3460       return;
3461     auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3462     if (PS == nullptr)
3463       return;
3464 
3465     const auto &Context = getContext();
3466     auto SizeTy = Context.getSizeType();
3467     auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3468     assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3469     llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3470                                                      EmittedArg.getScalarVal(),
3471                                                      PS->isDynamic());
3472     Args.add(RValue::get(V), SizeTy);
3473     // If we're emitting args in reverse, be sure to do so with
3474     // pass_object_size, as well.
3475     if (!LeftToRight)
3476       std::swap(Args.back(), *(&Args.back() - 1));
3477   };
3478 
3479   // Insert a stack save if we're going to need any inalloca args.
3480   bool HasInAllocaArgs = false;
3481   if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3482     for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3483          I != E && !HasInAllocaArgs; ++I)
3484       HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3485     if (HasInAllocaArgs) {
3486       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3487       Args.allocateArgumentMemory(*this);
3488     }
3489   }
3490 
3491   // Evaluate each argument in the appropriate order.
3492   size_t CallArgsStart = Args.size();
3493   for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3494     unsigned Idx = LeftToRight ? I : E - I - 1;
3495     CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3496     unsigned InitialArgSize = Args.size();
3497     // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3498     // the argument and parameter match or the objc method is parameterized.
3499     assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3500             getContext().hasSameUnqualifiedType((*Arg)->getType(),
3501                                                 ArgTypes[Idx]) ||
3502             (isa<ObjCMethodDecl>(AC.getDecl()) &&
3503              isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3504            "Argument and parameter types don't match");
3505     EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3506     // In particular, we depend on it being the last arg in Args, and the
3507     // objectsize bits depend on there only being one arg if !LeftToRight.
3508     assert(InitialArgSize + 1 == Args.size() &&
3509            "The code below depends on only adding one arg per EmitCallArg");
3510     (void)InitialArgSize;
3511     // Since pointer arguments are never emitted as LValues, it is safe to
3512     // emit the non-null argument check only for r-values.
3513     if (!Args.back().hasLValue()) {
3514       RValue RVArg = Args.back().getKnownRValue();
3515       EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3516                           ParamsToSkip + Idx);
3517       // @llvm.objectsize should never have side-effects and shouldn't need
3518       // destruction/cleanups, so we can safely "emit" it after its arg,
3519       // regardless of right-to-leftness.
3520       MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3521     }
3522   }
3523 
3524   if (!LeftToRight) {
3525     // Un-reverse the arguments we just evaluated so they match up with the LLVM
3526     // IR function.
3527     std::reverse(Args.begin() + CallArgsStart, Args.end());
3528   }
3529 }
3530 
3531 namespace {
3532 
3533 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3534   DestroyUnpassedArg(Address Addr, QualType Ty)
3535       : Addr(Addr), Ty(Ty) {}
3536 
3537   Address Addr;
3538   QualType Ty;
3539 
3540   void Emit(CodeGenFunction &CGF, Flags flags) override {
3541     QualType::DestructionKind DtorKind = Ty.isDestructedType();
3542     if (DtorKind == QualType::DK_cxx_destructor) {
3543       const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3544       assert(!Dtor->isTrivial());
3545       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3546                                 /*Delegating=*/false, Addr, Ty);
3547     } else {
3548       CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3549     }
3550   }
3551 };
3552 
3553 struct DisableDebugLocationUpdates {
3554   CodeGenFunction &CGF;
3555   bool disabledDebugInfo;
3556   DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3557     if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3558       CGF.disableDebugInfo();
3559   }
3560   ~DisableDebugLocationUpdates() {
3561     if (disabledDebugInfo)
3562       CGF.enableDebugInfo();
3563   }
3564 };
3565 
3566 } // end anonymous namespace
3567 
3568 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3569   if (!HasLV)
3570     return RV;
3571   LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3572   CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3573                         LV.isVolatile());
3574   IsUsed = true;
3575   return RValue::getAggregate(Copy.getAddress(CGF));
3576 }
3577 
3578 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3579   LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3580   if (!HasLV && RV.isScalar())
3581     CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
3582   else if (!HasLV && RV.isComplex())
3583     CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3584   else {
3585     auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
3586     LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3587     // We assume that call args are never copied into subobjects.
3588     CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3589                           HasLV ? LV.isVolatileQualified()
3590                                 : RV.isVolatileQualified());
3591   }
3592   IsUsed = true;
3593 }
3594 
3595 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3596                                   QualType type) {
3597   DisableDebugLocationUpdates Dis(*this, E);
3598   if (const ObjCIndirectCopyRestoreExpr *CRE
3599         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3600     assert(getLangOpts().ObjCAutoRefCount);
3601     return emitWritebackArg(*this, args, CRE);
3602   }
3603 
3604   assert(type->isReferenceType() == E->isGLValue() &&
3605          "reference binding to unmaterialized r-value!");
3606 
3607   if (E->isGLValue()) {
3608     assert(E->getObjectKind() == OK_Ordinary);
3609     return args.add(EmitReferenceBindingToExpr(E), type);
3610   }
3611 
3612   bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3613 
3614   // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3615   // However, we still have to push an EH-only cleanup in case we unwind before
3616   // we make it to the call.
3617   if (HasAggregateEvalKind &&
3618       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
3619     // If we're using inalloca, use the argument memory.  Otherwise, use a
3620     // temporary.
3621     AggValueSlot Slot;
3622     if (args.isUsingInAlloca())
3623       Slot = createPlaceholderSlot(*this, type);
3624     else
3625       Slot = CreateAggTemp(type, "agg.tmp");
3626 
3627     bool DestroyedInCallee = true, NeedsEHCleanup = true;
3628     if (const auto *RD = type->getAsCXXRecordDecl())
3629       DestroyedInCallee = RD->hasNonTrivialDestructor();
3630     else
3631       NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3632 
3633     if (DestroyedInCallee)
3634       Slot.setExternallyDestructed();
3635 
3636     EmitAggExpr(E, Slot);
3637     RValue RV = Slot.asRValue();
3638     args.add(RV, type);
3639 
3640     if (DestroyedInCallee && NeedsEHCleanup) {
3641       // Create a no-op GEP between the placeholder and the cleanup so we can
3642       // RAUW it successfully.  It also serves as a marker of the first
3643       // instruction where the cleanup is active.
3644       pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3645                                               type);
3646       // This unreachable is a temporary marker which will be removed later.
3647       llvm::Instruction *IsActive = Builder.CreateUnreachable();
3648       args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3649     }
3650     return;
3651   }
3652 
3653   if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3654       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3655     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3656     assert(L.isSimple());
3657     args.addUncopiedAggregate(L, type);
3658     return;
3659   }
3660 
3661   args.add(EmitAnyExprToTemp(E), type);
3662 }
3663 
3664 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3665   // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3666   // implicitly widens null pointer constants that are arguments to varargs
3667   // functions to pointer-sized ints.
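  // Editor's example: on Win64 a call such as printf("%p", NULL) passes
  // NULL as a plain 32-bit int 0; the code below widens its type to
  // intptr_t so a full pointer-sized zero lands in the vararg slot.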
3668   if (!getTarget().getTriple().isOSWindows())
3669     return Arg->getType();
3670 
3671   if (Arg->getType()->isIntegerType() &&
3672       getContext().getTypeSize(Arg->getType()) <
3673           getContext().getTargetInfo().getPointerWidth(0) &&
3674       Arg->isNullPointerConstant(getContext(),
3675                                  Expr::NPC_ValueDependentIsNotNull)) {
3676     return getContext().getIntPtrType();
3677   }
3678 
3679   return Arg->getType();
3680 }
3681 
3682 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3683 // optimizer it can aggressively ignore unwind edges.
3684 void
3685 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3686   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3687       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3688     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3689                       CGM.getNoObjCARCExceptionsMetadata());
3690 }
3691 
3692 /// Emits a call to the given no-arguments nounwind runtime function.
3693 llvm::CallInst *
3694 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3695                                          const llvm::Twine &name) {
3696   return EmitNounwindRuntimeCall(callee, None, name);
3697 }
3698 
3699 /// Emits a call to the given nounwind runtime function.
3700 llvm::CallInst *
3701 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3702                                          ArrayRef<llvm::Value *> args,
3703                                          const llvm::Twine &name) {
3704   llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3705   call->setDoesNotThrow();
3706   return call;
3707 }
3708 
3709 /// Emits a simple call (never an invoke) to the given no-arguments
3710 /// runtime function.
3711 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
3712                                                  const llvm::Twine &name) {
3713   return EmitRuntimeCall(callee, None, name);
3714 }
3715 
3716 // Calls which may throw must have operand bundles indicating which funclet
3717 // they are nested within.
3718 SmallVector<llvm::OperandBundleDef, 1>
3719 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3720   SmallVector<llvm::OperandBundleDef, 1> BundleList;
3721   // There is no need for a funclet operand bundle if we aren't inside a
3722   // funclet.
3723   if (!CurrentFuncletPad)
3724     return BundleList;
3725 
3726   // Skip intrinsics which cannot throw.
3727   auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3728   if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3729     return BundleList;
3730 
3731   BundleList.emplace_back("funclet", CurrentFuncletPad);
3732   return BundleList;
3733 }
3734 
3735 /// Emits a simple call (never an invoke) to the given runtime function.
3736 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
3737                                                  ArrayRef<llvm::Value *> args,
3738                                                  const llvm::Twine &name) {
3739   llvm::CallInst *call = Builder.CreateCall(
3740       callee, args, getBundlesForFunclet(callee.getCallee()), name);
3741   call->setCallingConv(getRuntimeCC());
3742   return call;
3743 }
3744 
3745 /// Emits a call or invoke to the given noreturn runtime function.
3746 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
3747     llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
3748   SmallVector<llvm::OperandBundleDef, 1> BundleList =
3749       getBundlesForFunclet(callee.getCallee());
3750 
3751   if (getInvokeDest()) {
3752     llvm::InvokeInst *invoke =
3753       Builder.CreateInvoke(callee,
3754                            getUnreachableBlock(),
3755                            getInvokeDest(),
3756                            args,
3757                            BundleList);
3758     invoke->setDoesNotReturn();
3759     invoke->setCallingConv(getRuntimeCC());
3760   } else {
3761     llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3762     call->setDoesNotReturn();
3763     call->setCallingConv(getRuntimeCC());
3764     Builder.CreateUnreachable();
3765   }
3766 }
3767 
3768 /// Emits a call or invoke instruction to the given nullary runtime function.
3769 llvm::CallBase *
3770 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3771                                          const Twine &name) {
3772   return EmitRuntimeCallOrInvoke(callee, None, name);
3773 }
3774 
3775 /// Emits a call or invoke instruction to the given runtime function.
3776 llvm::CallBase *
3777 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3778                                          ArrayRef<llvm::Value *> args,
3779                                          const Twine &name) {
3780   llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
3781   call->setCallingConv(getRuntimeCC());
3782   return call;
3783 }
3784 
3785 /// Emits a call or invoke instruction to the given function, depending
3786 /// on the current state of the EH stack.
3787 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
3788                                                   ArrayRef<llvm::Value *> Args,
3789                                                   const Twine &Name) {
3790   llvm::BasicBlock *InvokeDest = getInvokeDest();
3791   SmallVector<llvm::OperandBundleDef, 1> BundleList =
3792       getBundlesForFunclet(Callee.getCallee());
3793 
3794   llvm::CallBase *Inst;
3795   if (!InvokeDest)
3796     Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3797   else {
3798     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3799     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3800                                 Name);
3801     EmitBlock(ContBB);
3802   }
3803 
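  // Editor's sketch: with an active EH scope this produces roughly
  //   invoke void @f(...) to label %invoke.cont unwind label %lpad
  // with emission continuing in %invoke.cont; otherwise it is a plain
  //   call void @f(...)
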
3804   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3805   // optimizer it can aggressively ignore unwind edges.
3806   if (CGM.getLangOpts().ObjCAutoRefCount)
3807     AddObjCARCExceptionMetadata(Inst);
3808 
3809   return Inst;
3810 }
3811 
3812 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3813                                                   llvm::Value *New) {
3814   DeferredReplacements.push_back(std::make_pair(Old, New));
3815 }
3816 
3817 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3818                                  const CGCallee &Callee,
3819                                  ReturnValueSlot ReturnValue,
3820                                  const CallArgList &CallArgs,
3821                                  llvm::CallBase **callOrInvoke,
3822                                  SourceLocation Loc) {
3823   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3824 
3825   assert(Callee.isOrdinary() || Callee.isVirtual());
3826 
3827   // Handle struct-return functions by passing a pointer to the
3828   // location that we would like to return into.
3829   QualType RetTy = CallInfo.getReturnType();
3830   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3831 
3832   llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
3833 
3834   const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
3835   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
3836     // We can only guarantee that a function is called from the correct
3837     // context/function based on the appropriate target attributes, so only
3838     // check in the case where we have both always_inline and target, since
3839     // otherwise we could be making a conditional call after a check for the
3840     // proper cpu features (and it won't cause code generation issues due to
3841     // function-based code generation).
3842     if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
3843         TargetDecl->hasAttr<TargetAttr>())
3844       checkTargetFeatures(Loc, FD);
3845 
3846 #ifndef NDEBUG
3847   if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
3848     // For an inalloca varargs function, we don't expect CallInfo to match the
3849     // function pointer's type, because the inalloca struct will have extra
3850     // fields in it for the varargs parameters.  Code later in this function
3851     // bitcasts the function pointer to the type derived from CallInfo.
3852     //
3853     // In other cases, we assert that the types match up (until pointers stop
3854     // having pointee types).
3855     llvm::Type *TypeFromVal;
3856     if (Callee.isVirtual())
3857       TypeFromVal = Callee.getVirtualFunctionType();
3858     else
3859       TypeFromVal =
3860           Callee.getFunctionPointer()->getType()->getPointerElementType();
3861     assert(IRFuncTy == TypeFromVal);
3862   }
3863 #endif
3864 
3865   // 1. Set up the arguments.
3866 
3867   // If we're using inalloca, insert the allocation after the stack save.
3868   // FIXME: Do this earlier rather than hacking it in here!
3869   Address ArgMemory = Address::invalid();
3870   if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3871     const llvm::DataLayout &DL = CGM.getDataLayout();
3872     llvm::Instruction *IP = CallArgs.getStackBase();
3873     llvm::AllocaInst *AI;
3874     if (IP) {
3875       IP = IP->getNextNode();
3876       AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3877                                 "argmem", IP);
3878     } else {
3879       AI = CreateTempAlloca(ArgStruct, "argmem");
3880     }
3881     auto Align = CallInfo.getArgStructAlignment();
3882     AI->setAlignment(Align.getAsAlign());
3883     AI->setUsedWithInAlloca(true);
3884     assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3885     ArgMemory = Address(AI, Align);
3886   }
3887 
3888   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3889   SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3890 
3891   // If the call returns a temporary with struct return, create a temporary
3892   // alloca to hold the result, unless one is given to us.
3893   Address SRetPtr = Address::invalid();
3894   Address SRetAlloca = Address::invalid();
3895   llvm::Value *UnusedReturnSizePtr = nullptr;
3896   if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3897     if (!ReturnValue.isNull()) {
3898       SRetPtr = ReturnValue.getValue();
3899     } else {
3900       SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
3901       if (HaveInsertPoint() && ReturnValue.isUnused()) {
3902         uint64_t size =
3903             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3904         UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
3905       }
3906     }
3907     if (IRFunctionArgs.hasSRetArg()) {
3908       IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3909     } else if (RetAI.isInAlloca()) {
3910       Address Addr =
3911           Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
3912       Builder.CreateStore(SRetPtr.getPointer(), Addr);
3913     }
3914   }
3915 
3916   Address swiftErrorTemp = Address::invalid();
3917   Address swiftErrorArg = Address::invalid();
3918 
3919   // When passing arguments using temporary allocas, we need to add the
3920   // appropriate lifetime markers. This vector keeps track of all the lifetime
3921   // markers that need to be ended right after the call.
3922   SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;
3923 
3924   // Translate all of the arguments as necessary to match the IR lowering.
3925   assert(CallInfo.arg_size() == CallArgs.size() &&
3926          "Mismatch between function signature & arguments.");
3927   unsigned ArgNo = 0;
3928   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3929   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3930        I != E; ++I, ++info_it, ++ArgNo) {
3931     const ABIArgInfo &ArgInfo = info_it->info;
3932 
3933     // Insert a padding argument to ensure proper alignment.
3934     if (IRFunctionArgs.hasPaddingArg(ArgNo))
3935       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3936           llvm::UndefValue::get(ArgInfo.getPaddingType());
3937 
3938     unsigned FirstIRArg, NumIRArgs;
3939     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3940 
3941     switch (ArgInfo.getKind()) {
3942     case ABIArgInfo::InAlloca: {
3943       assert(NumIRArgs == 0);
3944       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3945       if (I->isAggregate()) {
3946         // Replace the placeholder with the appropriate argument slot GEP.
3947         Address Addr = I->hasLValue()
3948                            ? I->getKnownLValue().getAddress(*this)
3949                            : I->getKnownRValue().getAggregateAddress();
3950         llvm::Instruction *Placeholder =
3951             cast<llvm::Instruction>(Addr.getPointer());
3952         CGBuilderTy::InsertPoint IP = Builder.saveIP();
3953         Builder.SetInsertPoint(Placeholder);
3954         Addr =
3955             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
3956         Builder.restoreIP(IP);
3957         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3958       } else {
3959         // Store the RValue into the argument struct.
3960         Address Addr =
3961             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
3962         unsigned AS = Addr.getType()->getPointerAddressSpace();
3963         llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3964         // There are some cases where a trivial bitcast is unavoidable.  The
3965         // definition of a type later in a translation unit may change its type
3966         // from {}* to (%struct.foo*)*.
3967         if (Addr.getType() != MemType)
3968           Addr = Builder.CreateBitCast(Addr, MemType);
3969         I->copyInto(*this, Addr);
3970       }
3971       break;
3972     }
3973 
3974     case ABIArgInfo::Indirect: {
3975       assert(NumIRArgs == 1);
3976       if (!I->isAggregate()) {
3977         // Make a temporary alloca to pass the argument.
3978         Address Addr = CreateMemTempWithoutCast(
3979             I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
3980         IRCallArgs[FirstIRArg] = Addr.getPointer();
3981 
3982         I->copyInto(*this, Addr);
3983       } else {
3984         // We want to avoid creating an unnecessary temporary+copy here;
3985         // however, we need one in three cases:
3986         // 1. If the argument is not byval, and we are required to copy the
3987         //    source.  (This case doesn't occur on any common architecture.)
3988         // 2. If the argument is byval, RV is not sufficiently aligned, and
3989         //    we cannot force it to be sufficiently aligned.
3990         // 3. If the argument is byval, but RV is not located in default
3991         //    or alloca address space.
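        //    For example (a hedged sketch of case 2, assuming x86-64 SysV,
        //    where large aggregates are passed byval with 8-byte alignment):
        //
        //      struct Big { long a[8]; };
        //      struct __attribute__((packed)) P { char c; struct Big b; };
        //      void callee(struct Big);
        //      void caller(struct P *p) { callee(p->b); }  // b is 1-aligned
        //
        //    forces a copy into a properly aligned temporary below.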
3992         Address Addr = I->hasLValue()
3993                            ? I->getKnownLValue().getAddress(*this)
3994                            : I->getKnownRValue().getAggregateAddress();
3995         llvm::Value *V = Addr.getPointer();
3996         CharUnits Align = ArgInfo.getIndirectAlign();
3997         const llvm::DataLayout *TD = &CGM.getDataLayout();
3998 
3999         assert((FirstIRArg >= IRFuncTy->getNumParams() ||
4000                 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
4001                     TD->getAllocaAddrSpace()) &&
4002                "indirect argument must be in alloca address space");
4003 
4004         bool NeedCopy = false;
4005 
4006         if (Addr.getAlignment() < Align &&
4007             llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
4008                 Align.getQuantity()) {
4009           NeedCopy = true;
4010         } else if (I->hasLValue()) {
4011           auto LV = I->getKnownLValue();
4012           auto AS = LV.getAddressSpace();
4013 
4014           if (!ArgInfo.getIndirectByVal() ||
4015               (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
4016             NeedCopy = true;
4017           }
4018           if (!getLangOpts().OpenCL) {
4019             if ((ArgInfo.getIndirectByVal() &&
4020                 (AS != LangAS::Default &&
4021                  AS != CGM.getASTAllocaAddressSpace()))) {
4022               NeedCopy = true;
4023             }
4024           }
4025           // For OpenCL, even if RV is located in the default or alloca address
4026           // space, we don't want to perform an address space cast for it.
4027           else if ((ArgInfo.getIndirectByVal() &&
4028                     Addr.getType()->getAddressSpace() != IRFuncTy->
4029                       getParamType(FirstIRArg)->getPointerAddressSpace())) {
4030             NeedCopy = true;
4031           }
4032         }
4033 
4034         if (NeedCopy) {
4035           // Create an aligned temporary, and copy to it.
4036           Address AI = CreateMemTempWithoutCast(
4037               I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
4038           IRCallArgs[FirstIRArg] = AI.getPointer();
4039 
4040           // Emit lifetime markers for the temporary alloca.
4041           uint64_t ByvalTempElementSize =
4042               CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
4043           llvm::Value *LifetimeSize =
4044               EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
4045 
4046           // Add cleanup code to emit the end lifetime marker after the call.
4047           if (LifetimeSize) // In case we disabled lifetime markers.
4048             CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
4049 
4050           // Generate the copy.
4051           I->copyInto(*this, AI);
4052         } else {
4053           // Skip the extra memcpy call.
4054           auto *T = V->getType()->getPointerElementType()->getPointerTo(
4055               CGM.getDataLayout().getAllocaAddrSpace());
4056           IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
4057               *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
4058               true);
4059         }
4060       }
4061       break;
4062     }
4063 
4064     case ABIArgInfo::Ignore:
4065       assert(NumIRArgs == 0);
4066       break;
4067 
4068     case ABIArgInfo::Extend:
4069     case ABIArgInfo::Direct: {
4070       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4071           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4072           ArgInfo.getDirectOffset() == 0) {
4073         assert(NumIRArgs == 1);
4074         llvm::Value *V;
4075         if (!I->isAggregate())
4076           V = I->getKnownRValue().getScalarVal();
4077         else
4078           V = Builder.CreateLoad(
4079               I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4080                              : I->getKnownRValue().getAggregateAddress());
4081 
4082         // Implement swifterror by copying into a new swifterror argument.
4083         // We'll write back in the normal path out of the call.
4084         if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4085               == ParameterABI::SwiftErrorResult) {
4086           assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4087 
4088           QualType pointeeTy = I->Ty->getPointeeType();
4089           swiftErrorArg =
4090             Address(V, getContext().getTypeAlignInChars(pointeeTy));
4091 
4092           swiftErrorTemp =
4093             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4094           V = swiftErrorTemp.getPointer();
4095           cast<llvm::AllocaInst>(V)->setSwiftError(true);
4096 
4097           llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4098           Builder.CreateStore(errorValue, swiftErrorTemp);
4099         }
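
        // A sketch of the construct involved (Swift calling convention,
        // hypothetical names):
        //
        //   __attribute__((swiftcall))
        //   void f(int **err __attribute__((swift_error_result)));
        //
        // *err is copied into a fresh 'swifterror' alloca before the call
        // and written back after it (see the writeback past the call).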
4100 
4101         // We might have to widen integers, but we should never truncate.
4102         if (ArgInfo.getCoerceToType() != V->getType() &&
4103             V->getType()->isIntegerTy())
4104           V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4105 
4106         // If the argument doesn't match, perform a bitcast to coerce it.  This
4107         // can happen due to trivial type mismatches.
4108         if (FirstIRArg < IRFuncTy->getNumParams() &&
4109             V->getType() != IRFuncTy->getParamType(FirstIRArg))
4110           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4111 
4112         IRCallArgs[FirstIRArg] = V;
4113         break;
4114       }
4115 
4116       // FIXME: Avoid the conversion through memory if possible.
4117       Address Src = Address::invalid();
4118       if (!I->isAggregate()) {
4119         Src = CreateMemTemp(I->Ty, "coerce");
4120         I->copyInto(*this, Src);
4121       } else {
4122         Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4123                              : I->getKnownRValue().getAggregateAddress();
4124       }
4125 
4126       // If the value is offset in memory, apply the offset now.
4127       Src = emitAddressAtOffset(*this, Src, ArgInfo);
4128 
4129       // Fast-isel and the optimizer generally like scalar values better than
4130       // FCAs, so we flatten them if this is safe to do for this argument.
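      // A sketch of the flattening (assuming x86-64 SysV): a 16-byte struct
      // coerced to the FCA { i64, i64 } is passed as two scalar arguments:
      //
      //   struct Pair { long a, b; };
      //   void callee(struct Pair p);   // IR: void @callee(i64, i64)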
4131       llvm::StructType *STy =
4132             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4133       if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4134         llvm::Type *SrcTy = Src.getType()->getElementType();
4135         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4136         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4137 
4138         // If the source type is smaller than the destination type of the
4139         // coerce-to logic, copy the source value into a temp alloca the size
4140         // of the destination type to allow loading all of it. The bits past
4141         // the source value are left undef.
4142         if (SrcSize < DstSize) {
4143           Address TempAlloca
4144             = CreateTempAlloca(STy, Src.getAlignment(),
4145                                Src.getName() + ".coerce");
4146           Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4147           Src = TempAlloca;
4148         } else {
4149           Src = Builder.CreateBitCast(Src,
4150                                       STy->getPointerTo(Src.getAddressSpace()));
4151         }
4152 
4153         assert(NumIRArgs == STy->getNumElements());
4154         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4155           Address EltPtr = Builder.CreateStructGEP(Src, i);
4156           llvm::Value *LI = Builder.CreateLoad(EltPtr);
4157           IRCallArgs[FirstIRArg + i] = LI;
4158         }
4159       } else {
4160         // In the simple case, just pass the coerced loaded value.
4161         assert(NumIRArgs == 1);
4162         IRCallArgs[FirstIRArg] =
4163           CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4164       }
4165 
4166       break;
4167     }
4168 
4169     case ABIArgInfo::CoerceAndExpand: {
4170       auto coercionType = ArgInfo.getCoerceAndExpandType();
4171       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4172 
4173       llvm::Value *tempSize = nullptr;
4174       Address addr = Address::invalid();
4175       Address AllocaAddr = Address::invalid();
4176       if (I->isAggregate()) {
4177         addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4178                               : I->getKnownRValue().getAggregateAddress();
4179 
4180       } else {
4181         RValue RV = I->getKnownRValue();
4182         assert(RV.isScalar()); // complex should always just be direct
4183 
4184         llvm::Type *scalarType = RV.getScalarVal()->getType();
4185         auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4186         auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4187 
4188         // Materialize to a temporary.
4189         addr = CreateTempAlloca(
4190             RV.getScalarVal()->getType(),
4191             CharUnits::fromQuantity(std::max(
4192                 (unsigned)layout->getAlignment().value(), scalarAlign)),
4193             "tmp",
4194             /*ArraySize=*/nullptr, &AllocaAddr);
4195         tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4196 
4197         Builder.CreateStore(RV.getScalarVal(), addr);
4198       }
4199 
4200       addr = Builder.CreateElementBitCast(addr, coercionType);
4201 
4202       unsigned IRArgPos = FirstIRArg;
4203       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4204         llvm::Type *eltType = coercionType->getElementType(i);
4205         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4206         Address eltAddr = Builder.CreateStructGEP(addr, i);
4207         llvm::Value *elt = Builder.CreateLoad(eltAddr);
4208         IRCallArgs[IRArgPos++] = elt;
4209       }
4210       assert(IRArgPos == FirstIRArg + NumIRArgs);
4211 
4212       if (tempSize) {
4213         EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4214       }
4215 
4216       break;
4217     }
4218 
4219     case ABIArgInfo::Expand:
4220       unsigned IRArgPos = FirstIRArg;
4221       ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4222       assert(IRArgPos == FirstIRArg + NumIRArgs);
4223       break;
4224     }
4225   }
4226 
4227   const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4228   llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4229 
4230   // If we're using inalloca, set up that argument.
4231   if (ArgMemory.isValid()) {
4232     llvm::Value *Arg = ArgMemory.getPointer();
4233     if (CallInfo.isVariadic()) {
4234       // When passing non-POD arguments by value to variadic functions, we will
4235       // end up with a variadic prototype and an inalloca call site.  In such
4236       // cases, we can't do any parameter mismatch checks.  Give up and bitcast
4237       // the callee.
4238       unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4239       CalleePtr =
4240           Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
4241     } else {
4242       llvm::Type *LastParamTy =
4243           IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4244       if (Arg->getType() != LastParamTy) {
4245 #ifndef NDEBUG
4246         // Assert that these structs have equivalent element types.
4247         llvm::StructType *FullTy = CallInfo.getArgStruct();
4248         llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4249             cast<llvm::PointerType>(LastParamTy)->getElementType());
4250         assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4251         for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4252                                                 DE = DeclaredTy->element_end(),
4253                                                 FI = FullTy->element_begin();
4254              DI != DE; ++DI, ++FI)
4255           assert(*DI == *FI);
4256 #endif
4257         Arg = Builder.CreateBitCast(Arg, LastParamTy);
4258       }
4259     }
4260     assert(IRFunctionArgs.hasInallocaArg());
4261     IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4262   }
4263 
4264   // 2. Prepare the function pointer.
4265 
4266   // If the callee is a bitcast of a non-variadic function to have a
4267   // variadic function pointer type, check to see if we can remove the
4268   // bitcast.  This comes up with unprototyped functions.
4269   //
4270   // This makes the IR nicer, but more importantly it ensures that we
4271   // can inline the function at -O0 if it is marked always_inline.
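  // A hedged sketch of the unprototyped-function pattern this targets:
  //
  //   void f();               /* no prototype: call sites use void (...)    */
  //   void g(void) { f(); }   /* call goes through a bitcast of @f          */
  //   void f() { }            /* @f itself has the non-variadic type void() */
  //
  // The lambda below strips that bitcast so @f is called directly.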
4272   auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
4273                                    llvm::Value *Ptr) -> llvm::Function * {
4274     if (!CalleeFT->isVarArg())
4275       return nullptr;
4276 
4277     // Get underlying value if it's a bitcast
4278     if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
4279       if (CE->getOpcode() == llvm::Instruction::BitCast)
4280         Ptr = CE->getOperand(0);
4281     }
4282 
4283     llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
4284     if (!OrigFn)
4285       return nullptr;
4286 
4287     llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4288 
4289     // If the original type is variadic, or if any of the component types
4290     // disagree, we cannot remove the cast.
4291     if (OrigFT->isVarArg() ||
4292         OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4293         OrigFT->getReturnType() != CalleeFT->getReturnType())
4294       return nullptr;
4295 
4296     for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4297       if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4298         return nullptr;
4299 
4300     return OrigFn;
4301   };
4302 
4303   if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
4304     CalleePtr = OrigFn;
4305     IRFuncTy = OrigFn->getFunctionType();
4306   }
4307 
4308   // 3. Perform the actual call.
4309 
4310   // Deactivate any cleanups that we're supposed to do immediately before
4311   // the call.
4312   if (!CallArgs.getCleanupsToDeactivate().empty())
4313     deactivateArgCleanupsBeforeCall(*this, CallArgs);
4314 
4315   // Assert that the arguments we computed match up.  The IR verifier
4316   // will catch this, but this is a common enough source of problems
4317   // during IRGen changes that it's way better for debugging to catch
4318   // it ourselves here.
4319 #ifndef NDEBUG
4320   assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4321   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4322     // The inalloca argument is allowed to have a different type.
4323     if (IRFunctionArgs.hasInallocaArg() &&
4324         i == IRFunctionArgs.getInallocaArgNo())
4325       continue;
4326     if (i < IRFuncTy->getNumParams())
4327       assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4328   }
4329 #endif
4330 
4331   // Update the largest vector width if any arguments have vector types.
4332   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4333     if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
4334       LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
4335                                    VT->getPrimitiveSizeInBits().getFixedSize());
4336   }
4337 
4338   // Compute the calling convention and attributes.
4339   unsigned CallingConv;
4340   llvm::AttributeList Attrs;
4341   CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4342                              Callee.getAbstractInfo(), Attrs, CallingConv,
4343                              /*AttrOnCallSite=*/true);
4344 
4345   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
4346     if (FD->usesFPIntrin())
4347       // All calls within a strictfp function are marked strictfp
4348       Attrs =
4349         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4350                            llvm::Attribute::StrictFP);
4351 
4352   // Apply some call-site-specific attributes.
4353   // TODO: work this into building the attribute set.
4354 
4355   // Apply always_inline to all calls within flatten functions.
4356   // FIXME: should this really take priority over __try, below?
4357   if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4358       !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
4359     Attrs =
4360         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4361                            llvm::Attribute::AlwaysInline);
4362   }
4363 
4364   // Disable inlining inside SEH __try blocks.
4365   if (isSEHTryScope()) {
4366     Attrs =
4367         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4368                            llvm::Attribute::NoInline);
4369   }
4370 
4371   // Decide whether to use a call or an invoke.
4372   bool CannotThrow;
4373   if (currentFunctionUsesSEHTry()) {
4374     // SEH cares about asynchronous exceptions, so everything can "throw."
4375     CannotThrow = false;
4376   } else if (isCleanupPadScope() &&
4377              EHPersonality::get(*this).isMSVCXXPersonality()) {
4378     // The MSVC++ personality will implicitly terminate the program if an
4379     // exception is thrown during a cleanup outside of a try/catch.
4380     // We don't need to model anything in IR to get this behavior.
4381     CannotThrow = true;
4382   } else {
4383     // Otherwise, nounwind call sites will never throw.
4384     CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4385                                      llvm::Attribute::NoUnwind);
4386   }
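
  // Illustrative IR for the two shapes (a sketch):
  //
  //   %r = call i32 @f(i32 %x)                            ; CannotThrow
  //   %r = invoke i32 @f(i32 %x)
  //           to label %invoke.cont unwind label %lpad    ; may throw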
4387 
4388   // If we made a temporary, be sure to clean up after ourselves. Note that we
4389   // can't depend on being inside of an ExprWithCleanups, so we need to manually
4390   // pop this cleanup later on. Being eager about this is OK, since this
4391   // temporary is 'invisible' outside of the callee.
4392   if (UnusedReturnSizePtr)
4393     pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4394                                          UnusedReturnSizePtr);
4395 
4396   llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4397 
4398   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4399       getBundlesForFunclet(CalleePtr);
4400 
4408   // Emit the actual call/invoke instruction.
4409   llvm::CallBase *CI;
4410   if (!InvokeDest) {
4411     CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
4412   } else {
4413     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4414     CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
4415                               BundleList);
4416     EmitBlock(Cont);
4417   }
4418   if (callOrInvoke)
4419     *callOrInvoke = CI;
4420 
4421   // If this is an indirect call within a function that has the guard(nocf)
4422   // attribute, add the "guard_nocf" attribute to the call to indicate that
4423   // Control Flow Guard checks should not be added, even if the call is inlined.
4424   if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
4425     if (const auto *A = FD->getAttr<CFGuardAttr>()) {
4426       if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
4427         Attrs = Attrs.addAttribute(
4428             getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
4429     }
4430   }
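
  // A sketch of the source-level attribute (MSVC spelling):
  //
  //   __declspec(guard(nocf)) void caller(void (*fp)(void)) {
  //     fp();   // this indirect call site gets "guard_nocf"
  //   }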
4431 
4432   // Apply the attributes and calling convention.
4433   CI->setAttributes(Attrs);
4434   CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4435 
4436   // Apply various metadata.
4437 
4438   if (!CI->getType()->isVoidTy())
4439     CI->setName("call");
4440 
4441   // Update largest vector width from the return type.
4442   if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
4443     LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
4444                                   VT->getPrimitiveSizeInBits().getFixedSize());
4445 
4446   // Insert instrumentation or attach profile metadata at indirect call sites.
4447   // For more details, see the comment before the definition of
4448   // IPVK_IndirectCallTarget in InstrProfData.inc.
4449   if (!CI->getCalledFunction())
4450     PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4451                      CI, CalleePtr);
4452 
4453   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4454   // optimizer it can aggressively ignore unwind edges.
4455   if (CGM.getLangOpts().ObjCAutoRefCount)
4456     AddObjCARCExceptionMetadata(CI);
4457 
4458   // Suppress tail calls if requested.
4459   if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4460     if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4461       Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4462   }
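
  // E.g. (a sketch): a callee declared
  //
  //   __attribute__((not_tail_called)) int f(int);
  //
  // gets TCK_NoTail ("notail") applied at each of its direct call sites.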
4463 
4464   // Add metadata for calls to MSAllocator functions
4465   if (getDebugInfo() && TargetDecl &&
4466       TargetDecl->hasAttr<MSAllocatorAttr>())
4467     getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy, Loc);
4468 
4469   // 4. Finish the call.
4470 
4471   // If the call doesn't return, finish the basic block and clear the
4472   // insertion point; this allows the rest of IRGen to discard
4473   // unreachable code.
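  // E.g. (a sketch): after
  //
  //   _Noreturn void die(const char *);
  //   die("boom");
  //
  // we emit 'unreachable' and clear the insertion point below.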
4474   if (CI->doesNotReturn()) {
4475     if (UnusedReturnSizePtr)
4476       PopCleanupBlock();
4477 
4478     // Strip away the noreturn attribute to better diagnose unreachable UB.
4479     if (SanOpts.has(SanitizerKind::Unreachable)) {
4480       // Also remove from function since CallBase::hasFnAttr additionally checks
4481       // attributes of the called function.
4482       if (auto *F = CI->getCalledFunction())
4483         F->removeFnAttr(llvm::Attribute::NoReturn);
4484       CI->removeAttribute(llvm::AttributeList::FunctionIndex,
4485                           llvm::Attribute::NoReturn);
4486 
4487       // Avoid incompatibility with ASan which relies on the `noreturn`
4488       // attribute to insert handler calls.
4489       if (SanOpts.hasOneOf(SanitizerKind::Address |
4490                            SanitizerKind::KernelAddress)) {
4491         SanitizerScope SanScope(this);
4492         llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
4493         Builder.SetInsertPoint(CI);
4494         auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4495         llvm::FunctionCallee Fn =
4496             CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
4497         EmitNounwindRuntimeCall(Fn);
4498       }
4499     }
4500 
4501     EmitUnreachable(Loc);
4502     Builder.ClearInsertionPoint();
4503 
4504     // FIXME: For now, emit a dummy basic block because expr emitters
4505     // generally are not ready to handle emitting expressions at unreachable
4506     // points.
4507     EnsureInsertPoint();
4508 
4509     // Return a reasonable RValue.
4510     return GetUndefRValue(RetTy);
4511   }
4512 
4513   // Perform the swifterror writeback.
4514   if (swiftErrorTemp.isValid()) {
4515     llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4516     Builder.CreateStore(errorResult, swiftErrorArg);
4517   }
4518 
4519   // Emit any call-associated writebacks immediately.  Arguably this
4520   // should happen after any return-value munging.
4521   if (CallArgs.hasWritebacks())
4522     emitWritebacks(*this, CallArgs);
4523 
4524   // The stack cleanup for inalloca arguments has to run out of the normal
4525   // lexical order, so deactivate it and run it manually here.
4526   CallArgs.freeArgumentMemory(*this);
4527 
4528   // Extract the return value.
4529   RValue Ret = [&] {
4530     switch (RetAI.getKind()) {
4531     case ABIArgInfo::CoerceAndExpand: {
4532       auto coercionType = RetAI.getCoerceAndExpandType();
4533 
4534       Address addr = SRetPtr;
4535       addr = Builder.CreateElementBitCast(addr, coercionType);
4536 
4537       assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4538       bool requiresExtract = isa<llvm::StructType>(CI->getType());
4539 
4540       unsigned unpaddedIndex = 0;
4541       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4542         llvm::Type *eltType = coercionType->getElementType(i);
4543         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4544         Address eltAddr = Builder.CreateStructGEP(addr, i);
4545         llvm::Value *elt = CI;
4546         if (requiresExtract)
4547           elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4548         else
4549           assert(unpaddedIndex == 0);
4550         Builder.CreateStore(elt, eltAddr);
4551       }
4552       // The result was stored into SRetPtr's memory above; fall through to load it.
4553       LLVM_FALLTHROUGH;
4554     }
4555 
4556     case ABIArgInfo::InAlloca:
4557     case ABIArgInfo::Indirect: {
4558       RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4559       if (UnusedReturnSizePtr)
4560         PopCleanupBlock();
4561       return ret;
4562     }
4563 
4564     case ABIArgInfo::Ignore:
4565       // Even though the result is ignored, make sure to construct the
4566       // appropriate (undef) return value for our caller.
4567       return GetUndefRValue(RetTy);
4568 
4569     case ABIArgInfo::Extend:
4570     case ABIArgInfo::Direct: {
4571       llvm::Type *RetIRTy = ConvertType(RetTy);
4572       if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4573         switch (getEvaluationKind(RetTy)) {
4574         case TEK_Complex: {
4575           llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4576           llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4577           return RValue::getComplex(std::make_pair(Real, Imag));
4578         }
4579         case TEK_Aggregate: {
4580           Address DestPtr = ReturnValue.getValue();
4581           bool DestIsVolatile = ReturnValue.isVolatile();
4582 
4583           if (!DestPtr.isValid()) {
4584             DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4585             DestIsVolatile = false;
4586           }
4587           BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4588           return RValue::getAggregate(DestPtr);
4589         }
4590         case TEK_Scalar: {
4591           // If the return value's type doesn't match, perform a bitcast to
4592           // coerce it.  This can happen due to trivial type mismatches.
4593           llvm::Value *V = CI;
4594           if (V->getType() != RetIRTy)
4595             V = Builder.CreateBitCast(V, RetIRTy);
4596           return RValue::get(V);
4597         }
4598         }
4599         llvm_unreachable("bad evaluation kind");
4600       }
4601 
4602       Address DestPtr = ReturnValue.getValue();
4603       bool DestIsVolatile = ReturnValue.isVolatile();
4604 
4605       if (!DestPtr.isValid()) {
4606         DestPtr = CreateMemTemp(RetTy, "coerce");
4607         DestIsVolatile = false;
4608       }
4609 
4610       // If the value is offset in memory, apply the offset now.
4611       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4612       CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4613 
4614       return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4615     }
4616 
4617     case ABIArgInfo::Expand:
4618       llvm_unreachable("Invalid ABI kind for return argument");
4619     }
4620 
4621     llvm_unreachable("Unhandled ABIArgInfo::Kind");
4622   } ();
4623 
4624   // Emit the assume_aligned check on the return value.
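  // E.g. (a hedged sketch): for a callee declared
  //
  //   __attribute__((assume_aligned(64))) void *my_alloc(unsigned n);
  //
  // the returned pointer gets an alignment assumption of 64 bytes.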
4625   if (Ret.isScalar() && TargetDecl) {
4626     if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4627       llvm::Value *OffsetValue = nullptr;
4628       if (const auto *Offset = AA->getOffset())
4629         OffsetValue = EmitScalarExpr(Offset);
4630 
4631       llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4632       llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4633       EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4634                               AlignmentCI, OffsetValue);
4635     } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4636       llvm::Value *AlignmentVal = CallArgs[AA->getParamIndex().getLLVMIndex()]
4637                                       .getRValue(*this)
4638                                       .getScalarVal();
4639       EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4640                               AlignmentVal);
4641     }
4642   }
4643 
4644   // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
4645   // we can't use the full cleanup mechanism.
4646   for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
4647     LifetimeEnd.Emit(*this, /*Flags=*/{});
4648 
4649   return Ret;
4650 }
4651 
4652 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4653   if (isVirtual()) {
4654     const CallExpr *CE = getVirtualCallExpr();
4655     return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4656         CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
4657         CE ? CE->getBeginLoc() : SourceLocation());
4658   }
4659 
4660   return *this;
4661 }
4662 
4663 /* VarArg handling */
4664 
4665 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4666   VAListAddr = VE->isMicrosoftABI()
4667                  ? EmitMSVAListRef(VE->getSubExpr())
4668                  : EmitVAListRef(VE->getSubExpr());
4669   QualType Ty = VE->getType();
4670   if (VE->isMicrosoftABI())
4671     return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4672   return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4673 }
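
/* For reference, a sketch of the standard C source this lowers:

     #include <stdarg.h>
     int sum(int n, ...) {
       va_list ap;
       va_start(ap, n);
       int s = 0;
       for (int i = 0; i < n; ++i)
         s += va_arg(ap, int);   // each va_arg lowers through EmitVAArg
       va_end(ap);
       return s;
     }
*/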
4674