//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel:
    return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification. Either or both of RD and MD may be null. A null RD indicates
/// that there is no meaningful 'this' type, and a null MD can occur when
/// calling a method pointer.
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
                                         const CXXMethodDecl *MD) {
  QualType RecTy;
  if (RD)
    RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  else
    RecTy = Context.VoidTy;

  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getMethodQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}
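
// Illustration (informal): for `struct S { void f() const; };` the 'this'
// type derived for S::f is plain 'S *'; the method's const qualifier is
// deliberately ignored here.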

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and ABI code is a little simpler when it can assume that all
/// parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
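  // (RequiredArgs(0) marks every argument as optional, so the resulting
  // LLVM function type is fully variadic.)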
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
         llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                        const FunctionProtoType *proto,
                                        unsigned prefixArgs,
                                        unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // The implicit size argument created for a pass_object_size parameter
    // carries no parameter info of its own; give it a default entry.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}
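
// Illustration (informal): given
//   void f(void *p __attribute__((pass_object_size(0))));
// appendParameterTypes pushes 'void *' and then 'size_t', so the single
// source-level parameter occupies two slots in the lowered argument list.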

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
                                               bool IsWindows) {
  // Set the appropriate calling convention for the Function.
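  // Attributes are tested in a fixed order; the first match below wins.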
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
///  so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(DeriveThisType(RD, MD));

  return ::arrangeLLVMFunctionInfo(
      *this, /*instanceMethod=*/true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(DeriveThisType(MD->getParent(), MD));

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgCounts AddedArgs =
      TheCXXABI.buildStructorSignature(GD, argTypes);
  if (!paramInfos.empty()) {
    // Note: ABI-specific 'prefix' arguments are inserted immediately after
    // the implicit 'this' parameter, hence the begin() + 1 below.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ constructor, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = PassProtoArgs
                              ? RequiredArgs::forPrototypePlus(
                                    FPT, TotalPrefixArgs + ExtraSuffixArgs)
                              : RequiredArgs::All;

  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type.  The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
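  // The two default-constructed entries above cover the implicit 'self' and
  // '_cmd' arguments pushed next.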
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))
    return arrangeCXXStructorDeclaration(GD);

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs.  Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(DeriveThisType(RD, CD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
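  // Informal note: in the Microsoft C++ ABI, constructors of classes with
  // virtual bases take an extra implicit 'is most derived' integer flag,
  // which is what the IntTy pushed below models.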
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                .isNoProtoCallVariadic(args,
                                       cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments.  The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
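  // The single required prefix argument here is the implicit block literal
  // pointer that every block function receives.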
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 /*instanceMethod=*/false, /*chainCall=*/false,
                                 argTypes, proto->getExtInfo(), paramInfos,
                                 RequiredArgs::forPrototypePlus(proto, 1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/{}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with fewer args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
    getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type.  This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info.  We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target-independent argument handling for the host-visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI);
  (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
  assert(!required.allowsOptionalArgs() ||
         required.getNumRequiredArgs() <= argTypes.size());

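  // Trailing storage layout: ArgInfo slot 0 holds the return type/info, the
  // argument infos follow at slots [1, NumArgs], and the ExtParameterInfos
  // come after them; hence the "argTypes.size() + 1" below.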
  void *buffer =
    operator new(totalSizeToAlloc<ArgInfo,             ExtParameterInfo>(
                                  argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new (buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->CmseNSCall = info.getCmseNSCall();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if the record is a union,
    // only the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
}  // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return std::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can appear here only in degenerate cases - all the fields are
      // the same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return std::make_unique<RecordExpansion>(std::move(Bases),
                                             std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return std::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return std::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}
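
// Illustration (informal): for `struct { int a[2]; _Complex float c; }` the
// expansion size is 2 (array elements) + 2 (real and imaginary parts) = 4.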

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
    BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
      CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                         llvm::Function::arg_iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(
        *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
          LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
          ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress(*this);
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    // Emit a primitive store through EmitStoreOfScalar, except when the
    // lvalue is a bit-field, which must be stored through the lvalue.
    assert(isa<NoExpansion>(Exp.get()));
    if (LV.isBitField())
      EmitStoreThroughLValue(RValue::get(&*AI++), LV);
    else
      EmitStoreOfScalar(&*AI++, LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign,
                                           const Twine &Name = "tmp") {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to gep into the struct to get at
/// its inner goodness.  Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
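
// Illustration (informal): with SrcSTy = { { i32, i32 }, i8 } and DstSize = 8,
// this dives into the inner { i32, i32 } (its store size, 8, covers the
// access) and stops there, since a lone i32 would be too small.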

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers.  This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
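
// Illustration (informal): coercing an i64 down to an i32 keeps the low
// 32 bits on little-endian targets but the high 32 bits on big-endian
// targets, exactly as if the value had been stored to memory and reloaded.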

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
1228 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1229                                       CodeGenFunction &CGF) {
1230   llvm::Type *SrcTy = Src.getElementType();
1231 
1232   // If SrcTy and Ty are the same, just do a load.
1233   if (SrcTy == Ty)
1234     return CGF.Builder.CreateLoad(Src);
1235 
1236   llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1237 
1238   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1239     Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
1240                                              DstSize.getFixedSize(), CGF);
1241     SrcTy = Src.getElementType();
1242   }
1243 
1244   llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1245 
1246   // If the source and destination are integer or pointer types, just do an
1247   // extension or truncation to the desired type.
1248   if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1249       (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1250     llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1251     return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1252   }
1253 
1254   // If load is legal, just bitcast the src pointer.
1255   if (!SrcSize.isScalable() && !DstSize.isScalable() &&
1256       SrcSize.getFixedSize() >= DstSize.getFixedSize()) {
1257     // Generally SrcSize is never greater than DstSize, since this means we are
1258     // losing bits. However, this can happen in cases where the structure has
1259     // additional padding, for example due to a user specified alignment.
1260     //
1261     // FIXME: Assert that we aren't truncating non-padding bits when have access
1262     // to that information.
1263     Src = CGF.Builder.CreateBitCast(Src,
1264                                     Ty->getPointerTo(Src.getAddressSpace()));
1265     return CGF.Builder.CreateLoad(Src);
1266   }
1267 
1268   // If coercing a fixed vector to a scalable vector for ABI compatibility, and
1269   // the types match, use the llvm.experimental.vector.insert intrinsic to
1270   // perform the conversion.
1271   if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
1272     if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
1273       if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
1274         auto *Load = CGF.Builder.CreateLoad(Src);
1275         auto *UndefVec = llvm::UndefValue::get(ScalableDst);
1276         auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
1277         return CGF.Builder.CreateInsertVector(ScalableDst, UndefVec, Load, Zero,
1278                                               "castScalableSve");
1279       }
1280     }
1281   }
1282 
1283   // Otherwise do coercion through memory. This is stupid, but simple.
1284   Address Tmp =
1285       CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
1286   CGF.Builder.CreateMemCpy(
1287       Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
1288       Src.getAlignment().getAsAlign(),
1289       llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
1290   return CGF.Builder.CreateLoad(Tmp);
1291 }
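
// Example (illustrative only; value names assumed): coercing a fixed
// <4 x float> load to a scalable <vscale x 4 x float> for ABI purposes
// takes the insert-vector path above and produces IR of roughly this shape:
//
//   %load = load <4 x float>, <4 x float>* %src
//   %cast = call <vscale x 4 x float>
//       @llvm.experimental.vector.insert.nxv4f32.v4f32(
//           <vscale x 4 x float> undef, <4 x float> %load, i64 0)
//
// Any remaining mismatched case falls back to the memcpy through a
// temporary alloca.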
1292 
1293 // Function to store a first-class aggregate into memory.  We prefer to
1294 // store the elements rather than the aggregate to be more friendly to
1295 // fast-isel.
1296 // FIXME: Do we need to recurse here?
1297 void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
1298                                          bool DestIsVolatile) {
1299   // Prefer scalar stores to first-class aggregate stores.
1300   if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
1301     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1302       Address EltPtr = Builder.CreateStructGEP(Dest, i);
1303       llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
1304       Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1305     }
1306   } else {
1307     Builder.CreateStore(Val, Dest, DestIsVolatile);
1308   }
1309 }
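
// Sketch of the flattening above (illustrative; names assumed): storing a
// value %v of type {i32, float} becomes two scalar stores,
//
//   %elt0 = extractvalue {i32, float} %v, 0
//   store i32 %elt0, i32* %dest.elt0
//   %elt1 = extractvalue {i32, float} %v, 1
//   store float %elt1, float* %dest.elt1
//
// rather than a single first-class-aggregate store.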
1310 
1311 /// CreateCoercedStore - Create a store to \arg Dst from \arg Src,
1312 /// where the source and destination may have different types.  The
1313 /// alignment of the destination is taken from the \arg Dst address itself.
1314 ///
1315 /// This safely handles the case when the src type is larger than the
1316 /// destination type; the upper bits of the src will be lost.
1317 static void CreateCoercedStore(llvm::Value *Src,
1318                                Address Dst,
1319                                bool DstIsVolatile,
1320                                CodeGenFunction &CGF) {
1321   llvm::Type *SrcTy = Src->getType();
1322   llvm::Type *DstTy = Dst.getElementType();
1323   if (SrcTy == DstTy) {
1324     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1325     return;
1326   }
1327 
1328   llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1329 
1330   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1331     Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
1332                                              SrcSize.getFixedSize(), CGF);
1333     DstTy = Dst.getElementType();
1334   }
1335 
1336   llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
1337   llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
1338   if (SrcPtrTy && DstPtrTy &&
1339       SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
1340     Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
1341     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1342     return;
1343   }
1344 
1345   // If the source and destination are integer or pointer types, just do an
1346   // extension or truncation to the desired type.
1347   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1348       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1349     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1350     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1351     return;
1352   }
1353 
1354   llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1355 
1356   // If store is legal, just bitcast the src pointer.
1357   if (isa<llvm::ScalableVectorType>(SrcTy) ||
1358       isa<llvm::ScalableVectorType>(DstTy) ||
1359       SrcSize.getFixedSize() <= DstSize.getFixedSize()) {
1360     Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1361     CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
1362   } else {
1363     // Otherwise do coercion through memory. This is stupid, but
1364     // simple.
1365 
1366     // Generally SrcSize is never greater than DstSize, since this means we are
1367     // losing bits. However, this can happen in cases where the structure has
1368     // additional padding, for example due to a user specified alignment.
1369     //
1370     // FIXME: Assert that we aren't truncating non-padding bits when we have
1371     // access to that information.
1372     Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1373     CGF.Builder.CreateStore(Src, Tmp);
1374     CGF.Builder.CreateMemCpy(
1375         Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
1376         Tmp.getAlignment().getAsAlign(),
1377         llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
1378   }
1379 }
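
// Example of the memcpy fallback above (illustrative): storing an i64
// coerced value into a destination whose alloc size is only 6 bytes spills
// the value to a temporary and copies just DstSize bytes, dropping the
// upper (padding) source bits:
//
//   %tmp = alloca i64
//   store i64 %src, i64* %tmp
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %tmp.cast, i64 6, ...)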
1380 
1381 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1382                                    const ABIArgInfo &info) {
1383   if (unsigned offset = info.getDirectOffset()) {
1384     addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1385     addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1386                                              CharUnits::fromQuantity(offset));
1387     addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1388   }
1389   return addr;
1390 }
1391 
1392 namespace {
1393 
1394 /// Encapsulates information about the way function arguments from
1395 /// CGFunctionInfo should be passed to the actual LLVM IR function.
1396 class ClangToLLVMArgMapping {
1397   static const unsigned InvalidIndex = ~0U;
1398   unsigned InallocaArgNo;
1399   unsigned SRetArgNo;
1400   unsigned TotalIRArgs;
1401 
1402   /// LLVM IR arguments corresponding to a single Clang argument.
1403   struct IRArgs {
1404     unsigned PaddingArgIndex;
1405     // Argument is expanded to IR arguments at positions
1406     // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1407     unsigned FirstArgIndex;
1408     unsigned NumberOfArgs;
1409 
1410     IRArgs()
1411         : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1412           NumberOfArgs(0) {}
1413   };
1414 
1415   SmallVector<IRArgs, 8> ArgInfo;
1416 
1417 public:
1418   ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1419                         bool OnlyRequiredArgs = false)
1420       : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1421         ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1422     construct(Context, FI, OnlyRequiredArgs);
1423   }
1424 
1425   bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1426   unsigned getInallocaArgNo() const {
1427     assert(hasInallocaArg());
1428     return InallocaArgNo;
1429   }
1430 
1431   bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1432   unsigned getSRetArgNo() const {
1433     assert(hasSRetArg());
1434     return SRetArgNo;
1435   }
1436 
1437   unsigned totalIRArgs() const { return TotalIRArgs; }
1438 
1439   bool hasPaddingArg(unsigned ArgNo) const {
1440     assert(ArgNo < ArgInfo.size());
1441     return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1442   }
1443   unsigned getPaddingArgNo(unsigned ArgNo) const {
1444     assert(hasPaddingArg(ArgNo));
1445     return ArgInfo[ArgNo].PaddingArgIndex;
1446   }
1447 
1448   /// Returns the index of the first IR argument corresponding to ArgNo,
1449   /// and the number of IR arguments it expands to.
1450   std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1451     assert(ArgNo < ArgInfo.size());
1452     return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1453                           ArgInfo[ArgNo].NumberOfArgs);
1454   }
1455 
1456 private:
1457   void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1458                  bool OnlyRequiredArgs);
1459 };
1460 
1461 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1462                                       const CGFunctionInfo &FI,
1463                                       bool OnlyRequiredArgs) {
1464   unsigned IRArgNo = 0;
1465   bool SwapThisWithSRet = false;
1466   const ABIArgInfo &RetAI = FI.getReturnInfo();
1467 
1468   if (RetAI.getKind() == ABIArgInfo::Indirect) {
1469     SwapThisWithSRet = RetAI.isSRetAfterThis();
1470     SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1471   }
1472 
1473   unsigned ArgNo = 0;
1474   unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1475   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1476        ++I, ++ArgNo) {
1477     assert(I != FI.arg_end());
1478     QualType ArgType = I->type;
1479     const ABIArgInfo &AI = I->info;
1480     // Collect data about IR arguments corresponding to Clang argument ArgNo.
1481     auto &IRArgs = ArgInfo[ArgNo];
1482 
1483     if (AI.getPaddingType())
1484       IRArgs.PaddingArgIndex = IRArgNo++;
1485 
1486     switch (AI.getKind()) {
1487     case ABIArgInfo::Extend:
1488     case ABIArgInfo::Direct: {
1489       // FIXME: handle sseregparm someday...
1490       llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1491       if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1492         IRArgs.NumberOfArgs = STy->getNumElements();
1493       } else {
1494         IRArgs.NumberOfArgs = 1;
1495       }
1496       break;
1497     }
1498     case ABIArgInfo::Indirect:
1499     case ABIArgInfo::IndirectAliased:
1500       IRArgs.NumberOfArgs = 1;
1501       break;
1502     case ABIArgInfo::Ignore:
1503     case ABIArgInfo::InAlloca:
1504       // ignore and inalloca don't have matching LLVM parameters.
1505       IRArgs.NumberOfArgs = 0;
1506       break;
1507     case ABIArgInfo::CoerceAndExpand:
1508       IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1509       break;
1510     case ABIArgInfo::Expand:
1511       IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1512       break;
1513     }
1514 
1515     if (IRArgs.NumberOfArgs > 0) {
1516       IRArgs.FirstArgIndex = IRArgNo;
1517       IRArgNo += IRArgs.NumberOfArgs;
1518     }
1519 
1520     // Skip over the sret parameter when it comes second.  We already handled it
1521     // above.
1522     if (IRArgNo == 1 && SwapThisWithSRet)
1523       IRArgNo++;
1524   }
1525   assert(ArgNo == ArgInfo.size());
1526 
1527   if (FI.usesInAlloca())
1528     InallocaArgNo = IRArgNo++;
1529 
1530   TotalIRArgs = IRArgNo;
1531 }
1532 }  // namespace
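
// Worked example (illustrative, not from the source): for a function whose
// return is Indirect (sret emitted first) and whose single Clang argument is
// Direct with a flattenable coercion type {i32, i32}, construct() yields:
//
//   SRetArgNo                = 0
//   ArgInfo[0].FirstArgIndex = 1, ArgInfo[0].NumberOfArgs = 2
//   TotalIRArgs              = 3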
1533 
1534 /***/
1535 
1536 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1537   const auto &RI = FI.getReturnInfo();
1538   return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1539 }
1540 
1541 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1542   return ReturnTypeUsesSRet(FI) &&
1543          getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1544 }
1545 
1546 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1547   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1548     switch (BT->getKind()) {
1549     default:
1550       return false;
1551     case BuiltinType::Float:
1552       return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1553     case BuiltinType::Double:
1554       return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1555     case BuiltinType::LongDouble:
1556       return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1557     }
1558   }
1559 
1560   return false;
1561 }
1562 
1563 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1564   if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1565     if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1566       if (BT->getKind() == BuiltinType::LongDouble)
1567         return getTarget().useObjCFP2RetForComplexLongDouble();
1568     }
1569   }
1570 
1571   return false;
1572 }
1573 
1574 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1575   const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1576   return GetFunctionType(FI);
1577 }
1578 
1579 llvm::FunctionType *
1580 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1581 
1582   bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1583   (void)Inserted;
1584   assert(Inserted && "Recursively being processed?");
1585 
1586   llvm::Type *resultType = nullptr;
1587   const ABIArgInfo &retAI = FI.getReturnInfo();
1588   switch (retAI.getKind()) {
1589   case ABIArgInfo::Expand:
1590   case ABIArgInfo::IndirectAliased:
1591     llvm_unreachable("Invalid ABI kind for return argument");
1592 
1593   case ABIArgInfo::Extend:
1594   case ABIArgInfo::Direct:
1595     resultType = retAI.getCoerceToType();
1596     break;
1597 
1598   case ABIArgInfo::InAlloca:
1599     if (retAI.getInAllocaSRet()) {
1600       // sret things on win32 aren't void; they return the sret pointer.
1601       QualType ret = FI.getReturnType();
1602       llvm::Type *ty = ConvertType(ret);
1603       unsigned addressSpace = Context.getTargetAddressSpace(ret);
1604       resultType = llvm::PointerType::get(ty, addressSpace);
1605     } else {
1606       resultType = llvm::Type::getVoidTy(getLLVMContext());
1607     }
1608     break;
1609 
1610   case ABIArgInfo::Indirect:
1611   case ABIArgInfo::Ignore:
1612     resultType = llvm::Type::getVoidTy(getLLVMContext());
1613     break;
1614 
1615   case ABIArgInfo::CoerceAndExpand:
1616     resultType = retAI.getUnpaddedCoerceAndExpandType();
1617     break;
1618   }
1619 
1620   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1621   SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1622 
1623   // Add type for sret argument.
1624   if (IRFunctionArgs.hasSRetArg()) {
1625     QualType Ret = FI.getReturnType();
1626     llvm::Type *Ty = ConvertType(Ret);
1627     unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1628     ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1629         llvm::PointerType::get(Ty, AddressSpace);
1630   }
1631 
1632   // Add type for inalloca argument.
1633   if (IRFunctionArgs.hasInallocaArg()) {
1634     auto ArgStruct = FI.getArgStruct();
1635     assert(ArgStruct);
1636     ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1637   }
1638 
1639   // Add in all of the required arguments.
1640   unsigned ArgNo = 0;
1641   CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1642                                      ie = it + FI.getNumRequiredArgs();
1643   for (; it != ie; ++it, ++ArgNo) {
1644     const ABIArgInfo &ArgInfo = it->info;
1645 
1646     // Insert a padding type to ensure proper alignment.
1647     if (IRFunctionArgs.hasPaddingArg(ArgNo))
1648       ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1649           ArgInfo.getPaddingType();
1650 
1651     unsigned FirstIRArg, NumIRArgs;
1652     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1653 
1654     switch (ArgInfo.getKind()) {
1655     case ABIArgInfo::Ignore:
1656     case ABIArgInfo::InAlloca:
1657       assert(NumIRArgs == 0);
1658       break;
1659 
1660     case ABIArgInfo::Indirect: {
1661       assert(NumIRArgs == 1);
1662       // indirect arguments are always on the stack, which is alloca addr space.
1663       llvm::Type *LTy = ConvertTypeForMem(it->type);
1664       ArgTypes[FirstIRArg] = LTy->getPointerTo(
1665           CGM.getDataLayout().getAllocaAddrSpace());
1666       break;
1667     }
1668     case ABIArgInfo::IndirectAliased: {
1669       assert(NumIRArgs == 1);
1670       llvm::Type *LTy = ConvertTypeForMem(it->type);
1671       ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace());
1672       break;
1673     }
1674     case ABIArgInfo::Extend:
1675     case ABIArgInfo::Direct: {
1676       // Fast-isel and the optimizer generally like scalar values better than
1677       // FCAs, so we flatten them if this is safe to do for this argument.
1678       llvm::Type *argType = ArgInfo.getCoerceToType();
1679       llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1680       if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1681         assert(NumIRArgs == st->getNumElements());
1682         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1683           ArgTypes[FirstIRArg + i] = st->getElementType(i);
1684       } else {
1685         assert(NumIRArgs == 1);
1686         ArgTypes[FirstIRArg] = argType;
1687       }
1688       break;
1689     }
1690 
1691     case ABIArgInfo::CoerceAndExpand: {
1692       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1693       for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1694         *ArgTypesIter++ = EltTy;
1695       }
1696       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1697       break;
1698     }
1699 
1700     case ABIArgInfo::Expand:
1701       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1702       getExpandedTypes(it->type, ArgTypesIter);
1703       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1704       break;
1705     }
1706   }
1707 
1708   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1709   assert(Erased && "Not in set?");
1710 
1711   return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1712 }
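
// Example (illustrative): for 'struct Big f(int x, ...)' on a target that
// returns Big indirectly, the computed type is roughly
//
//   void (%struct.Big*, i32, ...)
//
// where the sret pointer occupies the slot assigned by ClangToLLVMArgMapping,
// only the required arguments are materialized, and variadic-ness is
// preserved by the final FunctionType::get call.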
1713 
1714 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1715   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1716   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1717 
1718   if (!isFuncTypeConvertible(FPT))
1719     return llvm::StructType::get(getLLVMContext());
1720 
1721   return GetFunctionType(GD);
1722 }
1723 
1724 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1725                                                llvm::AttrBuilder &FuncAttrs,
1726                                                const FunctionProtoType *FPT) {
1727   if (!FPT)
1728     return;
1729 
1730   if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1731       FPT->isNothrow())
1732     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1733 }
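
// Example (illustrative): given 'void f() noexcept;', the prototype has a
// resolved exception spec and is nothrow, so the helper above adds the
// 'nounwind' IR attribute for f.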
1734 
1735 void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
1736                                                  bool HasOptnone,
1737                                                  bool AttrOnCallSite,
1738                                                llvm::AttrBuilder &FuncAttrs) {
1739   // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1740   if (!HasOptnone) {
1741     if (CodeGenOpts.OptimizeSize)
1742       FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1743     if (CodeGenOpts.OptimizeSize == 2)
1744       FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1745   }
1746 
1747   if (CodeGenOpts.DisableRedZone)
1748     FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1749   if (CodeGenOpts.IndirectTlsSegRefs)
1750     FuncAttrs.addAttribute("indirect-tls-seg-refs");
1751   if (CodeGenOpts.NoImplicitFloat)
1752     FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1753 
1754   if (AttrOnCallSite) {
1755     // Attributes that should go on the call site only.
1756     if (!CodeGenOpts.SimplifyLibCalls ||
1757         CodeGenOpts.isNoBuiltinFunc(Name.data()))
1758       FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1759     if (!CodeGenOpts.TrapFuncName.empty())
1760       FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1761   } else {
1762     StringRef FpKind;
1763     switch (CodeGenOpts.getFramePointer()) {
1764     case CodeGenOptions::FramePointerKind::None:
1765       FpKind = "none";
1766       break;
1767     case CodeGenOptions::FramePointerKind::NonLeaf:
1768       FpKind = "non-leaf";
1769       break;
1770     case CodeGenOptions::FramePointerKind::All:
1771       FpKind = "all";
1772       break;
1773     }
1774     FuncAttrs.addAttribute("frame-pointer", FpKind);
1775 
1776     FuncAttrs.addAttribute("less-precise-fpmad",
1777                            llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1778 
1779     if (CodeGenOpts.NullPointerIsValid)
1780       FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
1781 
1782     if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
1783       FuncAttrs.addAttribute("denormal-fp-math",
1784                              CodeGenOpts.FPDenormalMode.str());
1785     if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
1786       FuncAttrs.addAttribute(
1787           "denormal-fp-math-f32",
1788           CodeGenOpts.FP32DenormalMode.str());
1789     }
1790 
1791     FuncAttrs.addAttribute("no-trapping-math",
1792                            llvm::toStringRef(LangOpts.getFPExceptionMode() ==
1793                                              LangOptions::FPE_Ignore));
1794 
1795     // Strict (compliant) code is the default, so only add this attribute to
1796     // indicate that we are trying to work around a problem case.
1797     if (!CodeGenOpts.StrictFloatCastOverflow)
1798       FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1799 
1800     // TODO: Are these all needed?
1801     // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1802     FuncAttrs.addAttribute("no-infs-fp-math",
1803                            llvm::toStringRef(LangOpts.NoHonorInfs));
1804     FuncAttrs.addAttribute("no-nans-fp-math",
1805                            llvm::toStringRef(LangOpts.NoHonorNaNs));
1806     FuncAttrs.addAttribute("unsafe-fp-math",
1807                            llvm::toStringRef(LangOpts.UnsafeFPMath));
1808     FuncAttrs.addAttribute("use-soft-float",
1809                            llvm::toStringRef(CodeGenOpts.SoftFloat));
1810     FuncAttrs.addAttribute("stack-protector-buffer-size",
1811                            llvm::utostr(CodeGenOpts.SSPBufferSize));
1812     FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1813                            llvm::toStringRef(LangOpts.NoSignedZero));
1814 
1815     // TODO: Reciprocal estimate codegen options should apply to instructions?
1816     const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1817     if (!Recips.empty())
1818       FuncAttrs.addAttribute("reciprocal-estimates",
1819                              llvm::join(Recips, ","));
1820 
1821     if (!CodeGenOpts.PreferVectorWidth.empty() &&
1822         CodeGenOpts.PreferVectorWidth != "none")
1823       FuncAttrs.addAttribute("prefer-vector-width",
1824                              CodeGenOpts.PreferVectorWidth);
1825 
1826     if (CodeGenOpts.StackRealignment)
1827       FuncAttrs.addAttribute("stackrealign");
1828     if (CodeGenOpts.Backchain)
1829       FuncAttrs.addAttribute("backchain");
1830     if (CodeGenOpts.EnableSegmentedStacks)
1831       FuncAttrs.addAttribute("split-stack");
1832 
1833     if (CodeGenOpts.SpeculativeLoadHardening)
1834       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1835   }
1836 
1837   if (getLangOpts().assumeFunctionsAreConvergent()) {
1838     // Conservatively, mark all functions and calls in CUDA and OpenCL as
1839     // convergent (meaning, they may call an intrinsically convergent op, such
1840     // as __syncthreads() / barrier(), and so can't have certain optimizations
1841     // applied around them).  LLVM will remove this attribute where it safely
1842     // can.
1843     FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1844   }
1845 
1846   if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1847     // Exceptions aren't supported in CUDA device code.
1848     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1849   }
1850 
1851   for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1852     StringRef Var, Value;
1853     std::tie(Var, Value) = Attr.split('=');
1854     FuncAttrs.addAttribute(Var, Value);
1855   }
1856 }
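
// Sketch of the resulting attributes (illustrative; the exact set depends on
// the CodeGenOptions): a definition compiled with -fno-omit-frame-pointer
// and default FP settings picks up string attributes along the lines of
//
//   "frame-pointer"="all" "less-precise-fpmad"="false"
//   "no-trapping-math"="true" "stack-protector-buffer-size"="8"
//
// whereas with AttrOnCallSite only call-site-only attributes such as
// 'nobuiltin' or "trap-func-name" are considered in the first branch.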
1857 
1858 void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
1859   llvm::AttrBuilder FuncAttrs;
1860   getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
1861                                /* AttrOnCallSite = */ false, FuncAttrs);
1862   // TODO: call GetCPUAndFeaturesAttributes?
1863   F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1864 }
1865 
1866 void CodeGenModule::addDefaultFunctionDefinitionAttributes(
1867                                                    llvm::AttrBuilder &attrs) {
1868   getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
1869                                /*for call*/ false, attrs);
1870   GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
1871 }
1872 
1873 static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
1874                                    const LangOptions &LangOpts,
1875                                    const NoBuiltinAttr *NBA = nullptr) {
1876   auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
1877     SmallString<32> AttributeName;
1878     AttributeName += "no-builtin-";
1879     AttributeName += BuiltinName;
1880     FuncAttrs.addAttribute(AttributeName);
1881   };
1882 
1883   // First, handle the language options passed through -fno-builtin.
1884   if (LangOpts.NoBuiltin) {
1885     // -fno-builtin disables them all.
1886     FuncAttrs.addAttribute("no-builtins");
1887     return;
1888   }
1889 
1890   // Then, add attributes for builtins specified through -fno-builtin-<name>.
1891   llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
1892 
1893   // Now, let's check the __attribute__((no_builtin("...")) attribute added to
1894   // the source.
1895   if (!NBA)
1896     return;
1897 
1898   // If there is a wildcard in the builtin names specified through the
1899   // attribute, disable them all.
1900   if (llvm::is_contained(NBA->builtinNames(), "*")) {
1901     FuncAttrs.addAttribute("no-builtins");
1902     return;
1903   }
1904 
1905   // And last, add the rest of the builtin names.
1906   llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
1907 }
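
// Example (illustrative): '-fno-builtin-memcpy' produces the string
// attribute "no-builtin-memcpy", while either '-fno-builtin' or a wildcard
// __attribute__((no_builtin("*"))) collapses everything to "no-builtins".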
1908 
1909 /// Construct the IR attribute list of a function or call.
1910 ///
1911 /// When adding an attribute, please consider where it should be handled:
1912 ///
1913 ///   - getDefaultFunctionAttributes is for attributes that are essentially
1914 ///     part of the global target configuration (but perhaps can be
1915 ///     overridden on a per-function basis).  Adding attributes there
1916 ///     will cause them to also be set in frontends that build on Clang's
1917 ///     target-configuration logic, as well as for code defined in library
1918 ///     modules such as CUDA's libdevice.
1919 ///
1920 ///   - ConstructAttributeList builds on top of getDefaultFunctionAttributes
1921 ///     and adds declaration-specific, convention-specific, and
1922 ///     frontend-specific logic.  The last is of particular importance:
1923 ///     attributes that restrict how the frontend generates code must be
1924 ///     added here rather than getDefaultFunctionAttributes.
1925 ///
1926 void CodeGenModule::ConstructAttributeList(
1927     StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1928     llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1929   llvm::AttrBuilder FuncAttrs;
1930   llvm::AttrBuilder RetAttrs;
1931 
1932   // Collect function IR attributes from the CC lowering.
1933   // We'll collect the parameter and result attributes later.
1934   CallingConv = FI.getEffectiveCallingConvention();
1935   if (FI.isNoReturn())
1936     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1937   if (FI.isCmseNSCall())
1938     FuncAttrs.addAttribute("cmse_nonsecure_call");
1939 
1940   // Collect function IR attributes from the callee prototype if we have one.
1941   AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1942                                      CalleeInfo.getCalleeFunctionProtoType());
1943 
1944   const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
1945 
1946   bool HasOptnone = false;
1947   // The NoBuiltinAttr attached to the target FunctionDecl.
1948   const NoBuiltinAttr *NBA = nullptr;
1949 
1950   // Collect function IR attributes based on declaration-specific
1951   // information.
1952   // FIXME: handle sseregparm someday...
1953   if (TargetDecl) {
1954     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1955       FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1956     if (TargetDecl->hasAttr<NoThrowAttr>())
1957       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1958     if (TargetDecl->hasAttr<NoReturnAttr>())
1959       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1960     if (TargetDecl->hasAttr<ColdAttr>())
1961       FuncAttrs.addAttribute(llvm::Attribute::Cold);
1962     if (TargetDecl->hasAttr<HotAttr>())
1963       FuncAttrs.addAttribute(llvm::Attribute::Hot);
1964     if (TargetDecl->hasAttr<NoDuplicateAttr>())
1965       FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1966     if (TargetDecl->hasAttr<ConvergentAttr>())
1967       FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1968 
1969     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1970       AddAttributesFromFunctionProtoType(
1971           getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1972       if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
1973         // A sane operator new returns a non-aliasing pointer.
1974         auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
1975         if (getCodeGenOpts().AssumeSaneOperatorNew &&
1976             (Kind == OO_New || Kind == OO_Array_New))
1977           RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1978       }
1979       const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1980       const bool IsVirtualCall = MD && MD->isVirtual();
1981       // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
1982       // virtual function. These attributes are not inherited by overrides.
1983       if (!(AttrOnCallSite && IsVirtualCall)) {
1984         if (Fn->isNoReturn())
1985           FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1986         NBA = Fn->getAttr<NoBuiltinAttr>();
1987       }
1988       // Only place nomerge attribute on call sites, never functions. This
1989       // allows it to work on indirect virtual function calls.
1990       if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
1991         FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
1992     }
1993 
1994     // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1995     if (TargetDecl->hasAttr<ConstAttr>()) {
1996       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1997       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1998     } else if (TargetDecl->hasAttr<PureAttr>()) {
1999       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
2000       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2001     } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
2002       FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
2003       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2004     }
2005     if (TargetDecl->hasAttr<RestrictAttr>())
2006       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2007     if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
2008         !CodeGenOpts.NullPointerIsValid)
2009       RetAttrs.addAttribute(llvm::Attribute::NonNull);
2010     if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2011       FuncAttrs.addAttribute("no_caller_saved_registers");
2012     if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
2013       FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2014     if (TargetDecl->hasAttr<LeafAttr>())
2015       FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2016 
2017     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
2018     if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
2019       Optional<unsigned> NumElemsParam;
2020       if (AllocSize->getNumElemsParam().isValid())
2021         NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2022       FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2023                                  NumElemsParam);
2024     }
2025 
2026     if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
2027       if (getLangOpts().OpenCLVersion <= 120) {
2028         // In OpenCL v1.2, work groups are always uniform.
2029         FuncAttrs.addAttribute("uniform-work-group-size", "true");
2030       } else {
2031         // In OpenCL v2.0, work groups may or may not be uniform. The
2032         // '-cl-uniform-work-group-size' compile option hints to the
2033         // compiler that the global work-size is a multiple of the
2034         // work-group size specified to clEnqueueNDRangeKernel
2035         // (i.e. work groups are uniform).
2036         FuncAttrs.addAttribute("uniform-work-group-size",
2037                                llvm::toStringRef(CodeGenOpts.UniformWGSize));
2038       }
2039     }
2040 
2041     std::string AssumptionValueStr;
2042     for (AssumptionAttr *AssumptionA :
2043          TargetDecl->specific_attrs<AssumptionAttr>()) {
2044       std::string AS = AssumptionA->getAssumption().str();
2045       if (!AS.empty() && !AssumptionValueStr.empty())
2046         AssumptionValueStr += ",";
2047       AssumptionValueStr += AS;
2048     }
2049 
2050     if (!AssumptionValueStr.empty())
2051       FuncAttrs.addAttribute(llvm::AssumptionAttrKey, AssumptionValueStr);
2052   }
2053 
2054   // Attach "no-builtins" attributes to:
2055   // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
2056   // * definitions: "no-builtins" or "no-builtin-<name>" only.
2057   // The attributes can come from:
2058   // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
2059   // * FunctionDecl attributes: __attribute__((no_builtin(...)))
2060   addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
2061 
2062   // Collect function IR attributes based on global settings.
2063   getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2064 
2065   // Override some default IR attributes based on declaration-specific
2066   // information.
2067   if (TargetDecl) {
2068     if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
2069       FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2070     if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
2071       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2072     if (TargetDecl->hasAttr<NoSplitStackAttr>())
2073       FuncAttrs.removeAttribute("split-stack");
2074 
2075     // Add NonLazyBind attribute to function declarations when -fno-plt
2076     // is used.
2077     // FIXME: what if we just haven't processed the function definition
2078     // yet, or if it's an external definition like C99 inline?
2079     if (CodeGenOpts.NoPLT) {
2080       if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2081         if (!Fn->isDefined() && !AttrOnCallSite) {
2082           FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2083         }
2084       }
2085     }
2086   }
2087 
2088   // Collect non-call-site function IR attributes from declaration-specific
2089   // information.
2090   if (!AttrOnCallSite) {
2091     if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
2092       FuncAttrs.addAttribute("cmse_nonsecure_entry");
2093 
2094     // Whether tail calls are enabled.
2095     auto shouldDisableTailCalls = [&] {
2096       // Should this be honored in getDefaultFunctionAttributes?
2097       if (CodeGenOpts.DisableTailCalls)
2098         return true;
2099 
2100       if (!TargetDecl)
2101         return false;
2102 
2103       if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
2104           TargetDecl->hasAttr<AnyX86InterruptAttr>())
2105         return true;
2106 
2107       if (CodeGenOpts.NoEscapingBlockTailCalls) {
2108         if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2109           if (!BD->doesNotEscape())
2110             return true;
2111       }
2112 
2113       return false;
2114     };
2115     FuncAttrs.addAttribute("disable-tail-calls",
2116                            llvm::toStringRef(shouldDisableTailCalls()));
2117 
2118     // CPU/feature overrides.  addDefaultFunctionDefinitionAttributes
2119     // handles these separately to set them based on the global defaults.
2120     GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
2121   }
2122 
2123   // Collect attributes from arguments and return values.
2124   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
2125 
2126   QualType RetTy = FI.getReturnType();
2127   const ABIArgInfo &RetAI = FI.getReturnInfo();
2128   switch (RetAI.getKind()) {
2129   case ABIArgInfo::Extend:
2130     if (RetAI.isSignExt())
2131       RetAttrs.addAttribute(llvm::Attribute::SExt);
2132     else
2133       RetAttrs.addAttribute(llvm::Attribute::ZExt);
2134     LLVM_FALLTHROUGH;
2135   case ABIArgInfo::Direct:
2136     if (RetAI.getInReg())
2137       RetAttrs.addAttribute(llvm::Attribute::InReg);
2138     break;
2139   case ABIArgInfo::Ignore:
2140     break;
2141 
2142   case ABIArgInfo::InAlloca:
2143   case ABIArgInfo::Indirect: {
2144     // inalloca and sret disable readnone and readonly
2145     FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2146       .removeAttribute(llvm::Attribute::ReadNone);
2147     break;
2148   }
2149 
2150   case ABIArgInfo::CoerceAndExpand:
2151     break;
2152 
2153   case ABIArgInfo::Expand:
2154   case ABIArgInfo::IndirectAliased:
2155     llvm_unreachable("Invalid ABI kind for return argument");
2156   }
2157 
2158   if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2159     QualType PTy = RefTy->getPointeeType();
2160     if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2161       RetAttrs.addDereferenceableAttr(
2162           getMinimumObjectSize(PTy).getQuantity());
2163     if (getContext().getTargetAddressSpace(PTy) == 0 &&
2164         !CodeGenOpts.NullPointerIsValid)
2165       RetAttrs.addAttribute(llvm::Attribute::NonNull);
2166     if (PTy->isObjectType()) {
2167       llvm::Align Alignment =
2168           getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
2169       RetAttrs.addAlignmentAttr(Alignment);
2170     }
2171   }
2172 
2173   bool hasUsedSRet = false;
2174   SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2175 
2176   // Attach attributes to sret.
2177   if (IRFunctionArgs.hasSRetArg()) {
2178     llvm::AttrBuilder SRETAttrs;
2179     SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
2180     hasUsedSRet = true;
2181     if (RetAI.getInReg())
2182       SRETAttrs.addAttribute(llvm::Attribute::InReg);
2183     SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
2184     ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2185         llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2186   }
2187 
2188   // Attach attributes to inalloca argument.
2189   if (IRFunctionArgs.hasInallocaArg()) {
2190     llvm::AttrBuilder Attrs;
2191     Attrs.addAttribute(llvm::Attribute::InAlloca);
2192     ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2193         llvm::AttributeSet::get(getLLVMContext(), Attrs);
2194   }
2195 
2196   // Apply `nonnull` and `dereferenceable(N)` to the `this` argument.
2197   if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() &&
2198       !FI.arg_begin()->type->isVoidPointerType()) {
2199     auto IRArgs = IRFunctionArgs.getIRArgs(0);
2200 
2201     assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");
2202 
2203     llvm::AttrBuilder Attrs;
2204 
2205     if (!CodeGenOpts.NullPointerIsValid &&
2206         getContext().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
2207       Attrs.addAttribute(llvm::Attribute::NonNull);
2208       Attrs.addDereferenceableAttr(
2209           getMinimumObjectSize(
2210               FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
2211               .getQuantity());
2212     } else {
2213       // FIXME dereferenceable should be correct here, regardless of
2214       // NullPointerIsValid. However, dereferenceable currently does not always
2215       // respect NullPointerIsValid and may imply nonnull and break the program.
2216       // See https://reviews.llvm.org/D66618 for discussions.
2217       Attrs.addDereferenceableOrNullAttr(
2218           getMinimumObjectSize(
2219               FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
2220               .getQuantity());
2221     }
2222 
2223     ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);
2224   }
2225 
2226   unsigned ArgNo = 0;
2227   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2228                                           E = FI.arg_end();
2229        I != E; ++I, ++ArgNo) {
2230     QualType ParamType = I->type;
2231     const ABIArgInfo &AI = I->info;
2232     llvm::AttrBuilder Attrs;
2233 
2234     // Add attribute for padding argument, if necessary.
2235     if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2236       if (AI.getPaddingInReg()) {
2237         ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2238             llvm::AttributeSet::get(
2239                 getLLVMContext(),
2240                 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2241       }
2242     }
2243 
2244     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2245     // have the corresponding parameter variable.  It doesn't make
2246     // sense to do it here because parameters are so messed up.
2247     switch (AI.getKind()) {
2248     case ABIArgInfo::Extend:
2249       if (AI.isSignExt())
2250         Attrs.addAttribute(llvm::Attribute::SExt);
2251       else
2252         Attrs.addAttribute(llvm::Attribute::ZExt);
2253       LLVM_FALLTHROUGH;
2254     case ABIArgInfo::Direct:
2255       if (ArgNo == 0 && FI.isChainCall())
2256         Attrs.addAttribute(llvm::Attribute::Nest);
2257       else if (AI.getInReg())
2258         Attrs.addAttribute(llvm::Attribute::InReg);
2259       break;
2260 
2261     case ABIArgInfo::Indirect: {
2262       if (AI.getInReg())
2263         Attrs.addAttribute(llvm::Attribute::InReg);
2264 
2265       if (AI.getIndirectByVal())
2266         Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
2267 
2268       auto *Decl = ParamType->getAsRecordDecl();
2269       if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
2270           Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs)
2271         // When calling the function, the pointer passed in will be the only
2272         // reference to the underlying object. Mark it accordingly.
2273         Attrs.addAttribute(llvm::Attribute::NoAlias);
2274 
2275       // TODO: We could add the byref attribute if not byval, but it would
2276       // require updating many testcases.
2277 
2278       CharUnits Align = AI.getIndirectAlign();
2279 
2280       // In a byval argument, it is important that the required
2281       // alignment of the type is honored, as LLVM might be creating a
2282       // *new* stack object, and needs to know what alignment to give
2283       // it. (Sometimes it can deduce a sensible alignment on its own,
2284       // but not if clang decides it must emit a packed struct, or the
2285       // user specifies increased alignment requirements.)
2286       //
2287       // This is different from indirect *not* byval, where the object
2288       // exists already, and the align attribute is purely
2289       // informative.
2290       assert(!Align.isZero());
2291 
2292       // For now, only add this when we have a byval argument.
2293       // TODO: be less lazy about updating test cases.
2294       if (AI.getIndirectByVal())
2295         Attrs.addAlignmentAttr(Align.getQuantity());
2296 
2297       // byval disables readnone and readonly.
2298       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2299         .removeAttribute(llvm::Attribute::ReadNone);
2300 
2301       break;
2302     }
2303     case ABIArgInfo::IndirectAliased: {
2304       CharUnits Align = AI.getIndirectAlign();
2305       Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));
2306       Attrs.addAlignmentAttr(Align.getQuantity());
2307       break;
2308     }
2309     case ABIArgInfo::Ignore:
2310     case ABIArgInfo::Expand:
2311     case ABIArgInfo::CoerceAndExpand:
2312       break;
2313 
2314     case ABIArgInfo::InAlloca:
2315       // inalloca disables readnone and readonly.
2316       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2317           .removeAttribute(llvm::Attribute::ReadNone);
2318       continue;
2319     }
2320 
2321     if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2322       QualType PTy = RefTy->getPointeeType();
2323       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2324         Attrs.addDereferenceableAttr(
2325             getMinimumObjectSize(PTy).getQuantity());
2326       if (getContext().getTargetAddressSpace(PTy) == 0 &&
2327           !CodeGenOpts.NullPointerIsValid)
2328         Attrs.addAttribute(llvm::Attribute::NonNull);
2329       if (PTy->isObjectType()) {
2330         llvm::Align Alignment =
2331             getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
2332         Attrs.addAlignmentAttr(Alignment);
2333       }
2334     }
2335 
2336     switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2337     case ParameterABI::Ordinary:
2338       break;
2339 
2340     case ParameterABI::SwiftIndirectResult: {
2341       // Add 'sret' if we haven't already used it for something, but
2342       // only if the result is void.
2343       if (!hasUsedSRet && RetTy->isVoidType()) {
2344         Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));
2345         hasUsedSRet = true;
2346       }
2347 
2348       // Add 'noalias' in either case.
2349       Attrs.addAttribute(llvm::Attribute::NoAlias);
2350 
2351       // Add 'dereferenceable' and 'alignment'.
2352       auto PTy = ParamType->getPointeeType();
2353       if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2354         auto info = getContext().getTypeInfoInChars(PTy);
2355         Attrs.addDereferenceableAttr(info.Width.getQuantity());
2356         Attrs.addAlignmentAttr(info.Align.getAsAlign());
2357       }
2358       break;
2359     }
2360 
2361     case ParameterABI::SwiftErrorResult:
2362       Attrs.addAttribute(llvm::Attribute::SwiftError);
2363       break;
2364 
2365     case ParameterABI::SwiftContext:
2366       Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2367       break;
2368     }
2369 
2370     if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2371       Attrs.addAttribute(llvm::Attribute::NoCapture);
2372 
2373     if (Attrs.hasAttributes()) {
2374       unsigned FirstIRArg, NumIRArgs;
2375       std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2376       for (unsigned i = 0; i < NumIRArgs; i++)
2377         ArgAttrs[FirstIRArg + i] =
2378             llvm::AttributeSet::get(getLLVMContext(), Attrs);
2379     }
2380   }
2381   assert(ArgNo == FI.arg_size());
2382 
2383   AttrList = llvm::AttributeList::get(
2384       getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2385       llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2386 }
2387 
2388 /// An argument came in as a promoted argument; demote it back to its
2389 /// declared type.
2390 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2391                                          const VarDecl *var,
2392                                          llvm::Value *value) {
2393   llvm::Type *varType = CGF.ConvertType(var->getType());
2394 
2395   // This can happen with promotions that actually don't change the
2396   // underlying type, like the enum promotions.
2397   if (value->getType() == varType) return value;
2398 
2399   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2400          && "unexpected promotion type");
2401 
2402   if (isa<llvm::IntegerType>(varType))
2403     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2404 
2405   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2406 }
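
// Example (illustrative): a K&R-style parameter declared 'float' arrives
// promoted to 'double', and the demotion above emits
//
//   %arg.unpromote = fptrunc double %value to float
//
// (CreateFPCast selects fptrunc here because the value is being narrowed).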
2407 
2408 /// Returns the attribute (either parameter attribute, or function
2409 /// attribute), which declares argument ArgNo to be non-null.
2410 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2411                                          QualType ArgType, unsigned ArgNo) {
2412   // FIXME: __attribute__((nonnull)) can also be applied to:
2413   //   - references to pointers, where the pointee is known to be
2414   //     nonnull (apparently a Clang extension)
2415   //   - transparent unions containing pointers
2416   // In the former case, LLVM IR cannot represent the constraint. In
2417   // the latter case, we have no guarantee that the transparent union
2418   // is in fact passed as a pointer.
2419   if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2420     return nullptr;
2421   // First, check attribute on parameter itself.
2422   if (PVD) {
2423     if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2424       return ParmNNAttr;
2425   }
2426   // Check function attributes.
2427   if (!FD)
2428     return nullptr;
2429   for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2430     if (NNAttr->isNonNull(ArgNo))
2431       return NNAttr;
2432   }
2433   return nullptr;
2434 }
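
// Example (illustrative): given
//
//   void f(int *p, int *q) __attribute__((nonnull(2)));
//
// a query for the second parameter (ArgNo == 1) finds the function-level
// attribute covering q; a nonnull attribute written directly on a parameter
// would be found via PVD first.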
2435 
2436 namespace {
2437   struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2438     Address Temp;
2439     Address Arg;
2440     CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2441     void Emit(CodeGenFunction &CGF, Flags flags) override {
2442       llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2443       CGF.Builder.CreateStore(errorValue, Arg);
2444     }
2445   };
2446 }
2447 
2448 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2449                                          llvm::Function *Fn,
2450                                          const FunctionArgList &Args) {
2451   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2452     // Naked functions don't have prologues.
2453     return;
2454 
2455   // If this is an implicit-return-zero function, go ahead and
2456   // initialize the return value.  TODO: it might be nice to have
2457   // a more general mechanism for this that didn't require synthesized
2458   // return statements.
2459   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2460     if (FD->hasImplicitReturnZero()) {
2461       QualType RetTy = FD->getReturnType().getUnqualifiedType();
2462       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2463       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2464       Builder.CreateStore(Zero, ReturnValue);
2465     }
2466   }
2467 
2468   // FIXME: We no longer need the types from FunctionArgList; lift up and
2469   // simplify.
2470 
2471   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2472   assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
2473 
2474   // If we're using inalloca, all the memory arguments are GEPs off of the last
2475   // parameter, which is a pointer to the complete memory area.
2476   Address ArgStruct = Address::invalid();
2477   if (IRFunctionArgs.hasInallocaArg()) {
2478     ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
2479                         FI.getArgStructAlignment());
2480 
2481     assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2482   }
2483 
2484   // Name the struct return parameter.
2485   if (IRFunctionArgs.hasSRetArg()) {
2486     auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
2487     AI->setName("agg.result");
2488     AI->addAttr(llvm::Attribute::NoAlias);
2489   }
2490 
2491   // Track if we received the parameter as a pointer (indirect, byval, or
2492   // inalloca).  If we already have a pointer, EmitParmDecl doesn't need to
2493   // copy it into a local alloca for us.
2494   SmallVector<ParamValue, 16> ArgVals;
2495   ArgVals.reserve(Args.size());
2496 
2497   // Create a pointer value for every parameter declaration.  This usually
2498   // entails copying one or more LLVM IR arguments into an alloca.  Don't push
2499   // any cleanups or do anything that might unwind.  We do that separately, so
2500   // we can push the cleanups in the correct order for the ABI.
2501   assert(FI.arg_size() == Args.size() &&
2502          "Mismatch between function signature & arguments.");
2503   unsigned ArgNo = 0;
2504   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2505   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2506        i != e; ++i, ++info_it, ++ArgNo) {
2507     const VarDecl *Arg = *i;
2508     const ABIArgInfo &ArgI = info_it->info;
2509 
2510     bool isPromoted =
2511       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2512     // We are converting from ABIArgInfo type to VarDecl type directly, unless
2513     // the parameter is promoted. In that case we convert to the
2514     // CGFunctionInfo::ArgInfo type and demote the argument afterwards.
2515     QualType Ty = isPromoted ? info_it->type : Arg->getType();
2516     assert(hasScalarEvaluationKind(Ty) ==
2517            hasScalarEvaluationKind(Arg->getType()));
2518 
2519     unsigned FirstIRArg, NumIRArgs;
2520     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2521 
2522     switch (ArgI.getKind()) {
2523     case ABIArgInfo::InAlloca: {
2524       assert(NumIRArgs == 0);
2525       auto FieldIndex = ArgI.getInAllocaFieldIndex();
2526       Address V =
2527           Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2528       if (ArgI.getInAllocaIndirect())
2529         V = Address(Builder.CreateLoad(V),
2530                     getContext().getTypeAlignInChars(Ty));
2531       ArgVals.push_back(ParamValue::forIndirect(V));
2532       break;
2533     }
2534 
2535     case ABIArgInfo::Indirect:
2536     case ABIArgInfo::IndirectAliased: {
2537       assert(NumIRArgs == 1);
2538       Address ParamAddr =
2539           Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());
2540 
2541       if (!hasScalarEvaluationKind(Ty)) {
2542         // Aggregates and complex variables are accessed by reference. All we
2543         // need to do is realign the value, if requested. Also, if the address
2544         // may be aliased, copy it to ensure that the parameter variable is
2545         // mutable and has a unique address, as C requires.
2546         Address V = ParamAddr;
2547         if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
2548           Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2549 
2550           // Copy from the incoming argument pointer to the temporary with the
2551           // appropriate alignment.
2552           //
2553           // FIXME: We should have a common utility for generating an aggregate
2554           // copy.
2555           CharUnits Size = getContext().getTypeSizeInChars(Ty);
2556           Builder.CreateMemCpy(
2557               AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
2558               ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
2559               llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
2560           V = AlignedTemp;
2561         }
2562         ArgVals.push_back(ParamValue::forIndirect(V));
2563       } else {
2564         // Load scalar value from indirect argument.
2565         llvm::Value *V =
2566             EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2567 
2568         if (isPromoted)
2569           V = emitArgumentDemotion(*this, Arg, V);
2570         ArgVals.push_back(ParamValue::forDirect(V));
2571       }
2572       break;
2573     }
2574 
2575     case ABIArgInfo::Extend:
2576     case ABIArgInfo::Direct: {
2577       auto AI = Fn->getArg(FirstIRArg);
2578       llvm::Type *LTy = ConvertType(Arg->getType());
2579 
2580       // Prepare parameter attributes. So far, only attributes for pointer
2581       // parameters are prepared. See
2582       // http://llvm.org/docs/LangRef.html#paramattrs.
2583       if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
2584           ArgI.getCoerceToType()->isPointerTy()) {
2585         assert(NumIRArgs == 1);
2586 
2587         if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2588           // Set the `nonnull` attribute if applicable.
2589           if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2590                              PVD->getFunctionScopeIndex()) &&
2591               !CGM.getCodeGenOpts().NullPointerIsValid)
2592             AI->addAttr(llvm::Attribute::NonNull);
2593 
2594           QualType OTy = PVD->getOriginalType();
2595           if (const auto *ArrTy =
2596               getContext().getAsConstantArrayType(OTy)) {
2597             // A C99 array parameter declaration with the static keyword also
2598             // indicates dereferenceability, and if the size is constant we can
2599             // use the dereferenceable attribute (which requires the size in
2600             // bytes).
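            // As a sketch (hypothetical declaration), given
            //   void f(int a[static 4]);
            // the argument gets align(alignof(int)) and
            // dereferenceable(4 * sizeof(int)).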
2601             if (ArrTy->getSizeModifier() == ArrayType::Static) {
2602               QualType ETy = ArrTy->getElementType();
2603               llvm::Align Alignment =
2604                   CGM.getNaturalTypeAlignment(ETy).getAsAlign();
2605               AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2606               uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2607               if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2608                   ArrSize) {
2609                 llvm::AttrBuilder Attrs;
2610                 Attrs.addDereferenceableAttr(
2611                     getContext().getTypeSizeInChars(ETy).getQuantity() *
2612                     ArrSize);
2613                 AI->addAttrs(Attrs);
2614               } else if (getContext().getTargetInfo().getNullPointerValue(
2615                              ETy.getAddressSpace()) == 0 &&
2616                          !CGM.getCodeGenOpts().NullPointerIsValid) {
2617                 AI->addAttr(llvm::Attribute::NonNull);
2618               }
2619             }
2620           } else if (const auto *ArrTy =
2621                      getContext().getAsVariableArrayType(OTy)) {
2622             // For C99 VLAs with the static keyword, we don't know the size so
2623             // we can't use the dereferenceable attribute, but in addrspace(0)
2624             // we know that it must be nonnull.
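            // For instance (hypothetical declaration), given
            //   void f(int n, int a[static n]);
            // `a` gets the align and (in addrspace(0)) nonnull attributes,
            // but no dereferenceable size.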
2625             if (ArrTy->getSizeModifier() == VariableArrayType::Static) {
2626               QualType ETy = ArrTy->getElementType();
2627               llvm::Align Alignment =
2628                   CGM.getNaturalTypeAlignment(ETy).getAsAlign();
2629               AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2630               if (!getContext().getTargetAddressSpace(ETy) &&
2631                   !CGM.getCodeGenOpts().NullPointerIsValid)
2632                 AI->addAttr(llvm::Attribute::NonNull);
2633             }
2634           }
2635 
2636           // Set the `align` attribute if an align_value attribute is present.
2637           const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2638           if (!AVAttr)
2639             if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2640               AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2641           if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2642             // If the alignment-assumption sanitizer is enabled, we do *not* add
2643             // the alignment attribute here; instead we emit a normal alignment
2644             // assumption, so that the UBSan check can function.
2645             llvm::ConstantInt *AlignmentCI =
2646                 cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
2647             unsigned AlignmentInt =
2648                 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
2649             if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
2650               AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
2651               AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(
2652                   llvm::Align(AlignmentInt)));
2653             }
2654           }
2655         }
2656 
2657         // Set 'noalias' if an argument type has the `restrict` qualifier.
2658         if (Arg->getType().isRestrictQualified())
2659           AI->addAttr(llvm::Attribute::NoAlias);
2660       }
2661 
2662       // Prepare the argument value. If we have the trivial case, handle it
2663       // with no muss and fuss.
2664       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2665           ArgI.getCoerceToType() == ConvertType(Ty) &&
2666           ArgI.getDirectOffset() == 0) {
2667         assert(NumIRArgs == 1);
2668 
2669         // LLVM expects swifterror parameters to be used in very restricted
2670         // ways.  Copy the value into a less-restricted temporary.
2671         llvm::Value *V = AI;
2672         if (FI.getExtParameterInfo(ArgNo).getABI()
2673               == ParameterABI::SwiftErrorResult) {
2674           QualType pointeeTy = Ty->getPointeeType();
2675           assert(pointeeTy->isPointerType());
2676           Address temp =
2677             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2678           Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2679           llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2680           Builder.CreateStore(incomingErrorValue, temp);
2681           V = temp.getPointer();
2682 
2683           // Push a cleanup to copy the value back at the end of the function.
2684           // The convention does not guarantee that the value will be written
2685           // back if the function exits with an unwind exception.
2686           EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2687         }
2688 
2689         // Ensure the argument is the correct type.
2690         if (V->getType() != ArgI.getCoerceToType())
2691           V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2692 
2693         if (isPromoted)
2694           V = emitArgumentDemotion(*this, Arg, V);
2695 
2696         // Because of the merging of function types from multiple decls it is
2697         // possible for the type of an argument to not match the corresponding
2698         // type in the function type. Since we are codegening the callee here,
2699         // add a cast to the argument type.
2700         llvm::Type *LTy = ConvertType(Arg->getType());
2701         if (V->getType() != LTy)
2702           V = Builder.CreateBitCast(V, LTy);
2703 
2704         ArgVals.push_back(ParamValue::forDirect(V));
2705         break;
2706       }
2707 
2708       // VLST (vector-length-specific type) arguments are coerced to VLATs
2709       // (vector-length-agnostic types) at the function boundary for ABI
2710       // consistency. If this is a VLST that was coerced to a VLAT at the
2711       // function boundary and the element types match up, use
2712       // llvm.experimental.vector.extract to convert back to the original VLST.
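      // For example (assuming AArch64 SVE and -msve-vector-bits=512), a
      // fixed-length svint32_t parameter arrives as <vscale x 4 x i32> and is
      // converted back to <16 x i32> here.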
2713       if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
2714         auto *Coerced = Fn->getArg(FirstIRArg);
2715         if (auto *VecTyFrom =
2716                 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
2717           if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
2718             llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2719 
2720             assert(NumIRArgs == 1);
2721             Coerced->setName(Arg->getName() + ".coerce");
2722             ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector(
2723                 VecTyTo, Coerced, Zero, "castFixedSve")));
2724             break;
2725           }
2726         }
2727       }
2728 
2729       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2730                                      Arg->getName());
2731 
2732       // Pointer to store into.
2733       Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2734 
2735       // Fast-isel and the optimizer generally like scalar values better than
2736       // FCAs, so we flatten them if this is safe to do for this argument.
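      // E.g. (hypothetical coercion), a parameter coerced to { double, double }
      // arrives as two IR arguments named <arg>.coerce0 and <arg>.coerce1 and
      // is reassembled through the alloca below.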
2737       llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2738       if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2739           STy->getNumElements() > 1) {
2740         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2741         llvm::Type *DstTy = Ptr.getElementType();
2742         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2743 
2744         Address AddrToStoreInto = Address::invalid();
2745         if (SrcSize <= DstSize) {
2746           AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2747         } else {
2748           AddrToStoreInto =
2749             CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2750         }
2751 
2752         assert(STy->getNumElements() == NumIRArgs);
2753         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2754           auto AI = Fn->getArg(FirstIRArg + i);
2755           AI->setName(Arg->getName() + ".coerce" + Twine(i));
2756           Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
2757           Builder.CreateStore(AI, EltPtr);
2758         }
2759 
2760         if (SrcSize > DstSize) {
2761           Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2762         }
2763 
2764       } else {
2765         // Simple case, just do a coerced store of the argument into the alloca.
2766         assert(NumIRArgs == 1);
2767         auto AI = Fn->getArg(FirstIRArg);
2768         AI->setName(Arg->getName() + ".coerce");
2769         CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
2770       }
2771 
2772       // Match to what EmitParmDecl is expecting for this type.
2773       if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2774         llvm::Value *V =
2775             EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2776         if (isPromoted)
2777           V = emitArgumentDemotion(*this, Arg, V);
2778         ArgVals.push_back(ParamValue::forDirect(V));
2779       } else {
2780         ArgVals.push_back(ParamValue::forIndirect(Alloca));
2781       }
2782       break;
2783     }
2784 
2785     case ABIArgInfo::CoerceAndExpand: {
2786       // Reconstruct into a temporary.
2787       Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2788       ArgVals.push_back(ParamValue::forIndirect(alloca));
2789 
2790       auto coercionType = ArgI.getCoerceAndExpandType();
2791       alloca = Builder.CreateElementBitCast(alloca, coercionType);
2792 
2793       unsigned argIndex = FirstIRArg;
2794       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2795         llvm::Type *eltType = coercionType->getElementType(i);
2796         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2797           continue;
2798 
2799         auto eltAddr = Builder.CreateStructGEP(alloca, i);
2800         auto elt = Fn->getArg(argIndex++);
2801         Builder.CreateStore(elt, eltAddr);
2802       }
2803       assert(argIndex == FirstIRArg + NumIRArgs);
2804       break;
2805     }
2806 
2807     case ABIArgInfo::Expand: {
2808       // If this structure was expanded into multiple arguments then
2809       // we need to create a temporary and reconstruct it from the
2810       // arguments.
2811       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2812       LValue LV = MakeAddrLValue(Alloca, Ty);
2813       ArgVals.push_back(ParamValue::forIndirect(Alloca));
2814 
2815       auto FnArgIter = Fn->arg_begin() + FirstIRArg;
2816       ExpandTypeFromArgs(Ty, LV, FnArgIter);
2817       assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
2818       for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2819         auto AI = Fn->getArg(FirstIRArg + i);
2820         AI->setName(Arg->getName() + "." + Twine(i));
2821       }
2822       break;
2823     }
2824 
2825     case ABIArgInfo::Ignore:
2826       assert(NumIRArgs == 0);
2827       // Initialize the local variable appropriately.
2828       if (!hasScalarEvaluationKind(Ty)) {
2829         ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2830       } else {
2831         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2832         ArgVals.push_back(ParamValue::forDirect(U));
2833       }
2834       break;
2835     }
2836   }
2837 
2838   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2839     for (int I = Args.size() - 1; I >= 0; --I)
2840       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2841   } else {
2842     for (unsigned I = 0, E = Args.size(); I != E; ++I)
2843       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2844   }
2845 }
2846 
2847 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2848   while (insn->use_empty()) {
2849     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2850     if (!bitcast) return;
2851 
2852     // This is "safe" because we would have used a ConstantExpr otherwise.
2853     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2854     bitcast->eraseFromParent();
2855   }
2856 }
2857 
2858 /// Try to emit a fused autorelease of a return result.
2859 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2860                                                     llvm::Value *result) {
2861   // We must be immediately following the cast.
2862   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2863   if (BB->empty()) return nullptr;
2864   if (&BB->back() != result) return nullptr;
2865 
2866   llvm::Type *resultType = result->getType();
2867 
2868   // result is in a BasicBlock and is therefore an Instruction.
2869   llvm::Instruction *generator = cast<llvm::Instruction>(result);
2870 
2871   SmallVector<llvm::Instruction *, 4> InstsToKill;
2872 
2873   // Look for:
2874   //  %generator = bitcast %type1* %generator2 to %type2*
2875   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2876     // We would have emitted this as a constant if the operand weren't
2877     // an Instruction.
2878     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2879 
2880     // Require the generator to be immediately followed by the cast.
2881     if (generator->getNextNode() != bitcast)
2882       return nullptr;
2883 
2884     InstsToKill.push_back(bitcast);
2885   }
2886 
2887   // Look for:
2888   //   %generator = call i8* @objc_retain(i8* %originalResult)
2889   // or
2890   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2891   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2892   if (!call) return nullptr;
2893 
2894   bool doRetainAutorelease;
2895 
2896   if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2897     doRetainAutorelease = true;
2898   } else if (call->getCalledOperand() ==
2899              CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
2900     doRetainAutorelease = false;
2901 
2902     // If we emitted an assembly marker for this call (and the
2903     // ARCEntrypoints field should have been set if so), go looking
2904     // for that call.  If we can't find it, we can't do this
2905     // optimization.  But it should always be the immediately previous
2906     // instruction, unless we needed bitcasts around the call.
2907     if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2908       llvm::Instruction *prev = call->getPrevNode();
2909       assert(prev);
2910       if (isa<llvm::BitCastInst>(prev)) {
2911         prev = prev->getPrevNode();
2912         assert(prev);
2913       }
2914       assert(isa<llvm::CallInst>(prev));
2915       assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
2916              CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2917       InstsToKill.push_back(prev);
2918     }
2919   } else {
2920     return nullptr;
2921   }
2922 
2923   result = call->getArgOperand(0);
2924   InstsToKill.push_back(call);
2925 
2926   // Keep killing bitcasts, for sanity.  Note that we no longer care
2927   // about precise ordering as long as there's exactly one use.
2928   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2929     if (!bitcast->hasOneUse()) break;
2930     InstsToKill.push_back(bitcast);
2931     result = bitcast->getOperand(0);
2932   }
2933 
2934   // Delete all the unnecessary instructions, from latest to earliest.
2935   for (auto *I : InstsToKill)
2936     I->eraseFromParent();
2937 
2938   // Do the fused retain/autorelease if we were asked to.
2939   if (doRetainAutorelease)
2940     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2941 
2942   // Cast back to the result type.
2943   return CGF.Builder.CreateBitCast(result, resultType);
2944 }
2945 
2946 /// If this is a +1 of the value of an immutable 'self', remove it.
2947 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2948                                           llvm::Value *result) {
2949   // This is only applicable to a method with an immutable 'self'.
2950   const ObjCMethodDecl *method =
2951     dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2952   if (!method) return nullptr;
2953   const VarDecl *self = method->getSelfDecl();
2954   if (!self->getType().isConstQualified()) return nullptr;
2955 
2956   // Look for a retain call.
2957   llvm::CallInst *retainCall =
2958     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2959   if (!retainCall || retainCall->getCalledOperand() !=
2960                          CGF.CGM.getObjCEntrypoints().objc_retain)
2961     return nullptr;
2962 
2963   // Look for an ordinary load of 'self'.
2964   llvm::Value *retainedValue = retainCall->getArgOperand(0);
2965   llvm::LoadInst *load =
2966     dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2967   if (!load || load->isAtomic() || load->isVolatile() ||
2968       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2969     return nullptr;
2970 
2971   // Okay!  Burn it all down.  This relies for correctness on the
2972   // assumption that the retain is emitted as part of the return and
2973   // that thereafter everything is used "linearly".
2974   llvm::Type *resultType = result->getType();
2975   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2976   assert(retainCall->use_empty());
2977   retainCall->eraseFromParent();
2978   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2979 
2980   return CGF.Builder.CreateBitCast(load, resultType);
2981 }
2982 
2983 /// Emit an ARC autorelease of the result of a function.
2984 ///
2985 /// \return the value to actually return from the function
2986 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2987                                             llvm::Value *result) {
2988   // If we're returning 'self', kill the initial retain.  This is a
2989   // heuristic attempt to "encourage correctness" in the really unfortunate
2990   // case where we have a return of self during a dealloc and we desperately
2991   // need to avoid the possible autorelease.
2992   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2993     return self;
2994 
2995   // At -O0, try to emit a fused retain/autorelease.
2996   if (CGF.shouldUseFusedARCCalls())
2997     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2998       return fused;
2999 
3000   return CGF.EmitARCAutoreleaseReturnValue(result);
3001 }
3002 
3003 /// Heuristically search for a dominating store to the return-value slot.
3004 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
3005   // Check whether a User is a store whose pointer operand is the ReturnValue.
3006   // We are looking for stores to the ReturnValue, not for stores of the
3007   // ReturnValue to some other location.
3008   auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
3009     auto *SI = dyn_cast<llvm::StoreInst>(U);
3010     if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
3011       return nullptr;
3012     // These aren't actually possible for non-coerced returns, and we
3013     // only care about non-coerced returns on this code path.
3014     assert(!SI->isAtomic() && !SI->isVolatile());
3015     return SI;
3016   };
3017   // If there are multiple uses of the return-value slot, just check
3018   // for something immediately preceding the IP.  Sometimes this can
3019   // happen with how we generate implicit-returns; it can also happen
3020   // with noreturn cleanups.
3021   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
3022     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3023     if (IP->empty()) return nullptr;
3024     llvm::Instruction *I = &IP->back();
3025 
3026     // Skip lifetime markers
3027     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
3028                                             IE = IP->rend();
3029          II != IE; ++II) {
3030       if (llvm::IntrinsicInst *Intrinsic =
3031               dyn_cast<llvm::IntrinsicInst>(&*II)) {
3032         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
3033           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
3034           ++II;
3035           if (II == IE)
3036             break;
3037           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
3038             continue;
3039         }
3040       }
3041       I = &*II;
3042       break;
3043     }
3044 
3045     return GetStoreIfValid(I);
3046   }
3047 
3048   llvm::StoreInst *store =
3049       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
3050   if (!store) return nullptr;
3051 
3052   // Now do a quick-and-dirty dominance check: just walk up the
3053   // single-predecessor chain from the current insertion point.
3054   llvm::BasicBlock *StoreBB = store->getParent();
3055   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3056   while (IP != StoreBB) {
3057     if (!(IP = IP->getSinglePredecessor()))
3058       return nullptr;
3059   }
3060 
3061   // Okay, the store's basic block dominates the insertion point; we
3062   // can do our thing.
3063   return store;
3064 }
3065 
3066 // Helper functions for EmitCMSEClearRecord
3067 
3068 // Set the bits corresponding to a field having width `BitWidth` and located at
3069 // offset `BitOffset` (from the least significant bit) within a storage unit of
3070 // `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
3071 // Use little-endian layout, i.e. `Bits[0]` is the LSB.
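// A worked instance (assuming CharWidth == 8): BitOffset == 4 and
// BitWidth == 8 set Bits[0] |= 0xF0 and Bits[1] |= 0x0F.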
3072 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset,
3073                         int BitWidth, int CharWidth) {
3074   assert(CharWidth <= 64);
3075   assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3076 
3077   int Pos = 0;
3078   if (BitOffset >= CharWidth) {
3079     Pos += BitOffset / CharWidth;
3080     BitOffset = BitOffset % CharWidth;
3081   }
3082 
3083   const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
3084   if (BitOffset + BitWidth >= CharWidth) {
3085     Bits[Pos++] |= (Used << BitOffset) & Used;
3086     BitWidth -= CharWidth - BitOffset;
3087     BitOffset = 0;
3088   }
3089 
3090   while (BitWidth >= CharWidth) {
3091     Bits[Pos++] = Used;
3092     BitWidth -= CharWidth;
3093   }
3094 
3095   if (BitWidth > 0)
3096     Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
3097 }
3098 
3099 // Set the bits corresponding to a field having width `BitWidth` and located at
3100 // offset `BitOffset` (from the least significant bit) within a storage unit of
3101 // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of
3102 // `Bits` corresponds to one target byte. Use target endian layout.
3103 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset,
3104                         int StorageSize, int BitOffset, int BitWidth,
3105                         int CharWidth, bool BigEndian) {
3106 
3107   SmallVector<uint64_t, 8> TmpBits(StorageSize);
3108   setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3109 
3110   if (BigEndian)
3111     std::reverse(TmpBits.begin(), TmpBits.end());
3112 
3113   for (uint64_t V : TmpBits)
3114     Bits[StorageOffset++] |= V;
3115 }
3116 
3117 static void setUsedBits(CodeGenModule &, QualType, int,
3118                         SmallVectorImpl<uint64_t> &);
3119 
3120 // Set the bits in `Bits`, which correspond to the value representations of
3121 // the actual members of the record type `RTy`. Note that this function does
3122 // not handle base classes, virtual tables, etc., since they cannot occur in
3123 // CMSE function arguments or return values. The bit mask corresponds to the
3124 // target memory layout, i.e. it is endian dependent.
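// As a small instance (assuming CharWidth == 8, little-endian), a record whose
// only member is
//   unsigned f : 3;
// sets 0x07 in the byte at the bitfield's storage offset.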
3125 static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
3126                         SmallVectorImpl<uint64_t> &Bits) {
3127   ASTContext &Context = CGM.getContext();
3128   int CharWidth = Context.getCharWidth();
3129   const RecordDecl *RD = RTy->getDecl()->getDefinition();
3130   const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
3131   const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
3132 
3133   int Idx = 0;
3134   for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
3135     const FieldDecl *F = *I;
3136 
3137     if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
3138         F->getType()->isIncompleteArrayType())
3139       continue;
3140 
3141     if (F->isBitField()) {
3142       const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
3143       setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
3144                   BFI.StorageSize / CharWidth, BFI.Offset,
3145                   BFI.Size, CharWidth,
3146                   CGM.getDataLayout().isBigEndian());
3147       continue;
3148     }
3149 
3150     setUsedBits(CGM, F->getType(),
3151                 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
3152   }
3153 }
3154 
3155 // Set the bits in `Bits`, which correspond to the value representations of
3156 // the elements of an array type `ATy`.
3157 static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
3158                         int Offset, SmallVectorImpl<uint64_t> &Bits) {
3159   const ASTContext &Context = CGM.getContext();
3160 
3161   QualType ETy = Context.getBaseElementType(ATy);
3162   int Size = Context.getTypeSizeInChars(ETy).getQuantity();
3163   SmallVector<uint64_t, 4> TmpBits(Size);
3164   setUsedBits(CGM, ETy, 0, TmpBits);
3165 
3166   for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
3167     auto Src = TmpBits.begin();
3168     auto Dst = Bits.begin() + Offset + I * Size;
3169     for (int J = 0; J < Size; ++J)
3170       *Dst++ |= *Src++;
3171   }
3172 }
3173 
3174 // Set the bits in `Bits`, which correspond to the value representations of
3175 // the type `QTy`.
3176 static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
3177                         SmallVectorImpl<uint64_t> &Bits) {
3178   if (const auto *RTy = QTy->getAs<RecordType>())
3179     return setUsedBits(CGM, RTy, Offset, Bits);
3180 
3181   ASTContext &Context = CGM.getContext();
3182   if (const auto *ATy = Context.getAsConstantArrayType(QTy))
3183     return setUsedBits(CGM, ATy, Offset, Bits);
3184 
3185   int Size = Context.getTypeSizeInChars(QTy).getQuantity();
3186   if (Size <= 0)
3187     return;
3188 
3189   std::fill_n(Bits.begin() + Offset, Size,
3190               (uint64_t(1) << Context.getCharWidth()) - 1);
3191 }
3192 
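// Build a mask covering `Size` target bytes of `Bits` starting at `Pos`, laid
// out in target endian order. For example (assuming CharWidth == 8), the bytes
// {0x0F, 0xF0} produce 0xF00F on a little-endian target and 0x0FF0 on a
// big-endian one.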
3193 static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
3194                                    int Pos, int Size, int CharWidth,
3195                                    bool BigEndian) {
3196   assert(Size > 0);
3197   uint64_t Mask = 0;
3198   if (BigEndian) {
3199     for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
3200          ++P)
3201       Mask = (Mask << CharWidth) | *P;
3202   } else {
3203     auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3204     do
3205       Mask = (Mask << CharWidth) | *--P;
3206     while (P != End);
3207   }
3208   return Mask;
3209 }
3210 
3211 // Emit code to clear the bits in a record that aren't part of any
3212 // user-declared member, when the record is returned from a function.
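// As an illustration (hypothetical record on a little-endian target), if
//   struct S { char a; char b; };
// is returned in an i32, the computed mask is 0xFFFF and the 16 padding bits
// are cleared.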
3213 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3214                                                   llvm::IntegerType *ITy,
3215                                                   QualType QTy) {
3216   assert(Src->getType() == ITy);
3217   assert(ITy->getScalarSizeInBits() <= 64);
3218 
3219   const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3220   int Size = DataLayout.getTypeStoreSize(ITy);
3221   SmallVector<uint64_t, 4> Bits(Size);
3222   setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3223 
3224   int CharWidth = CGM.getContext().getCharWidth();
3225   uint64_t Mask =
3226       buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());
3227 
3228   return Builder.CreateAnd(Src, Mask, "cmse.clear");
3229 }
3230 
3231 // Emit code to clear the bits in a record that aren't part of any
3232 // user-declared member, when the record is passed as a function argument.
3233 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3234                                                   llvm::ArrayType *ATy,
3235                                                   QualType QTy) {
3236   const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3237   int Size = DataLayout.getTypeStoreSize(ATy);
3238   SmallVector<uint64_t, 16> Bits(Size);
3239   setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3240 
3241   // Clear each element of the LLVM array.
3242   int CharWidth = CGM.getContext().getCharWidth();
3243   int CharsPerElt =
3244       ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3245   int MaskIndex = 0;
3246   llvm::Value *R = llvm::UndefValue::get(ATy);
3247   for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3248     uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
3249                                        DataLayout.isBigEndian());
3250     MaskIndex += CharsPerElt;
3251     llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3252     llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3253     R = Builder.CreateInsertValue(R, T1, I);
3254   }
3255 
3256   return R;
3257 }
3258 
3259 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
3260                                          bool EmitRetDbgLoc,
3261                                          SourceLocation EndLoc) {
3262   if (FI.isNoReturn()) {
3263     // Noreturn functions don't return.
3264     EmitUnreachable(EndLoc);
3265     return;
3266   }
3267 
3268   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3269     // Naked functions don't have epilogues.
3270     Builder.CreateUnreachable();
3271     return;
3272   }
3273 
3274   // Functions with no result always return void.
3275   if (!ReturnValue.isValid()) {
3276     Builder.CreateRetVoid();
3277     return;
3278   }
3279 
3280   llvm::DebugLoc RetDbgLoc;
3281   llvm::Value *RV = nullptr;
3282   QualType RetTy = FI.getReturnType();
3283   const ABIArgInfo &RetAI = FI.getReturnInfo();
3284 
3285   switch (RetAI.getKind()) {
3286   case ABIArgInfo::InAlloca:
3287     // Aggregates get evaluated directly into the destination.  Sometimes we
3288     // need to return the sret value in a register, though.
3289     assert(hasAggregateEvaluationKind(RetTy));
3290     if (RetAI.getInAllocaSRet()) {
3291       llvm::Function::arg_iterator EI = CurFn->arg_end();
3292       --EI;
3293       llvm::Value *ArgStruct = &*EI;
3294       llvm::Value *SRet = Builder.CreateStructGEP(
3295           nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
3296       RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
3297     }
3298     break;
3299 
3300   case ABIArgInfo::Indirect: {
3301     auto AI = CurFn->arg_begin();
3302     if (RetAI.isSRetAfterThis())
3303       ++AI;
3304     switch (getEvaluationKind(RetTy)) {
3305     case TEK_Complex: {
3306       ComplexPairTy RT =
3307         EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
3308       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
3309                          /*isInit*/ true);
3310       break;
3311     }
3312     case TEK_Aggregate:
3313       // Do nothing; aggregates get evaluated directly into the destination.
3314       break;
3315     case TEK_Scalar:
3316       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
3317                         MakeNaturalAlignAddrLValue(&*AI, RetTy),
3318                         /*isInit*/ true);
3319       break;
3320     }
3321     break;
3322   }
3323 
3324   case ABIArgInfo::Extend:
3325   case ABIArgInfo::Direct:
3326     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
3327         RetAI.getDirectOffset() == 0) {
3328       // The internal return value temp will always have
3329       // pointer-to-return-type type, so just do a load.
3330 
3331       // If there is a dominating store to ReturnValue, we can elide
3332       // the load, zap the store, and usually zap the alloca.
3333       if (llvm::StoreInst *SI =
3334               findDominatingStoreToReturnValue(*this)) {
3335         // Reuse the debug location from the store unless there is
3336         // cleanup code to be emitted between the store and return
3337         // instruction.
3338         if (EmitRetDbgLoc && !AutoreleaseResult)
3339           RetDbgLoc = SI->getDebugLoc();
3340         // Get the stored value and nuke the now-dead store.
3341         RV = SI->getValueOperand();
3342         SI->eraseFromParent();
3343 
3344       // Otherwise, we have to do a simple load.
3345       } else {
3346         RV = Builder.CreateLoad(ReturnValue);
3347       }
3348     } else {
3349       // If the value is offset in memory, apply the offset now.
3350       Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
3351 
3352       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
3353     }
3354 
3355     // In ARC, end functions that return a retainable type with a call
3356     // to objc_autoreleaseReturnValue.
3357     if (AutoreleaseResult) {
3358 #ifndef NDEBUG
3359       // Type::isObjCRetainableType has to be called on a QualType that hasn't
3360       // been stripped of the typedefs, so we cannot use RetTy here. Get the
3361       // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
3362       // from CurCodeDecl or BlockInfo.
3363       QualType RT;
3364 
3365       if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3366         RT = FD->getReturnType();
3367       else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3368         RT = MD->getReturnType();
3369       else if (isa<BlockDecl>(CurCodeDecl))
3370         RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3371       else
3372         llvm_unreachable("Unexpected function/method type");
3373 
3374       assert(getLangOpts().ObjCAutoRefCount &&
3375              !FI.isReturnsRetained() &&
3376              RT->isObjCRetainableType());
3377 #endif
3378       RV = emitAutoreleaseOfResult(*this, RV);
3379     }
3380 
3381     break;
3382 
3383   case ABIArgInfo::Ignore:
3384     break;
3385 
3386   case ABIArgInfo::CoerceAndExpand: {
3387     auto coercionType = RetAI.getCoerceAndExpandType();
3388 
3389     // Load all of the coerced elements out into results.
3390     llvm::SmallVector<llvm::Value*, 4> results;
3391     Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
3392     for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3393       auto coercedEltType = coercionType->getElementType(i);
3394       if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3395         continue;
3396 
3397       auto eltAddr = Builder.CreateStructGEP(addr, i);
3398       auto elt = Builder.CreateLoad(eltAddr);
3399       results.push_back(elt);
3400     }
3401 
3402     // If we have one result, it's the single direct result type.
3403     if (results.size() == 1) {
3404       RV = results[0];
3405 
3406     // Otherwise, we need to make a first-class aggregate.
3407     } else {
3408       // Construct a return type that lacks padding elements.
3409       llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3410 
3411       RV = llvm::UndefValue::get(returnType);
3412       for (unsigned i = 0, e = results.size(); i != e; ++i) {
3413         RV = Builder.CreateInsertValue(RV, results[i], i);
3414       }
3415     }
3416     break;
3417   }
3418   case ABIArgInfo::Expand:
3419   case ABIArgInfo::IndirectAliased:
3420     llvm_unreachable("Invalid ABI kind for return argument");
3421   }
3422 
3423   llvm::Instruction *Ret;
3424   if (RV) {
3425     if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
3426       // For certain return types, clear padding bits, as they may reveal
3427       // sensitive information.
3428       // Small struct/union types are returned as integers.
3429       auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
3430       if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType()))
3431         RV = EmitCMSEClearRecord(RV, ITy, RetTy);
3432     }
3433     EmitReturnValueCheck(RV);
3434     Ret = Builder.CreateRet(RV);
3435   } else {
3436     Ret = Builder.CreateRetVoid();
3437   }
3438 
3439   if (RetDbgLoc)
3440     Ret->setDebugLoc(std::move(RetDbgLoc));
3441 }
3442 
3443 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
3444   // A current decl may not be available when emitting vtable thunks.
3445   if (!CurCodeDecl)
3446     return;
3447 
3448   // If the return block isn't reachable, neither is this check, so don't emit
3449   // it.
3450   if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
3451     return;
3452 
3453   ReturnsNonNullAttr *RetNNAttr = nullptr;
3454   if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
3455     RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3456 
3457   if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3458     return;
3459 
3460   // Prefer the returns_nonnull attribute if it's present.
3461   SourceLocation AttrLoc;
3462   SanitizerMask CheckKind;
3463   SanitizerHandler Handler;
3464   if (RetNNAttr) {
3465     assert(!requiresReturnValueNullabilityCheck() &&
3466            "Cannot check nullability and the nonnull attribute");
3467     AttrLoc = RetNNAttr->getLocation();
3468     CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3469     Handler = SanitizerHandler::NonnullReturn;
3470   } else {
3471     if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3472       if (auto *TSI = DD->getTypeSourceInfo())
3473         if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
3474           AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3475     CheckKind = SanitizerKind::NullabilityReturn;
3476     Handler = SanitizerHandler::NullabilityReturn;
3477   }
3478 
3479   SanitizerScope SanScope(this);
3480 
3481   // Make sure the "return" source location is valid. If we're checking a
3482   // nullability annotation, make sure the preconditions for the check are met.
3483   llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3484   llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3485   llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3486   llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3487   if (requiresReturnValueNullabilityCheck())
3488     CanNullCheck =
3489         Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3490   Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3491   EmitBlock(Check);
3492 
3493   // Now do the null check.
3494   llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3495   llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3496   llvm::Value *DynamicData[] = {SLocPtr};
3497   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3498 
3499   EmitBlock(NoCheck);
3500 
3501 #ifndef NDEBUG
3502   // The return location should not be used after the check has been emitted.
3503   ReturnLocation = Address::invalid();
3504 #endif
3505 }
3506 
3507 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3508   const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3509   return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3510 }
3511 
3512 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3513                                           QualType Ty) {
3514   // FIXME: Generate IR in one pass, rather than going back and fixing up these
3515   // placeholders.
3516   llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3517   llvm::Type *IRPtrTy = IRTy->getPointerTo();
3518   llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3519 
3520   // FIXME: When we generate this IR in one pass, we shouldn't need
3521   // this win32-specific alignment hack.
3522   CharUnits Align = CharUnits::fromQuantity(4);
3523   Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3524 
3525   return AggValueSlot::forAddr(Address(Placeholder, Align),
3526                                Ty.getQualifiers(),
3527                                AggValueSlot::IsNotDestructed,
3528                                AggValueSlot::DoesNotNeedGCBarriers,
3529                                AggValueSlot::IsNotAliased,
3530                                AggValueSlot::DoesNotOverlap);
3531 }
3532 
3533 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3534                                           const VarDecl *param,
3535                                           SourceLocation loc) {
3536   // StartFunction converted the ABI-lowered parameter(s) into a
3537   // local alloca.  We need to turn that into an r-value suitable
3538   // for EmitCall.
3539   Address local = GetAddrOfLocalVar(param);
3540 
3541   QualType type = param->getType();
3542 
3543   if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3544     CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3545   }
3546 
3547   // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3548   // but the argument needs to be the original pointer.
3549   if (type->isReferenceType()) {
3550     args.add(RValue::get(Builder.CreateLoad(local)), type);
3551 
3552   // In ARC, move out of consumed arguments so that the release cleanup
3553   // entered by StartFunction doesn't cause an over-release.  This isn't
3554   // optimal -O0 code generation, but it should get cleaned up when
3555   // optimization is enabled.  This also assumes that delegate calls are
3556   // performed exactly once for a set of arguments, but that should be safe.
3557   } else if (getLangOpts().ObjCAutoRefCount &&
3558              param->hasAttr<NSConsumedAttr>() &&
3559              type->isObjCRetainableType()) {
3560     llvm::Value *ptr = Builder.CreateLoad(local);
3561     auto null =
3562       llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3563     Builder.CreateStore(null, local);
3564     args.add(RValue::get(ptr), type);
3565 
3566   // For the most part, we just need to load the alloca, except that
3567   // aggregate r-values are actually pointers to temporaries.
3568   } else {
3569     args.add(convertTempToRValue(local, type, loc), type);
3570   }
3571 
3572   // Deactivate the cleanup for the callee-destructed param that was pushed.
3573   if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3574       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3575       param->needsDestruction(getContext())) {
3576     EHScopeStack::stable_iterator cleanup =
3577         CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3578     assert(cleanup.isValid() &&
3579            "cleanup for callee-destructed param not recorded");
3580     // This unreachable is a temporary marker which will be removed later.
3581     llvm::Instruction *isActive = Builder.CreateUnreachable();
3582     args.addArgCleanupDeactivation(cleanup, isActive);
3583   }
3584 }
3585 
3586 static bool isProvablyNull(llvm::Value *addr) {
3587   return isa<llvm::ConstantPointerNull>(addr);
3588 }
3589 
3590 /// Emit the actual writing-back of a writeback.
3591 static void emitWriteback(CodeGenFunction &CGF,
3592                           const CallArgList::Writeback &writeback) {
3593   const LValue &srcLV = writeback.Source;
3594   Address srcAddr = srcLV.getAddress(CGF);
3595   assert(!isProvablyNull(srcAddr.getPointer()) &&
3596          "shouldn't have writeback for provably null argument");
3597 
3598   llvm::BasicBlock *contBB = nullptr;
3599 
3600   // If the argument wasn't provably non-null, we need to null check
3601   // before doing the store.
3602   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3603                                               CGF.CGM.getDataLayout());
3604   if (!provablyNonNull) {
3605     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3606     contBB = CGF.createBasicBlock("icr.done");
3607 
3608     llvm::Value *isNull =
3609       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3610     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3611     CGF.EmitBlock(writebackBB);
3612   }
3613 
3614   // Load the value to writeback.
3615   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3616 
3617   // Cast it back, in case we're writing an id to a Foo* or something.
3618   value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3619                                     "icr.writeback-cast");
3620 
3621   // Perform the writeback.
3622 
3623   // If we have a "to use" value, it's something we need to emit a use
3624   // of.  This has to be carefully threaded in: if it's done after the
3625   // release it's potentially undefined behavior (and the optimizer
3626   // will ignore it), and if it happens before the retain then the
3627   // optimizer could move the release there.
3628   if (writeback.ToUse) {
3629     assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3630 
3631     // Retain the new value.  No need to block-copy here:  the block's
3632     // being passed up the stack.
3633     value = CGF.EmitARCRetainNonBlock(value);
3634 
3635     // Emit the intrinsic use here.
3636     CGF.EmitARCIntrinsicUse(writeback.ToUse);
3637 
3638     // Load the old value (primitively).
3639     llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3640 
3641     // Put the new value in place (primitively).
3642     CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3643 
3644     // Release the old value.
3645     CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3646 
3647   // Otherwise, we can just do a normal lvalue store.
3648   } else {
3649     CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3650   }
3651 
3652   // Jump to the continuation block.
3653   if (!provablyNonNull)
3654     CGF.EmitBlock(contBB);
3655 }
3656 
3657 static void emitWritebacks(CodeGenFunction &CGF,
3658                            const CallArgList &args) {
3659   for (const auto &I : args.writebacks())
3660     emitWriteback(CGF, I);
3661 }
3662 
3663 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3664                                             const CallArgList &CallArgs) {
3665   ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3666     CallArgs.getCleanupsToDeactivate();
3667   // Iterate in reverse to increase the likelihood of popping the cleanup.
3668   for (const auto &I : llvm::reverse(Cleanups)) {
3669     CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3670     I.IsActiveIP->eraseFromParent();
3671   }
3672 }
3673 
3674 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3675   if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3676     if (uop->getOpcode() == UO_AddrOf)
3677       return uop->getSubExpr();
3678   return nullptr;
3679 }
3680 
3681 /// Emit an argument that's being passed call-by-writeback.  That is,
3682 /// we are passing the address of an __autoreleased temporary; it
3683 /// might be copy-initialized with the current value of the given
3684 /// address, but it will definitely be copied out of after the call.
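/// For example (hypothetical ARC code), given `__strong NSError *err;`, a call
/// `f(&err)` whose parameter is `NSError * __autoreleasing *` passes the
/// address of a temporary, and the temporary's value is written back to `err`
/// after the call.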
3685 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3686                              const ObjCIndirectCopyRestoreExpr *CRE) {
3687   LValue srcLV;
3688 
3689   // Make an optimistic effort to emit the address as an l-value.
3690   // This can fail if the argument expression is more complicated.
3691   if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3692     srcLV = CGF.EmitLValue(lvExpr);
3693 
3694   // Otherwise, just emit it as a scalar.
3695   } else {
3696     Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3697 
3698     QualType srcAddrType =
3699       CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3700     srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3701   }
3702   Address srcAddr = srcLV.getAddress(CGF);
3703 
3704   // The dest and src types don't necessarily match in LLVM terms
3705   // because of the crazy ObjC compatibility rules.
3706 
3707   llvm::PointerType *destType =
3708     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3709 
3710   // If the address is a constant null, just pass the appropriate null.
3711   if (isProvablyNull(srcAddr.getPointer())) {
3712     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3713              CRE->getType());
3714     return;
3715   }
3716 
3717   // Create the temporary.
3718   Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3719                                       CGF.getPointerAlign(),
3720                                       "icr.temp");
3721   // Loading an l-value can introduce a cleanup if the l-value is __weak,
3722   // and that cleanup will be conditional if we can't prove that the l-value
3723   // isn't null, so we need to register a dominating point so that the cleanups
3724   // system will make valid IR.
3725   CodeGenFunction::ConditionalEvaluation condEval(CGF);
3726 
3727   // Zero-initialize it if we're not doing a copy-initialization.
3728   bool shouldCopy = CRE->shouldCopy();
3729   if (!shouldCopy) {
3730     llvm::Value *null =
3731       llvm::ConstantPointerNull::get(
3732         cast<llvm::PointerType>(destType->getElementType()));
3733     CGF.Builder.CreateStore(null, temp);
3734   }
3735 
3736   llvm::BasicBlock *contBB = nullptr;
3737   llvm::BasicBlock *originBB = nullptr;
3738 
3739   // If the address is *not* known to be non-null, we need to switch.
3740   llvm::Value *finalArgument;
3741 
3742   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3743                                               CGF.CGM.getDataLayout());
3744   if (provablyNonNull) {
3745     finalArgument = temp.getPointer();
3746   } else {
3747     llvm::Value *isNull =
3748       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3749 
3750     finalArgument = CGF.Builder.CreateSelect(isNull,
3751                                    llvm::ConstantPointerNull::get(destType),
3752                                              temp.getPointer(), "icr.argument");
3753 
3754     // If we need to copy, then the load has to be conditional, which
3755     // means we need control flow.
3756     if (shouldCopy) {
3757       originBB = CGF.Builder.GetInsertBlock();
3758       contBB = CGF.createBasicBlock("icr.cont");
3759       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3760       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3761       CGF.EmitBlock(copyBB);
3762       condEval.begin(CGF);
3763     }
3764   }
3765 
3766   llvm::Value *valueToUse = nullptr;
3767 
3768   // Perform a copy if necessary.
3769   if (shouldCopy) {
3770     RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3771     assert(srcRV.isScalar());
3772 
3773     llvm::Value *src = srcRV.getScalarVal();
3774     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3775                                     "icr.cast");
3776 
3777     // Use an ordinary store, not a store-to-lvalue.
3778     CGF.Builder.CreateStore(src, temp);
3779 
3780     // If optimization is enabled, and the value was held in a
3781     // __strong variable, we need to tell the optimizer that this
3782     // value has to stay alive until we're doing the store back.
3783     // This is because the temporary is effectively unretained,
3784     // and so otherwise we can violate the high-level semantics.
3785     if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3786         srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3787       valueToUse = src;
3788     }
3789   }
3790 
3791   // Finish the control flow if we needed it.
3792   if (shouldCopy && !provablyNonNull) {
3793     llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3794     CGF.EmitBlock(contBB);
3795 
3796     // Make a phi for the value to intrinsically use.
3797     if (valueToUse) {
3798       llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3799                                                       "icr.to-use");
3800       phiToUse->addIncoming(valueToUse, copyBB);
3801       phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3802                             originBB);
3803       valueToUse = phiToUse;
3804     }
3805 
3806     condEval.end(CGF);
3807   }
3808 
3809   args.addWriteback(srcLV, temp, valueToUse);
3810   args.add(RValue::get(finalArgument), CRE->getType());
3811 }
3812 
3813 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3814   assert(!StackBase);
3815 
3816   // Save the stack.
3817   llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3818   StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3819 }
3820 
3821 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3822   if (StackBase) {
3823     // Restore the stack after the call.
3824     llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3825     CGF.Builder.CreateCall(F, StackBase);
3826   }
3827 }
3828 
3829 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3830                                           SourceLocation ArgLoc,
3831                                           AbstractCallee AC,
3832                                           unsigned ParmNum) {
3833   if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3834                          SanOpts.has(SanitizerKind::NullabilityArg)))
3835     return;
3836 
3837   // The param decl may be missing in a variadic function.
3838   auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3839   unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3840 
3841   // Prefer the nonnull attribute if it's present.
3842   const NonNullAttr *NNAttr = nullptr;
3843   if (SanOpts.has(SanitizerKind::NonnullAttribute))
3844     NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3845 
3846   bool CanCheckNullability = false;
3847   if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3848     auto Nullability = PVD->getType()->getNullability(getContext());
3849     CanCheckNullability = Nullability &&
3850                           *Nullability == NullabilityKind::NonNull &&
3851                           PVD->getTypeSourceInfo();
3852   }
3853 
3854   if (!NNAttr && !CanCheckNullability)
3855     return;
3856 
3857   SourceLocation AttrLoc;
3858   SanitizerMask CheckKind;
3859   SanitizerHandler Handler;
3860   if (NNAttr) {
3861     AttrLoc = NNAttr->getLocation();
3862     CheckKind = SanitizerKind::NonnullAttribute;
3863     Handler = SanitizerHandler::NonnullArg;
3864   } else {
3865     AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3866     CheckKind = SanitizerKind::NullabilityArg;
3867     Handler = SanitizerHandler::NullabilityArg;
3868   }
3869 
3870   SanitizerScope SanScope(this);
3871   llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType);
3872   llvm::Constant *StaticData[] = {
3873       EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3874       llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3875   };
3876   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3877 }
3878 
3879 // Check if the call is going to use the inalloca convention. This needs to
3880 // agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged
3881 // later, so we can't check it directly.
3882 static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC,
3883                             ArrayRef<QualType> ArgTypes) {
3884   // The Swift calling convention doesn't go through the target-specific
3885   // argument classification, so it never uses inalloca.
3886   // TODO: Consider limiting inalloca use to only calling conventions supported
3887   // by MSVC.
3888   if (ExplicitCC == CC_Swift)
3889     return false;
3890   if (!CGM.getTarget().getCXXABI().isMicrosoft())
3891     return false;
3892   return llvm::any_of(ArgTypes, [&](QualType Ty) {
3893     return isInAllocaArgument(CGM.getCXXABI(), Ty);
3894   });
3895 }
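     // Illustrative trigger: on i686-pc-windows-msvc, a by-value argument that
     // is not trivially copyable, e.g.
     //
     //   struct S { S(const S &); int x; };
     //   void f(S s);
     //
     // must be constructed directly into the argument area, so calls to f use
     // the inalloca convention.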
3896 
3897 #ifndef NDEBUG
3898 // Determine whether the given Objective-C method may have type
3899 // parameters in its signature.
3900 static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
3901   const DeclContext *dc = method->getDeclContext();
3902   if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) {
3903     return classDecl->getTypeParamListAsWritten();
3904   }
3905 
3906   if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
3907     return catDecl->getTypeParamList();
3908   }
3909 
3910   return false;
3911 }
3912 #endif
3913 
3914 /// EmitCallArgs - Emit call arguments for a function.
3915 void CodeGenFunction::EmitCallArgs(
3916     CallArgList &Args, PrototypeWrapper Prototype,
3917     llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3918     AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3919   SmallVector<QualType, 16> ArgTypes;
3920 
3921   assert((ParamsToSkip == 0 || Prototype.P) &&
3922          "Can't skip parameters if type info is not provided");
3923 
3924   // This variable only captures *explicitly* written conventions, not those
3925   // applied by default via command line flags or target defaults, such as
3926   // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would
3927   // require knowing if this is a C++ instance method or being able to see
3928   // unprototyped FunctionTypes.
3929   CallingConv ExplicitCC = CC_C;
3930 
3931   // First, if a prototype was provided, use those argument types.
3932   bool IsVariadic = false;
3933   if (Prototype.P) {
3934     const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>();
3935     if (MD) {
3936       IsVariadic = MD->isVariadic();
3937       ExplicitCC = getCallingConventionForDecl(
3938           MD, CGM.getTarget().getTriple().isOSWindows());
3939       ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
3940                       MD->param_type_end());
3941     } else {
3942       const auto *FPT = Prototype.P.get<const FunctionProtoType *>();
3943       IsVariadic = FPT->isVariadic();
3944       ExplicitCC = FPT->getExtInfo().getCC();
3945       ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
3946                       FPT->param_type_end());
3947     }
3948 
3949 #ifndef NDEBUG
3950     // Check that the prototyped types match the argument expression types.
3951     bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD);
3952     CallExpr::const_arg_iterator Arg = ArgRange.begin();
3953     for (QualType Ty : ArgTypes) {
3954       assert(Arg != ArgRange.end() && "Running over edge of argument list!");
3955       assert(
3956           (isGenericMethod || Ty->isVariablyModifiedType() ||
3957            Ty.getNonReferenceType()->isObjCRetainableType() ||
3958            getContext()
3959                    .getCanonicalType(Ty.getNonReferenceType())
3960                    .getTypePtr() ==
3961                getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
3962           "type mismatch in call argument!");
3963       ++Arg;
3964     }
3965 
3966     // Either we've emitted all the call args, or we have a call to a
3967     // variadic function.
3968     assert((Arg == ArgRange.end() || IsVariadic) &&
3969            "Extra arguments in non-variadic function!");
3970 #endif
3971   }
3972 
3973   // If we still have any arguments, emit them using the type of the argument.
3974   for (auto *A : llvm::make_range(std::next(ArgRange.begin(), ArgTypes.size()),
3975                                   ArgRange.end()))
3976     ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
3977   assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3978 
3979   // We must evaluate arguments from right to left in the MS C++ ABI,
3980   // because arguments are destroyed left to right in the callee. As a special
3981   // case, there are certain language constructs that require left-to-right
3982   // evaluation, and in those cases we consider the evaluation order requirement
3983   // to trump the "destruction order is reverse construction order" guarantee.
3984   bool LeftToRight =
3985       CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3986           ? Order == EvaluationOrder::ForceLeftToRight
3987           : Order != EvaluationOrder::ForceRightToLeft;
3988 
3989   auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3990                                          RValue EmittedArg) {
3991     if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3992       return;
3993     auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3994     if (PS == nullptr)
3995       return;
3996 
3997     const auto &Context = getContext();
3998     auto SizeTy = Context.getSizeType();
3999     auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
4000     assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
4001     llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
4002                                                      EmittedArg.getScalarVal(),
4003                                                      PS->isDynamic());
4004     Args.add(RValue::get(V), SizeTy);
4005     // If we're emitting args in reverse, be sure to do so with
4006     // pass_object_size, as well.
4007     if (!LeftToRight)
4008       std::swap(Args.back(), *(&Args.back() - 1));
4009   };
4010 
4011   // Insert a stack save if we're going to need any inalloca args.
4012   if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
4013     assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4014            "inalloca only supported on x86");
4015     Args.allocateArgumentMemory(*this);
4016   }
4017 
4018   // Evaluate each argument in the appropriate order.
4019   size_t CallArgsStart = Args.size();
4020   for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4021     unsigned Idx = LeftToRight ? I : E - I - 1;
4022     CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
4023     unsigned InitialArgSize = Args.size();
4024     // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
4025     // the argument and parameter match or the objc method is parameterized.
4026     assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
4027             getContext().hasSameUnqualifiedType((*Arg)->getType(),
4028                                                 ArgTypes[Idx]) ||
4029             (isa<ObjCMethodDecl>(AC.getDecl()) &&
4030              isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
4031            "Argument and parameter types don't match");
4032     EmitCallArg(Args, *Arg, ArgTypes[Idx]);
4033     // We depend on the emitted argument being the last one in Args, and
4034     // the objectsize code depends on only one arg being added if !LeftToRight.
4035     assert(InitialArgSize + 1 == Args.size() &&
4036            "The code below depends on only adding one arg per EmitCallArg");
4037     (void)InitialArgSize;
4038     // Since pointer arguments are never emitted as LValues, it is safe to
4039     // emit the non-null argument check for r-values only.
4040     if (!Args.back().hasLValue()) {
4041       RValue RVArg = Args.back().getKnownRValue();
4042       EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
4043                           ParamsToSkip + Idx);
4044       // @llvm.objectsize should never have side-effects and shouldn't need
4045       // destruction/cleanups, so we can safely "emit" it after its arg,
4046       // regardless of right-to-leftness.
4047       MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4048     }
4049   }
4050 
4051   if (!LeftToRight) {
4052     // Un-reverse the arguments we just evaluated so they match up with the LLVM
4053     // IR function.
4054     std::reverse(Args.begin() + CallArgsStart, Args.end());
4055   }
4056 }
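     // Evaluation-order example (sketch): in the MS C++ ABI, f(a(), b()) is
     // evaluated right to left -- b() then a() -- so that callee destruction
     // order is the reverse of construction order; the std::reverse above then
     // restores source order in Args to match the IR signature. Constructs that
     // require left-to-right evaluation (e.g. braced init lists) instead pass
     // EvaluationOrder::ForceLeftToRight.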
4057 
4058 namespace {
4059 
4060 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
4061   DestroyUnpassedArg(Address Addr, QualType Ty)
4062       : Addr(Addr), Ty(Ty) {}
4063 
4064   Address Addr;
4065   QualType Ty;
4066 
4067   void Emit(CodeGenFunction &CGF, Flags flags) override {
4068     QualType::DestructionKind DtorKind = Ty.isDestructedType();
4069     if (DtorKind == QualType::DK_cxx_destructor) {
4070       const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
4071       assert(!Dtor->isTrivial());
4072       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
4073                                 /*Delegating=*/false, Addr, Ty);
4074     } else {
4075       CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
4076     }
4077   }
4078 };
4079 
4080 struct DisableDebugLocationUpdates {
4081   CodeGenFunction &CGF;
4082   bool disabledDebugInfo;
4083   DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
4084     if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
4085       CGF.disableDebugInfo();
4086   }
4087   ~DisableDebugLocationUpdates() {
4088     if (disabledDebugInfo)
4089       CGF.enableDebugInfo();
4090   }
4091 };
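     // Rationale sketch (not from the original source): given
     //
     //   void f(int x = compute()); // declared in a header
     //   f();                       // call site
     //
     // the default argument 'compute()' is emitted at the call site, so
     // suspending debug locations while emitting it avoids attributing the
     // call-site instructions to the header line of the default argument.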
4092 
4093 } // end anonymous namespace
4094 
4095 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
4096   if (!HasLV)
4097     return RV;
4098   LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
4099   CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
4100                         LV.isVolatile());
4101   IsUsed = true;
4102   return RValue::getAggregate(Copy.getAddress(CGF));
4103 }
4104 
4105 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
4106   LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
4107   if (!HasLV && RV.isScalar())
4108     CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
4109   else if (!HasLV && RV.isComplex())
4110     CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
4111   else {
4112     auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
4113     LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
4114     // We assume that call args are never copied into subobjects.
4115     CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
4116                           HasLV ? LV.isVolatileQualified()
4117                                 : RV.isVolatileQualified());
4118   }
4119   IsUsed = true;
4120 }
4121 
4122 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
4123                                   QualType type) {
4124   DisableDebugLocationUpdates Dis(*this, E);
4125   if (const ObjCIndirectCopyRestoreExpr *CRE
4126         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
4127     assert(getLangOpts().ObjCAutoRefCount);
4128     return emitWritebackArg(*this, args, CRE);
4129   }
4130 
4131   assert(type->isReferenceType() == E->isGLValue() &&
4132          "reference binding to unmaterialized r-value!");
4133 
4134   if (E->isGLValue()) {
4135     assert(E->getObjectKind() == OK_Ordinary);
4136     return args.add(EmitReferenceBindingToExpr(E), type);
4137   }
4138 
4139   bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
4140 
4141   // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
4142   // However, we still have to push an EH-only cleanup in case we unwind before
4143   // we make it to the call.
4144   if (HasAggregateEvalKind &&
4145       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
4146     // If we're using inalloca, use the argument memory.  Otherwise, use a
4147     // temporary.
4148     AggValueSlot Slot;
4149     if (args.isUsingInAlloca())
4150       Slot = createPlaceholderSlot(*this, type);
4151     else
4152       Slot = CreateAggTemp(type, "agg.tmp");
4153 
4154     bool DestroyedInCallee = true, NeedsEHCleanup = true;
4155     if (const auto *RD = type->getAsCXXRecordDecl())
4156       DestroyedInCallee = RD->hasNonTrivialDestructor();
4157     else
4158       NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
4159 
4160     if (DestroyedInCallee)
4161       Slot.setExternallyDestructed();
4162 
4163     EmitAggExpr(E, Slot);
4164     RValue RV = Slot.asRValue();
4165     args.add(RV, type);
4166 
4167     if (DestroyedInCallee && NeedsEHCleanup) {
4168       // Push an EH-only cleanup to destroy the argument if we unwind
4169       // before the call. The marker instruction created below records the
4170       // first instruction where the cleanup is active.
4171       pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
4172                                               type);
4173       // This unreachable is a temporary marker which will be removed later.
4174       llvm::Instruction *IsActive = Builder.CreateUnreachable();
4175       args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
4176     }
4177     return;
4178   }
4179 
4180   if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
4181       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
4182     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
4183     assert(L.isSimple());
4184     args.addUncopiedAggregate(L, type);
4185     return;
4186   }
4187 
4188   args.add(EmitAnyExprToTemp(E), type);
4189 }
4190 
4191 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
4192   // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
4193   // implicitly widens null pointer constants that are arguments to varargs
4194   // functions to pointer-sized ints.
4195   if (!getTarget().getTriple().isOSWindows())
4196     return Arg->getType();
4197 
4198   if (Arg->getType()->isIntegerType() &&
4199       getContext().getTypeSize(Arg->getType()) <
4200           getContext().getTargetInfo().getPointerWidth(0) &&
4201       Arg->isNullPointerConstant(getContext(),
4202                                  Expr::NPC_ValueDependentIsNotNull)) {
4203     return getContext().getIntPtrType();
4204   }
4205 
4206   return Arg->getType();
4207 }
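     // Example (illustrative): on x86_64-pc-windows-msvc, where NULL is plain 0,
     //
     //   printf("%p\n", NULL);
     //
     // would otherwise pass a 32-bit zero into a varargs slot read as a 64-bit
     // pointer; returning the intptr type here widens it the way MSVC does.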
4208 
4209 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4210 // optimizer it can aggressively ignore unwind edges.
4211 void
4212 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
4213   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
4214       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
4215     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
4216                       CGM.getNoObjCARCExceptionsMetadata());
4217 }
4218 
4219 /// Emits a call to the given no-arguments nounwind runtime function.
4220 llvm::CallInst *
4221 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4222                                          const llvm::Twine &name) {
4223   return EmitNounwindRuntimeCall(callee, None, name);
4224 }
4225 
4226 /// Emits a call to the given nounwind runtime function.
4227 llvm::CallInst *
4228 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4229                                          ArrayRef<llvm::Value *> args,
4230                                          const llvm::Twine &name) {
4231   llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
4232   call->setDoesNotThrow();
4233   return call;
4234 }
4235 
4236 /// Emits a simple call (never an invoke) to the given no-arguments
4237 /// runtime function.
4238 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4239                                                  const llvm::Twine &name) {
4240   return EmitRuntimeCall(callee, None, name);
4241 }
4242 
4243 // Calls which may throw must have operand bundles indicating which funclet
4244 // they are nested within.
4245 SmallVector<llvm::OperandBundleDef, 1>
4246 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
4247   SmallVector<llvm::OperandBundleDef, 1> BundleList;
4248   // There is no need for a funclet operand bundle if we aren't inside a
4249   // funclet.
4250   if (!CurrentFuncletPad)
4251     return BundleList;
4252 
4253   // Skip intrinsics which cannot throw.
4254   auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
4255   if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
4256     return BundleList;
4257 
4258   BundleList.emplace_back("funclet", CurrentFuncletPad);
4259   return BundleList;
4260 }
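     // Sketch of the resulting IR when calling from inside a cleanup funclet:
     //
     //   %pad = cleanuppad within none []
     //   call void @g() [ "funclet"(token %pad) ]
     //   cleanupret from %pad unwind to caller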
4261 
4262 /// Emits a simple call (never an invoke) to the given runtime function.
4263 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4264                                                  ArrayRef<llvm::Value *> args,
4265                                                  const llvm::Twine &name) {
4266   llvm::CallInst *call = Builder.CreateCall(
4267       callee, args, getBundlesForFunclet(callee.getCallee()), name);
4268   call->setCallingConv(getRuntimeCC());
4269   return call;
4270 }
4271 
4272 /// Emits a call or invoke to the given noreturn runtime function.
4273 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
4274     llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
4275   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4276       getBundlesForFunclet(callee.getCallee());
4277 
4278   if (getInvokeDest()) {
4279     llvm::InvokeInst *invoke =
4280       Builder.CreateInvoke(callee,
4281                            getUnreachableBlock(),
4282                            getInvokeDest(),
4283                            args,
4284                            BundleList);
4285     invoke->setDoesNotReturn();
4286     invoke->setCallingConv(getRuntimeCC());
4287   } else {
4288     llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
4289     call->setDoesNotReturn();
4290     call->setCallingConv(getRuntimeCC());
4291     Builder.CreateUnreachable();
4292   }
4293 }
4294 
4295 /// Emits a call or invoke instruction to the given nullary runtime function.
4296 llvm::CallBase *
4297 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4298                                          const Twine &name) {
4299   return EmitRuntimeCallOrInvoke(callee, None, name);
4300 }
4301 
4302 /// Emits a call or invoke instruction to the given runtime function.
4303 llvm::CallBase *
4304 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4305                                          ArrayRef<llvm::Value *> args,
4306                                          const Twine &name) {
4307   llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
4308   call->setCallingConv(getRuntimeCC());
4309   return call;
4310 }
4311 
4312 /// Emits a call or invoke instruction to the given function, depending
4313 /// on the current state of the EH stack.
4314 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
4315                                                   ArrayRef<llvm::Value *> Args,
4316                                                   const Twine &Name) {
4317   llvm::BasicBlock *InvokeDest = getInvokeDest();
4318   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4319       getBundlesForFunclet(Callee.getCallee());
4320 
4321   llvm::CallBase *Inst;
4322   if (!InvokeDest)
4323     Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
4324   else {
4325     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
4326     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4327                                 Name);
4328     EmitBlock(ContBB);
4329   }
4330 
4331   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4332   // optimizer it can aggressively ignore unwind edges.
4333   if (CGM.getLangOpts().ObjCAutoRefCount)
4334     AddObjCARCExceptionMetadata(Inst);
4335 
4336   return Inst;
4337 }
4338 
4339 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4340                                                   llvm::Value *New) {
4341   DeferredReplacements.push_back(std::make_pair(Old, New));
4342 }
4343 
4344 namespace {
4345 
4346 /// Apply \p NewAlign as the alignment of the return value attribute. If such
4347 /// an attribute already exists, raise it to the maximum of the two alignments.
4348 LLVM_NODISCARD llvm::AttributeList
4349 maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4350                                 const llvm::AttributeList &Attrs,
4351                                 llvm::Align NewAlign) {
4352   llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4353   if (CurAlign >= NewAlign)
4354     return Attrs;
4355   llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4356   return Attrs
4357       .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex,
4358                        llvm::Attribute::AttrKind::Alignment)
4359       .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr);
4360 }
4361 
4362 template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
4363 protected:
4364   CodeGenFunction &CGF;
4365 
4366   /// We do nothing if this is, or becomes, nullptr.
4367   const AlignedAttrTy *AA = nullptr;
4368 
4369   llvm::Value *Alignment = nullptr;      // May or may not be a constant.
4370   llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.
4371 
4372   AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4373       : CGF(CGF_) {
4374     if (!FuncDecl)
4375       return;
4376     AA = FuncDecl->getAttr<AlignedAttrTy>();
4377   }
4378 
4379 public:
4380   /// If we can, materialize the alignment as an attribute on the return value.
4381   LLVM_NODISCARD llvm::AttributeList
4382   TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
4383     if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
4384       return Attrs;
4385     const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
4386     if (!AlignmentCI)
4387       return Attrs;
4388     // We may legitimately have non-power-of-2 alignment here.
4389     // If so, this is UB land, emit it via `@llvm.assume` instead.
4390     if (!AlignmentCI->getValue().isPowerOf2())
4391       return Attrs;
4392     llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
4393         CGF.getLLVMContext(), Attrs,
4394         llvm::Align(
4395             AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
4396     AA = nullptr; // We're done. Disallow doing anything else.
4397     return NewAttrs;
4398   }
4399 
4400   /// Emit alignment assumption.
4401   /// This is the general fallback, taken when there is an offset, when the
4402   /// alignment is variable, or when we are sanitizing for alignment.
4403   void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
4404     if (!AA)
4405       return;
4406     CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
4407                                 AA->getLocation(), Alignment, OffsetCI);
4408     AA = nullptr; // We're done. Disallow doing anything else.
4409   }
4410 };
4411 
4412 /// Helper data structure to emit `AssumeAlignedAttr`.
4413 class AssumeAlignedAttrEmitter final
4414     : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
4415 public:
4416   AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4417       : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4418     if (!AA)
4419       return;
4420     // It is guaranteed that the alignment/offset are constants.
4421     Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
4422     if (Expr *Offset = AA->getOffset()) {
4423       OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
4424       if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
4425         OffsetCI = nullptr;
4426     }
4427   }
4428 };
4429 
4430 /// Helper data structure to emit `AllocAlignAttr`.
4431 class AllocAlignAttrEmitter final
4432     : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
4433 public:
4434   AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
4435                         const CallArgList &CallArgs)
4436       : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4437     if (!AA)
4438       return;
4439     // Alignment may or may not be a constant, and that is okay.
4440     Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
4441                     .getRValue(CGF)
4442                     .getScalarVal();
4443   }
4444 };
4445 
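     // Illustrative uses of the two attributes handled above (hypothetical
     // declarations):
     //
     //   void *make_buffer(size_t n) __attribute__((assume_aligned(64)));
     //   void *my_alloc(size_t n, size_t a) __attribute__((alloc_align(2)));
     //
     // A constant power-of-two alignment becomes an 'align' return attribute on
     // the call site; otherwise an @llvm.assume alignment assumption is emitted
     // after the call.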
4446 } // namespace
4447 
4448 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
4449                                  const CGCallee &Callee,
4450                                  ReturnValueSlot ReturnValue,
4451                                  const CallArgList &CallArgs,
4452                                  llvm::CallBase **callOrInvoke,
4453                                  SourceLocation Loc) {
4454   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
4455 
4456   assert(Callee.isOrdinary() || Callee.isVirtual());
4457 
4458   // Handle struct-return functions by passing a pointer to the
4459   // location that we would like to return into.
4460   QualType RetTy = CallInfo.getReturnType();
4461   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
4462 
4463   llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
4464 
4465   const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4466   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
4467     // We can only guarantee that a function is called from the correct
4468     // context/function based on the appropriate target attributes, so we
4469     // only check when the callee has both always_inline and target;
4470     // otherwise the call may legitimately be a conditional call made after
4471     // a check for the proper CPU features (and it won't cause code
4472     // generation issues thanks to function-based code generation).
4473     if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
4474         TargetDecl->hasAttr<TargetAttr>())
4475       checkTargetFeatures(Loc, FD);
4476 
4477     // Some architectures (such as x86-64) have the ABI changed based on
4478     // attribute-target/features. Give them a chance to diagnose.
4479     CGM.getTargetCodeGenInfo().checkFunctionCallABI(
4480         CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
4481   }
4482 
4483 #ifndef NDEBUG
4484   if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
4485     // For an inalloca varargs function, we don't expect CallInfo to match the
4486     // function pointer's type, because the inalloca struct will have extra
4487     // fields in it for the varargs parameters.  Code later in this function
4488     // bitcasts the function pointer to the type derived from CallInfo.
4489     //
4490     // In other cases, we assert that the types match up (until pointers stop
4491     // having pointee types).
4492     llvm::Type *TypeFromVal;
4493     if (Callee.isVirtual())
4494       TypeFromVal = Callee.getVirtualFunctionType();
4495     else
4496       TypeFromVal =
4497           Callee.getFunctionPointer()->getType()->getPointerElementType();
4498     assert(IRFuncTy == TypeFromVal);
4499   }
4500 #endif
4501 
4502   // 1. Set up the arguments.
4503 
4504   // If we're using inalloca, insert the allocation after the stack save.
4505   // FIXME: Do this earlier rather than hacking it in here!
4506   Address ArgMemory = Address::invalid();
4507   if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
4508     const llvm::DataLayout &DL = CGM.getDataLayout();
4509     llvm::Instruction *IP = CallArgs.getStackBase();
4510     llvm::AllocaInst *AI;
4511     if (IP) {
4512       IP = IP->getNextNode();
4513       AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
4514                                 "argmem", IP);
4515     } else {
4516       AI = CreateTempAlloca(ArgStruct, "argmem");
4517     }
4518     auto Align = CallInfo.getArgStructAlignment();
4519     AI->setAlignment(Align.getAsAlign());
4520     AI->setUsedWithInAlloca(true);
4521     assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
4522     ArgMemory = Address(AI, Align);
4523   }
4524 
4525   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
4526   SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
4527 
4528   // If the call returns a temporary with struct return, create a temporary
4529   // alloca to hold the result, unless one is given to us.
4530   Address SRetPtr = Address::invalid();
4531   Address SRetAlloca = Address::invalid();
4532   llvm::Value *UnusedReturnSizePtr = nullptr;
4533   if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
4534     if (!ReturnValue.isNull()) {
4535       SRetPtr = ReturnValue.getValue();
4536     } else {
4537       SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
4538       if (HaveInsertPoint() && ReturnValue.isUnused()) {
4539         uint64_t size =
4540             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
4541         UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
4542       }
4543     }
4544     if (IRFunctionArgs.hasSRetArg()) {
4545       IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
4546     } else if (RetAI.isInAlloca()) {
4547       Address Addr =
4548           Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
4549       Builder.CreateStore(SRetPtr.getPointer(), Addr);
4550     }
4551   }
4552 
4553   Address swiftErrorTemp = Address::invalid();
4554   Address swiftErrorArg = Address::invalid();
4555 
4556   // When passing arguments using temporary allocas, we need to add the
4557   // appropriate lifetime markers. This vector keeps track of all the lifetime
4558   // markers that need to be ended right after the call.
4559   SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;
4560 
4561   // Translate all of the arguments as necessary to match the IR lowering.
4562   assert(CallInfo.arg_size() == CallArgs.size() &&
4563          "Mismatch between function signature & arguments.");
4564   unsigned ArgNo = 0;
4565   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
4566   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
4567        I != E; ++I, ++info_it, ++ArgNo) {
4568     const ABIArgInfo &ArgInfo = info_it->info;
4569 
4570     // Insert a padding argument to ensure proper alignment.
4571     if (IRFunctionArgs.hasPaddingArg(ArgNo))
4572       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
4573           llvm::UndefValue::get(ArgInfo.getPaddingType());
4574 
4575     unsigned FirstIRArg, NumIRArgs;
4576     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
4577 
4578     switch (ArgInfo.getKind()) {
4579     case ABIArgInfo::InAlloca: {
4580       assert(NumIRArgs == 0);
4581       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
4582       if (I->isAggregate()) {
4583         Address Addr = I->hasLValue()
4584                            ? I->getKnownLValue().getAddress(*this)
4585                            : I->getKnownRValue().getAggregateAddress();
4586         llvm::Instruction *Placeholder =
4587             cast<llvm::Instruction>(Addr.getPointer());
4588 
4589         if (!ArgInfo.getInAllocaIndirect()) {
4590           // Replace the placeholder with the appropriate argument slot GEP.
4591           CGBuilderTy::InsertPoint IP = Builder.saveIP();
4592           Builder.SetInsertPoint(Placeholder);
4593           Addr = Builder.CreateStructGEP(ArgMemory,
4594                                          ArgInfo.getInAllocaFieldIndex());
4595           Builder.restoreIP(IP);
4596         } else {
4597           // For indirect things such as overaligned structs, replace the
4598           // placeholder with a regular aggregate temporary alloca. Store the
4599           // address of this alloca into the struct.
4600           Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
4601           Address ArgSlot = Builder.CreateStructGEP(
4602               ArgMemory, ArgInfo.getInAllocaFieldIndex());
4603           Builder.CreateStore(Addr.getPointer(), ArgSlot);
4604         }
4605         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
4606       } else if (ArgInfo.getInAllocaIndirect()) {
4607         // Make a temporary alloca and store the address of it into the argument
4608         // struct.
4609         Address Addr = CreateMemTempWithoutCast(
4610             I->Ty, getContext().getTypeAlignInChars(I->Ty),
4611             "indirect-arg-temp");
4612         I->copyInto(*this, Addr);
4613         Address ArgSlot =
4614             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4615         Builder.CreateStore(Addr.getPointer(), ArgSlot);
4616       } else {
4617         // Store the RValue into the argument struct.
4618         Address Addr =
4619             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4620         unsigned AS = Addr.getType()->getPointerAddressSpace();
4621         llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
4622         // There are some cases where a trivial bitcast is not avoidable.  The
4623         // definition of a type later in a translation unit may change its type
4624         // from {}* to (%struct.foo*)*.
4625         if (Addr.getType() != MemType)
4626           Addr = Builder.CreateBitCast(Addr, MemType);
4627         I->copyInto(*this, Addr);
4628       }
4629       break;
4630     }
4631 
4632     case ABIArgInfo::Indirect:
4633     case ABIArgInfo::IndirectAliased: {
4634       assert(NumIRArgs == 1);
4635       if (!I->isAggregate()) {
4636         // Make a temporary alloca to pass the argument.
4637         Address Addr = CreateMemTempWithoutCast(
4638             I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
4639         IRCallArgs[FirstIRArg] = Addr.getPointer();
4640 
4641         I->copyInto(*this, Addr);
4642       } else {
4643         // We want to avoid creating an unnecessary temporary+copy here;
4644         // however, we need one in three cases:
4645         // 1. If the argument is not byval, and we are required to copy the
4646         //    source.  (This case doesn't occur on any common architecture.)
4647         // 2. If the argument is byval, RV is not sufficiently aligned, and
4648         //    we cannot force it to be sufficiently aligned.
4649         // 3. If the argument is byval, but RV is not located in default
4650         //    or alloca address space.
4651         Address Addr = I->hasLValue()
4652                            ? I->getKnownLValue().getAddress(*this)
4653                            : I->getKnownRValue().getAggregateAddress();
4654         llvm::Value *V = Addr.getPointer();
4655         CharUnits Align = ArgInfo.getIndirectAlign();
4656         const llvm::DataLayout *TD = &CGM.getDataLayout();
4657 
4658         assert((FirstIRArg >= IRFuncTy->getNumParams() ||
4659                 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
4660                     TD->getAllocaAddrSpace()) &&
4661                "indirect argument must be in alloca address space");
4662 
4663         bool NeedCopy = false;
4664 
4665         if (Addr.getAlignment() < Align &&
4666             llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
4667                 Align.getAsAlign()) {
4668           NeedCopy = true;
4669         } else if (I->hasLValue()) {
4670           auto LV = I->getKnownLValue();
4671           auto AS = LV.getAddressSpace();
4672 
4673           if (!ArgInfo.getIndirectByVal() ||
4674               (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
4675             NeedCopy = true;
4676           }
4677           if (!getLangOpts().OpenCL) {
4678             if ((ArgInfo.getIndirectByVal() &&
4679                 (AS != LangAS::Default &&
4680                  AS != CGM.getASTAllocaAddressSpace()))) {
4681               NeedCopy = true;
4682             }
4683           }
4684           // For OpenCL, even if RV is located in the default or alloca address
4685           // space, we don't want to perform an address space cast for it.
4686           else if ((ArgInfo.getIndirectByVal() &&
4687                     Addr.getType()->getAddressSpace() != IRFuncTy->
4688                       getParamType(FirstIRArg)->getPointerAddressSpace())) {
4689             NeedCopy = true;
4690           }
4691         }
4692 
4693         if (NeedCopy) {
4694           // Create an aligned temporary, and copy to it.
4695           Address AI = CreateMemTempWithoutCast(
4696               I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
4697           IRCallArgs[FirstIRArg] = AI.getPointer();
4698 
4699           // Emit lifetime markers for the temporary alloca.
4700           uint64_t ByvalTempElementSize =
4701               CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
4702           llvm::Value *LifetimeSize =
4703               EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
4704 
4705           // Add cleanup code to emit the end lifetime marker after the call.
4706           if (LifetimeSize) // In case we disabled lifetime markers.
4707             CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
4708 
4709           // Generate the copy.
4710           I->copyInto(*this, AI);
4711         } else {
4712           // Skip the extra memcpy call.
4713           auto *T = V->getType()->getPointerElementType()->getPointerTo(
4714               CGM.getDataLayout().getAllocaAddrSpace());
4715           IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
4716               *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
4717               true);
4718         }
4719       }
4720       break;
4721     }
4722 
4723     case ABIArgInfo::Ignore:
4724       assert(NumIRArgs == 0);
4725       break;
4726 
4727     case ABIArgInfo::Extend:
4728     case ABIArgInfo::Direct: {
4729       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4730           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4731           ArgInfo.getDirectOffset() == 0) {
4732         assert(NumIRArgs == 1);
4733         llvm::Value *V;
4734         if (!I->isAggregate())
4735           V = I->getKnownRValue().getScalarVal();
4736         else
4737           V = Builder.CreateLoad(
4738               I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4739                              : I->getKnownRValue().getAggregateAddress());
4740 
4741         // Implement swifterror by copying into a new swifterror argument.
4742         // We'll write back in the normal path out of the call.
4743         if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4744               == ParameterABI::SwiftErrorResult) {
4745           assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4746 
4747           QualType pointeeTy = I->Ty->getPointeeType();
4748           swiftErrorArg =
4749             Address(V, getContext().getTypeAlignInChars(pointeeTy));
4750 
4751           swiftErrorTemp =
4752             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4753           V = swiftErrorTemp.getPointer();
4754           cast<llvm::AllocaInst>(V)->setSwiftError(true);
4755 
4756           llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4757           Builder.CreateStore(errorValue, swiftErrorTemp);
4758         }
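             // Sketch (pseudo-IR) of the pattern emitted for a swifterror
             // parameter:
             //   %swifterror.temp = alloca swifterror %swift.error*
             //   ...load the caller's error value, store it into the temp,
             //   pass %swifterror.temp, and copy it back after the call.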
4759 
4760         // We might have to widen integers, but we should never truncate.
4761         if (ArgInfo.getCoerceToType() != V->getType() &&
4762             V->getType()->isIntegerTy())
4763           V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4764 
4765         // If the argument doesn't match, perform a bitcast to coerce it.  This
4766         // can happen due to trivial type mismatches.
4767         if (FirstIRArg < IRFuncTy->getNumParams() &&
4768             V->getType() != IRFuncTy->getParamType(FirstIRArg))
4769           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4770 
4771         IRCallArgs[FirstIRArg] = V;
4772         break;
4773       }
4774 
4775       // FIXME: Avoid the conversion through memory if possible.
4776       Address Src = Address::invalid();
4777       if (!I->isAggregate()) {
4778         Src = CreateMemTemp(I->Ty, "coerce");
4779         I->copyInto(*this, Src);
4780       } else {
4781         Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4782                              : I->getKnownRValue().getAggregateAddress();
4783       }
4784 
4785       // If the value is offset in memory, apply the offset now.
4786       Src = emitAddressAtOffset(*this, Src, ArgInfo);
4787 
4788       // Fast-isel and the optimizer generally like scalar values better than
4789       // FCAs, so we flatten them if this is safe to do for this argument.
4790       llvm::StructType *STy =
4791             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4792       if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4793         llvm::Type *SrcTy = Src.getElementType();
4794         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4795         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4796 
4797         // If the source type is smaller than the destination type of the
4798         // coerce-to logic, copy the source value into a temp alloca the size
4799         // of the destination type to allow loading all of it. The bits past
4800         // the source value are left undef.
4801         if (SrcSize < DstSize) {
4802           Address TempAlloca
4803             = CreateTempAlloca(STy, Src.getAlignment(),
4804                                Src.getName() + ".coerce");
4805           Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4806           Src = TempAlloca;
4807         } else {
4808           Src = Builder.CreateBitCast(Src,
4809                                       STy->getPointerTo(Src.getAddressSpace()));
4810         }
4811 
4812         assert(NumIRArgs == STy->getNumElements());
4813         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4814           Address EltPtr = Builder.CreateStructGEP(Src, i);
4815           llvm::Value *LI = Builder.CreateLoad(EltPtr);
4816           IRCallArgs[FirstIRArg + i] = LI;
4817         }
4818       } else {
4819         // In the simple case, just pass the coerced loaded value.
4820         assert(NumIRArgs == 1);
4821         llvm::Value *Load =
4822             CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4823 
4824         if (CallInfo.isCmseNSCall()) {
4825           // For certain parameter types, clear padding bits, as they may reveal
4826           // sensitive information.
4827           // Small struct/union types are passed as integer arrays.
4828           auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
4829           if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
4830             Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
4831         }
4832         IRCallArgs[FirstIRArg] = Load;
4833       }
4834 
4835       break;
4836     }
4837 
4838     case ABIArgInfo::CoerceAndExpand: {
4839       auto coercionType = ArgInfo.getCoerceAndExpandType();
4840       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4841 
4842       llvm::Value *tempSize = nullptr;
4843       Address addr = Address::invalid();
4844       Address AllocaAddr = Address::invalid();
4845       if (I->isAggregate()) {
4846         addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4847                               : I->getKnownRValue().getAggregateAddress();
4848 
4849       } else {
4850         RValue RV = I->getKnownRValue();
4851         assert(RV.isScalar()); // complex should always just be direct
4852 
4853         llvm::Type *scalarType = RV.getScalarVal()->getType();
4854         auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4855         auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4856 
4857         // Materialize to a temporary.
4858         addr = CreateTempAlloca(
4859             RV.getScalarVal()->getType(),
4860             CharUnits::fromQuantity(std::max(
4861                 (unsigned)layout->getAlignment().value(), scalarAlign)),
4862             "tmp",
4863             /*ArraySize=*/nullptr, &AllocaAddr);
4864         tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4865 
4866         Builder.CreateStore(RV.getScalarVal(), addr);
4867       }
4868 
4869       addr = Builder.CreateElementBitCast(addr, coercionType);
4870 
4871       unsigned IRArgPos = FirstIRArg;
4872       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4873         llvm::Type *eltType = coercionType->getElementType(i);
4874         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4875         Address eltAddr = Builder.CreateStructGEP(addr, i);
4876         llvm::Value *elt = Builder.CreateLoad(eltAddr);
4877         IRCallArgs[IRArgPos++] = elt;
4878       }
4879       assert(IRArgPos == FirstIRArg + NumIRArgs);
4880 
4881       if (tempSize) {
4882         EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4883       }
4884 
4885       break;
4886     }
4887 
4888     case ABIArgInfo::Expand: {
4889       unsigned IRArgPos = FirstIRArg;
4890       ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4891       assert(IRArgPos == FirstIRArg + NumIRArgs);
4892       break;
4893     }
4894     }
4895   }
4896 
4897   const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4898   llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4899 
4900   // If we're using inalloca, set up that argument.
4901   if (ArgMemory.isValid()) {
4902     llvm::Value *Arg = ArgMemory.getPointer();
4903     if (CallInfo.isVariadic()) {
4904       // When passing non-POD arguments by value to variadic functions, we will
4905       // end up with a variadic prototype and an inalloca call site.  In such
4906       // cases, we can't do any parameter mismatch checks.  Give up and bitcast
4907       // the callee.
4908       unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4909       CalleePtr =
4910           Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
4911     } else {
4912       llvm::Type *LastParamTy =
4913           IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4914       if (Arg->getType() != LastParamTy) {
4915 #ifndef NDEBUG
4916         // Assert that these structs have equivalent element types.
4917         llvm::StructType *FullTy = CallInfo.getArgStruct();
4918         llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4919             cast<llvm::PointerType>(LastParamTy)->getElementType());
4920         assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4921         for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4922                                                 DE = DeclaredTy->element_end(),
4923                                                 FI = FullTy->element_begin();
4924              DI != DE; ++DI, ++FI)
4925           assert(*DI == *FI);
4926 #endif
4927         Arg = Builder.CreateBitCast(Arg, LastParamTy);
4928       }
4929     }
4930     assert(IRFunctionArgs.hasInallocaArg());
4931     IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4932   }
4933 
4934   // 2. Prepare the function pointer.
4935 
4936   // If the callee is a bitcast of a non-variadic function to have a
4937   // variadic function pointer type, check to see if we can remove the
4938   // bitcast.  This comes up with unprototyped functions.
4939   //
4940   // This makes the IR nicer, but more importantly it ensures that we
4941   // can inline the function at -O0 if it is marked always_inline.
4942   auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
4943                                    llvm::Value *Ptr) -> llvm::Function * {
4944     if (!CalleeFT->isVarArg())
4945       return nullptr;
4946 
4947     // Get underlying value if it's a bitcast
4948     if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
4949       if (CE->getOpcode() == llvm::Instruction::BitCast)
4950         Ptr = CE->getOperand(0);
4951     }
4952 
4953     llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
4954     if (!OrigFn)
4955       return nullptr;
4956 
4957     llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4958 
4959     // If the original type is variadic, or if any of the component types
4960     // disagree, we cannot remove the cast.
4961     if (OrigFT->isVarArg() ||
4962         OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4963         OrigFT->getReturnType() != CalleeFT->getReturnType())
4964       return nullptr;
4965 
4966     for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4967       if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4968         return nullptr;
4969 
4970     return OrigFn;
4971   };
4972 
4973   if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
4974     CalleePtr = OrigFn;
4975     IRFuncTy = OrigFn->getFunctionType();
4976   }
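       // Example (sketch): in C, a call through an unprototyped declaration,
       //
       //   void f();                  // no prototype
       //   void g(void) { f(1, 2); }  // call site sees a variadic type
       //   void f(int a, int b) {}    // actual definition is non-variadic
       //
       // goes through a bitcast of @f; the simplification above strips the cast
       // so an always_inline f can still be inlined at -O0.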
4977 
4978   // 3. Perform the actual call.
4979 
4980   // Deactivate any cleanups that we're supposed to do immediately before
4981   // the call.
4982   if (!CallArgs.getCleanupsToDeactivate().empty())
4983     deactivateArgCleanupsBeforeCall(*this, CallArgs);
4984 
4985   // Assert that the arguments we computed match up.  The IR verifier
4986   // will catch this, but this is a common enough source of problems
4987   // during IRGen changes that it's way better for debugging to catch
4988   // it ourselves here.
4989 #ifndef NDEBUG
4990   assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4991   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4992     // The inalloca argument can have a different type.
4993     if (IRFunctionArgs.hasInallocaArg() &&
4994         i == IRFunctionArgs.getInallocaArgNo())
4995       continue;
4996     if (i < IRFuncTy->getNumParams())
4997       assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4998   }
4999 #endif
5000 
5001   // Update the largest vector width if any arguments have vector types.
5002   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
5003     if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
5004       LargestVectorWidth =
5005           std::max((uint64_t)LargestVectorWidth,
5006                    VT->getPrimitiveSizeInBits().getKnownMinSize());
5007   }
5008 
5009   // Compute the calling convention and attributes.
5010   unsigned CallingConv;
5011   llvm::AttributeList Attrs;
5012   CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
5013                              Callee.getAbstractInfo(), Attrs, CallingConv,
5014                              /*AttrOnCallSite=*/true);
5015 
5016   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
5017     if (FD->hasAttr<StrictFPAttr>())
5018       // All calls within a strictfp function are marked strictfp
5019       Attrs =
5020         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5021                            llvm::Attribute::StrictFP);
5022 
5023   // Add the call-site nomerge attribute if it exists.
5024   if (InNoMergeAttributedStmt)
5025     Attrs =
5026         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5027                            llvm::Attribute::NoMerge);
5028 
5029   // Apply some call-site-specific attributes.
5030   // TODO: work this into building the attribute set.
5031 
5032   // Apply always_inline to all calls within flatten functions.
5033   // FIXME: should this really take priority over __try, below?
5034   if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
5035       !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
5036     Attrs =
5037         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5038                            llvm::Attribute::AlwaysInline);
5039   }
5040 
5041   // Disable inlining inside SEH __try blocks.
5042   if (isSEHTryScope()) {
5043     Attrs =
5044         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5045                            llvm::Attribute::NoInline);
5046   }
5047 
5048   // Decide whether to use a call or an invoke.
5049   bool CannotThrow;
5050   if (currentFunctionUsesSEHTry()) {
5051     // SEH cares about asynchronous exceptions, so everything can "throw."
5052     CannotThrow = false;
5053   } else if (isCleanupPadScope() &&
5054              EHPersonality::get(*this).isMSVCXXPersonality()) {
5055     // The MSVC++ personality will implicitly terminate the program if an
5056     // exception is thrown during a cleanup outside of a try/catch.
5057     // We don't need to model anything in IR to get this behavior.
5058     CannotThrow = true;
5059   } else {
5060     // Otherwise, nounwind call sites will never throw.
5061     CannotThrow = Attrs.hasFnAttribute(llvm::Attribute::NoUnwind);
5062 
5063     if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5064       if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5065         CannotThrow = true;
5066   }
5067 
5068   // If we made a temporary, be sure to clean up after ourselves. Note that we
5069   // can't depend on being inside of an ExprWithCleanups, so we need to manually
5070   // pop this cleanup later on. Being eager about this is OK, since this
5071   // temporary is 'invisible' outside of the callee.
5072   if (UnusedReturnSizePtr)
5073     pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
5074                                          UnusedReturnSizePtr);
5075 
5076   llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
5077 
5078   SmallVector<llvm::OperandBundleDef, 1> BundleList =
5079       getBundlesForFunclet(CalleePtr);
5080 
5081   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
5082     if (FD->hasAttr<StrictFPAttr>())
5083       // All calls within a strictfp function are marked strictfp
5084       Attrs =
5085         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5086                            llvm::Attribute::StrictFP);
5087 
5088   AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
5089   Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5090 
5091   AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
5092   Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5093 
5094   // Emit the actual call/invoke instruction.
5095   llvm::CallBase *CI;
5096   if (!InvokeDest) {
5097     CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
5098   } else {
5099     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
5100     CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
5101                               BundleList);
5102     EmitBlock(Cont);
5103   }
5104   if (callOrInvoke)
5105     *callOrInvoke = CI;
5106 
5107   // If this is within a function that has the guard(nocf) attribute and is an
5108   // indirect call, add the "guard_nocf" attribute to this call to indicate that
5109   // Control Flow Guard checks should not be added, even if the call is inlined.
5110   if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
5111     if (const auto *A = FD->getAttr<CFGuardAttr>()) {
5112       if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
5113         Attrs = Attrs.addAttribute(
5114             getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
5115     }
5116   }
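  // Illustrative example (not from this file): in
  //   __declspec(guard(nocf)) void f(void (*fp)(void)) { fp(); }
  // the indirect call through fp receives "guard_nocf", suppressing the
  // Control Flow Guard check that would otherwise be inserted for it.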
5117 
5118   // Apply the attributes and calling convention.
5119   CI->setAttributes(Attrs);
5120   CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
5121 
5122   // Apply various metadata.
5123 
5124   if (!CI->getType()->isVoidTy())
5125     CI->setName("call");
5126 
5127   // Update largest vector width from the return type.
5128   if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
5129     LargestVectorWidth =
5130         std::max((uint64_t)LargestVectorWidth,
5131                  VT->getPrimitiveSizeInBits().getKnownMinSize());
5132 
5133   // Insert instrumentation or attach profile metadata at indirect call sites.
5134   // For more details, see the comment before the definition of
5135   // IPVK_IndirectCallTarget in InstrProfData.inc.
5136   if (!CI->getCalledFunction())
5137     PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
5138                      CI, CalleePtr);
5139 
5140   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
5141   // optimizer it can aggressively ignore unwind edges.
5142   if (CGM.getLangOpts().ObjCAutoRefCount)
5143     AddObjCARCExceptionMetadata(CI);
5144 
5145   // Suppress tail calls if requested.
5146   if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
5147     if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
5148       Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
5149   }
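  // Illustrative example (not from this file): a callee declared as
  //   __attribute__((not_tail_called)) int g(int);
  // carries NotTailCalledAttr, and the call is emitted as "notail call".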
5150 
5151   // Add metadata for calls to MSAllocator functions.
5152   if (getDebugInfo() && TargetDecl &&
5153       TargetDecl->hasAttr<MSAllocatorAttr>())
5154     getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);
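  // Illustrative example (not from this file): for an allocator such as
  //   __declspec(allocator) void *MyAlloc(size_t);
  // the call site receives heap-allocation-site metadata describing the
  // pointee type, which CodeView debug info uses to attribute allocations.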
5155 
5156   // 4. Finish the call.
5157 
5158   // If the call doesn't return, finish the basic block and clear the
5159   // insertion point; this allows the rest of IRGen to discard
5160   // unreachable code.
5161   if (CI->doesNotReturn()) {
5162     if (UnusedReturnSizePtr)
5163       PopCleanupBlock();
5164 
5165     // Strip away the noreturn attribute to better diagnose unreachable UB.
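    // Illustrative example (not from this file): with -fsanitize=unreachable,
    // given a declaration like
    //   __attribute__((noreturn)) void die(void);
    // dropping noreturn from the call site lets the sanitizer catch the case
    // where die() erroneously returns.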
5166     if (SanOpts.has(SanitizerKind::Unreachable)) {
5167       // Also remove it from the called function, since
5168       // CallBase::hasFnAttr additionally checks the callee's attributes.
5169       if (auto *F = CI->getCalledFunction())
5170         F->removeFnAttr(llvm::Attribute::NoReturn);
5171       CI->removeAttribute(llvm::AttributeList::FunctionIndex,
5172                           llvm::Attribute::NoReturn);
5173 
5174       // Avoid incompatibility with ASan which relies on the `noreturn`
5175       // attribute to insert handler calls.
5176       if (SanOpts.hasOneOf(SanitizerKind::Address |
5177                            SanitizerKind::KernelAddress)) {
5178         SanitizerScope SanScope(this);
5179         llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
5180         Builder.SetInsertPoint(CI);
5181         auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
5182         llvm::FunctionCallee Fn =
5183             CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
5184         EmitNounwindRuntimeCall(Fn);
5185       }
5186     }
5187 
5188     EmitUnreachable(Loc);
5189     Builder.ClearInsertionPoint();
5190 
5191     // FIXME: For now, emit a dummy basic block because expression
5192     // emitters in general are not ready to handle emitting expressions
5193     // at unreachable points.
5194     EnsureInsertPoint();
5195 
5196     // Return a reasonable RValue.
5197     return GetUndefRValue(RetTy);
5198   }
5199 
5200   // Perform the swifterror writeback.
5201   if (swiftErrorTemp.isValid()) {
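    // The callee reported any error through the swifterror temporary; copy it
    // back to the caller-visible error variable.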
5202     llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
5203     Builder.CreateStore(errorResult, swiftErrorArg);
5204   }
5205 
5206   // Emit any call-associated writebacks immediately.  Arguably this
5207   // should happen after any return-value munging.
5208   if (CallArgs.hasWritebacks())
5209     emitWritebacks(*this, CallArgs);
5210 
5211   // The stack cleanup for inalloca arguments has to run out of the normal
5212   // lexical order, so deactivate it and run it manually here.
5213   CallArgs.freeArgumentMemory(*this);
5214 
5215   // Extract the return value.
5216   RValue Ret = [&] {
5217     switch (RetAI.getKind()) {
5218     case ABIArgInfo::CoerceAndExpand: {
5219       auto coercionType = RetAI.getCoerceAndExpandType();
5220 
5221       Address addr = SRetPtr;
5222       addr = Builder.CreateElementBitCast(addr, coercionType);
5223 
5224       assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
5225       bool requiresExtract = isa<llvm::StructType>(CI->getType());
5226 
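      // Illustratively (types not from this file): for a coercion type such
      // as { i64, [4 x i8] (padding), i32 }, the call returns the unpadded
      // struct { i64, i32 }; each non-padding element is extracted below and
      // stored into its slot in the return slot temporary.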
5227       unsigned unpaddedIndex = 0;
5228       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5229         llvm::Type *eltType = coercionType->getElementType(i);
5230         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
5231         Address eltAddr = Builder.CreateStructGEP(addr, i);
5232         llvm::Value *elt = CI;
5233         if (requiresExtract)
5234           elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
5235         else
5236           assert(unpaddedIndex == 0);
5237         Builder.CreateStore(elt, eltAddr);
5238       }
5240       LLVM_FALLTHROUGH;
5241     }
5242 
5243     case ABIArgInfo::InAlloca:
5244     case ABIArgInfo::Indirect: {
5245       RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
5246       if (UnusedReturnSizePtr)
5247         PopCleanupBlock();
5248       return ret;
5249     }
5250 
5251     case ABIArgInfo::Ignore:
5252       // If we are ignoring a result that the call actually produced, make
5253       // sure to construct the appropriate return value for our caller.
5254       return GetUndefRValue(RetTy);
5255 
5256     case ABIArgInfo::Extend:
5257     case ABIArgInfo::Direct: {
5258       llvm::Type *RetIRTy = ConvertType(RetTy);
5259       if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
5260         switch (getEvaluationKind(RetTy)) {
5261         case TEK_Complex: {
5262           llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
5263           llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
5264           return RValue::getComplex(std::make_pair(Real, Imag));
5265         }
5266         case TEK_Aggregate: {
5267           Address DestPtr = ReturnValue.getValue();
5268           bool DestIsVolatile = ReturnValue.isVolatile();
5269 
5270           if (!DestPtr.isValid()) {
5271             DestPtr = CreateMemTemp(RetTy, "agg.tmp");
5272             DestIsVolatile = false;
5273           }
5274           EmitAggregateStore(CI, DestPtr, DestIsVolatile);
5275           return RValue::getAggregate(DestPtr);
5276         }
5277         case TEK_Scalar: {
5278           // If the returned value doesn't match RetIRTy, perform a bitcast
5279           // to coerce it.  This can happen due to trivial type mismatches.
5280           llvm::Value *V = CI;
5281           if (V->getType() != RetIRTy)
5282             V = Builder.CreateBitCast(V, RetIRTy);
5283           return RValue::get(V);
5284         }
5285         }
5286         llvm_unreachable("bad evaluation kind");
5287       }
5288 
5289       Address DestPtr = ReturnValue.getValue();
5290       bool DestIsVolatile = ReturnValue.isVolatile();
5291 
5292       if (!DestPtr.isValid()) {
5293         DestPtr = CreateMemTemp(RetTy, "coerce");
5294         DestIsVolatile = false;
5295       }
5296 
5297       // If the value is offset in memory, apply the offset now.
5298       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
5299       CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
5300 
5301       return convertTempToRValue(DestPtr, RetTy, SourceLocation());
5302     }
5303 
5304     case ABIArgInfo::Expand:
5305     case ABIArgInfo::IndirectAliased:
5306       llvm_unreachable("Invalid ABI kind for return argument");
5307     }
5308 
5309     llvm_unreachable("Unhandled ABIArgInfo::Kind");
5310   } ();
5311 
5312   // Emit the assume_aligned check on the return value.
5313   if (Ret.isScalar() && TargetDecl) {
5314     AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5315     AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5316   }
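  // Illustrative example (not from this file): for
  //   __attribute__((assume_aligned(64))) void *my_alloc(size_t);
  // an alignment assumption of 64 bytes is emitted on the returned pointer;
  // alloc_align(N) is handled analogously, with the alignment taken from the
  // call's Nth argument.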
5317 
5318   // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
5319   // we can't use the full cleanup mechanism.
5320   for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
5321     LifetimeEnd.Emit(*this, /*Flags=*/{});
5322 
5323   if (!ReturnValue.isExternallyDestructed() &&
5324       RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
5325     pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
5326                 RetTy);
5327 
5328   return Ret;
5329 }
5330 
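// For a virtual callee (e.g. a call "p->f()" where f is virtual), resolve the
// concrete function pointer through the C++ ABI's virtual table machinery;
// non-virtual callees are already concrete and are returned unchanged.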
5331 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
5332   if (isVirtual()) {
5333     const CallExpr *CE = getVirtualCallExpr();
5334     return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
5335         CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
5336         CE ? CE->getBeginLoc() : SourceLocation());
5337   }
5338 
5339   return *this;
5340 }
5341 
5342 /* VarArg handling */
5343 
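// Illustrative example (not from this file): in
//   int next(va_list ap) { return va_arg(ap, int); }
// the va_arg lowers through the target's ABIInfo::EmitVAArg, while
// __builtin_ms_va_arg on a __builtin_ms_va_list takes the EmitMSVAArg path.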
5344 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
5345   VAListAddr = VE->isMicrosoftABI()
5346                  ? EmitMSVAListRef(VE->getSubExpr())
5347                  : EmitVAListRef(VE->getSubExpr());
5348   QualType Ty = VE->getType();
5349   if (VE->isMicrosoftABI())
5350     return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
5351   return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
5352 }
5353