//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel:
    return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  case CC_SwiftAsync: return llvm::CallingConv::SwiftTail;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification. Either or both of RD and MD may be null. A null RD indicates
/// that there is no meaningful 'this' type, and a null MD can occur when
/// calling a method pointer.
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
                                         const CXXMethodDecl *MD) {
  QualType RecTy;
  if (RD)
    RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  else
    RecTy = Context.VoidTy;

  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getMethodQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
    llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    const FunctionProtoType *proto,
    unsigned prefixArgs,
    unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(
    const CodeGenTypes &CGT, SmallVectorImpl<CanQualType> &prefix,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
                                               bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(DeriveThisType(RD, MD));

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(DeriveThisType(MD->getParent(), MD));

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgCounts AddedArgs =
      TheCXXABI.buildStructorSignature(GD, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

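/// Collect the canonical parameter types of the given call arguments, for
/// building an ad hoc function signature.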
static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = PassProtoArgs
                              ? RequiredArgs::forPrototypePlus(
                                    FPT, TotalPrefixArgs + ExtraSuffixArgs)
                              : RequiredArgs::All;

  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))
    return arrangeCXXStructorDeclaration(GD);

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

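/// Arrange the signature of a Microsoft C++ ABI constructor closure (a
/// copying or default closure): 'this', then for a copying closure the
/// source object, then an extra integer argument when the class has
/// virtual bases.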
const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(DeriveThisType(RD, CD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 /*instanceMethod*/ false, /*chainCall*/ false,
                                 argTypes, proto->getExtInfo(), paramInfos,
                                 RequiredArgs::forPrototypePlus(proto, 1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/{}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with fewer args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

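/// Arrange a call to a function described by an existing signature, adding
/// canonical types for any extra (e.g. variadic) arguments actually passed.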
const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(
    CanQualType resultType, bool instanceMethod, bool chainCall,
    ArrayRef<CanQualType> argTypes, FunctionType::ExtInfo info,
    ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
    RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift || info.getCC() == CC_SwiftAsync) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
  assert(!required.allowsOptionalArgs() ||
         required.getNumRequiredArgs() <= argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new (buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->CmseNSCall = info.getCmseNSCall();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

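/// Determine how the given type should be expanded, if at all.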
static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return std::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return std::make_unique<RecordExpansion>(std::move(Bases),
                                             std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return std::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return std::make_unique<NoExpansion>();
}

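/// Return the number of scalar IR arguments produced by expanding the given
/// type.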
static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32(
        BaseAddr.getElementType(), BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                         llvm::Function::arg_iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(
        *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
          LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
          ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress(*this);
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
    // primitive store.
    assert(isa<NoExpansion>(Exp.get()));
    if (LV.isBitField())
      EmitStoreThroughLValue(RValue::get(&*AI++), LV);
    else
      EmitStoreOfScalar(&*AI++, LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign,
                                           const Twine &Name = "tmp") {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits not present in
/// the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
                                             DstSize.getFixedSize(), CGF);
    SrcTy = Src.getElementType();
  }

  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (!SrcSize.isScalable() && !DstSize.isScalable() &&
      SrcSize.getFixedSize() >= DstSize.getFixedSize()) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when have access
    // to that information.
    Src = CGF.Builder.CreateBitCast(Src,
                                    Ty->getPointerTo(Src.getAddressSpace()));
    return CGF.Builder.CreateLoad(Src);
  }

  // If coercing a fixed vector to a scalable vector for ABI compatibility, and
  // the types match, use the llvm.experimental.vector.insert intrinsic to
  // perform the conversion.
  if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
    if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
      // vector, use a vector insert and bitcast the result.
      bool NeedsBitcast = false;
      auto PredType =
          llvm::ScalableVectorType::get(CGF.Builder.getInt1Ty(), 16);
      llvm::Type *OrigType = Ty;
      if (ScalableDst == PredType &&
          FixedSrc->getElementType() == CGF.Builder.getInt8Ty()) {
        ScalableDst = llvm::ScalableVectorType::get(CGF.Builder.getInt8Ty(), 2);
        NeedsBitcast = true;
      }
      if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
        auto *Load = CGF.Builder.CreateLoad(Src);
        auto *UndefVec = llvm::UndefValue::get(ScalableDst);
        auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
        llvm::Value *Result = CGF.Builder.CreateInsertVector(
            ScalableDst, UndefVec, Load, Zero, "castScalableSve");
        if (NeedsBitcast)
          Result = CGF.Builder.CreateBitCast(Result, OrigType);
        return Result;
      }
    }
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp =
      CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
  CGF.Builder.CreateMemCpy(
      Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
      Src.getAlignment().getAsAlign(),
      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
                                         bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Address EltPtr = Builder.CreateStructGEP(Dest, i);
      llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
      Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
                                             SrcSize.getFixedSize(), CGF);
    DstTy = Dst.getElementType();
  }

  llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
  llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
  if (SrcPtrTy && DstPtrTy &&
      SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
    Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (isa<llvm::ScalableVectorType>(SrcTy) ||
      isa<llvm::ScalableVectorType>(DstTy) ||
      SrcSize.getFixedSize() <= DstSize.getFixedSize()) {
    Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
    CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when have access
    // to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    CGF.Builder.CreateMemCpy(
        Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
        Tmp.getAlignment().getAsAlign(),
        llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
  }
}

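/// Apply the "direct" offset from the ABI argument info, if any, to the given
/// address, and bitcast the result to the argument's coercion type.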
1332 static void CreateCoercedStore(llvm::Value *Src, 1333 Address Dst, 1334 bool DstIsVolatile, 1335 CodeGenFunction &CGF) { 1336 llvm::Type *SrcTy = Src->getType(); 1337 llvm::Type *DstTy = Dst.getElementType(); 1338 if (SrcTy == DstTy) { 1339 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); 1340 return; 1341 } 1342 1343 llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); 1344 1345 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) { 1346 Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, 1347 SrcSize.getFixedSize(), CGF); 1348 DstTy = Dst.getElementType(); 1349 } 1350 1351 llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy); 1352 llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy); 1353 if (SrcPtrTy && DstPtrTy && 1354 SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) { 1355 Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy); 1356 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); 1357 return; 1358 } 1359 1360 // If the source and destination are integer or pointer types, just do an 1361 // extension or truncation to the desired type. 1362 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) && 1363 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) { 1364 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF); 1365 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); 1366 return; 1367 } 1368 1369 llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy); 1370 1371 // If store is legal, just bitcast the src pointer. 1372 if (isa<llvm::ScalableVectorType>(SrcTy) || 1373 isa<llvm::ScalableVectorType>(DstTy) || 1374 SrcSize.getFixedSize() <= DstSize.getFixedSize()) { 1375 Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy); 1376 CGF.EmitAggregateStore(Src, Dst, DstIsVolatile); 1377 } else { 1378 // Otherwise do coercion through memory. This is stupid, but 1379 // simple. 1380 1381 // Generally SrcSize is never greater than DstSize, since this means we are 1382 // losing bits. However, this can happen in cases where the structure has 1383 // additional padding, for example due to a user specified alignment. 1384 // 1385 // FIXME: Assert that we aren't truncating non-padding bits when have access 1386 // to that information. 1387 Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment()); 1388 CGF.Builder.CreateStore(Src, Tmp); 1389 CGF.Builder.CreateMemCpy( 1390 Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(), 1391 Tmp.getAlignment().getAsAlign(), 1392 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize())); 1393 } 1394 } 1395 1396 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, 1397 const ABIArgInfo &info) { 1398 if (unsigned offset = info.getDirectOffset()) { 1399 addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty); 1400 addr = CGF.Builder.CreateConstInBoundsByteGEP(addr, 1401 CharUnits::fromQuantity(offset)); 1402 addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType()); 1403 } 1404 return addr; 1405 } 1406 1407 namespace { 1408 1409 /// Encapsulates information about the way function arguments from 1410 /// CGFunctionInfo should be passed to actual LLVM IR function. 1411 class ClangToLLVMArgMapping { 1412 static const unsigned InvalidIndex = ~0U; 1413 unsigned InallocaArgNo; 1414 unsigned SRetArgNo; 1415 unsigned TotalIRArgs; 1416 1417 /// Arguments of LLVM IR function corresponding to single Clang argument. 
1418 struct IRArgs { 1419 unsigned PaddingArgIndex; 1420 // Argument is expanded to IR arguments at positions 1421 // [FirstArgIndex, FirstArgIndex + NumberOfArgs). 1422 unsigned FirstArgIndex; 1423 unsigned NumberOfArgs; 1424 1425 IRArgs() 1426 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex), 1427 NumberOfArgs(0) {} 1428 }; 1429 1430 SmallVector<IRArgs, 8> ArgInfo; 1431 1432 public: 1433 ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI, 1434 bool OnlyRequiredArgs = false) 1435 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0), 1436 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) { 1437 construct(Context, FI, OnlyRequiredArgs); 1438 } 1439 1440 bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; } 1441 unsigned getInallocaArgNo() const { 1442 assert(hasInallocaArg()); 1443 return InallocaArgNo; 1444 } 1445 1446 bool hasSRetArg() const { return SRetArgNo != InvalidIndex; } 1447 unsigned getSRetArgNo() const { 1448 assert(hasSRetArg()); 1449 return SRetArgNo; 1450 } 1451 1452 unsigned totalIRArgs() const { return TotalIRArgs; } 1453 1454 bool hasPaddingArg(unsigned ArgNo) const { 1455 assert(ArgNo < ArgInfo.size()); 1456 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; 1457 } 1458 unsigned getPaddingArgNo(unsigned ArgNo) const { 1459 assert(hasPaddingArg(ArgNo)); 1460 return ArgInfo[ArgNo].PaddingArgIndex; 1461 } 1462 1463 /// Returns index of first IR argument corresponding to ArgNo, and their 1464 /// quantity. 1465 std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const { 1466 assert(ArgNo < ArgInfo.size()); 1467 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, 1468 ArgInfo[ArgNo].NumberOfArgs); 1469 } 1470 1471 private: 1472 void construct(const ASTContext &Context, const CGFunctionInfo &FI, 1473 bool OnlyRequiredArgs); 1474 }; 1475 1476 void ClangToLLVMArgMapping::construct(const ASTContext &Context, 1477 const CGFunctionInfo &FI, 1478 bool OnlyRequiredArgs) { 1479 unsigned IRArgNo = 0; 1480 bool SwapThisWithSRet = false; 1481 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1482 1483 if (RetAI.getKind() == ABIArgInfo::Indirect) { 1484 SwapThisWithSRet = RetAI.isSRetAfterThis(); 1485 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++; 1486 } 1487 1488 unsigned ArgNo = 0; 1489 unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); 1490 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs; 1491 ++I, ++ArgNo) { 1492 assert(I != FI.arg_end()); 1493 QualType ArgType = I->type; 1494 const ABIArgInfo &AI = I->info; 1495 // Collect data about IR arguments corresponding to Clang argument ArgNo. 1496 auto &IRArgs = ArgInfo[ArgNo]; 1497 1498 if (AI.getPaddingType()) 1499 IRArgs.PaddingArgIndex = IRArgNo++; 1500 1501 switch (AI.getKind()) { 1502 case ABIArgInfo::Extend: 1503 case ABIArgInfo::Direct: { 1504 // FIXME: handle sseregparm someday... 1505 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType()); 1506 if (AI.isDirect() && AI.getCanBeFlattened() && STy) { 1507 IRArgs.NumberOfArgs = STy->getNumElements(); 1508 } else { 1509 IRArgs.NumberOfArgs = 1; 1510 } 1511 break; 1512 } 1513 case ABIArgInfo::Indirect: 1514 case ABIArgInfo::IndirectAliased: 1515 IRArgs.NumberOfArgs = 1; 1516 break; 1517 case ABIArgInfo::Ignore: 1518 case ABIArgInfo::InAlloca: 1519 // ignore and inalloca doesn't have matching LLVM parameters. 
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  const auto &RI = FI.getReturnInfo();
  return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(FloatModeKind::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(FloatModeKind::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(FloatModeKind::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::IndirectAliased:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
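      // For example (illustrative target behavior): on i686 Windows, a
      // function whose aggregate return goes through the inalloca sret slot
      // gets an IR return type of `%struct.Ret*` rather than `void`, so the
      // caller can recover the sret pointer from the return value.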
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect:
  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::CoerceAndExpand:
    resultType = retAI.getUnpaddedCoerceAndExpandType();
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is alloca addr
      // space.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo(
          CGM.getDataLayout().getAllocaAddrSpace());
      break;
    }
    case ABIArgInfo::IndirectAliased: {
      assert(NumIRArgs == 1);
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace());
      break;
    }
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
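      // Illustrative example (assumed lowering, matching the flattening done
      // below): a struct coerced to { i32, i32 } with getCanBeFlattened()
      // set is emitted as two separate i32 parameters, e.g.
      //   define void @f(i32 %s.coerce0, i32 %s.coerce1)
      // rather than one first-class aggregate parameter
      //   define void @f({ i32, i32 } %s.coerce)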
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
        *ArgTypesIter++ = EltTy;
      }
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  return GetFunctionType(GD);
}

static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
                                               llvm::AttrBuilder &FuncAttrs,
                                               const FunctionProtoType *FPT) {
  if (!FPT)
    return;

  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
      FPT->isNothrow())
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}

static void AddAttributesFromAssumes(llvm::AttrBuilder &FuncAttrs,
                                     const Decl *Callee) {
  if (!Callee)
    return;

  SmallVector<StringRef, 4> Attrs;

  for (const AssumptionAttr *AA : Callee->specific_attrs<AssumptionAttr>())
    AA->getAssumption().split(Attrs, ",");

  if (!Attrs.empty())
    FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
                           llvm::join(Attrs.begin(), Attrs.end(), ","));
}

bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context,
                                          QualType ReturnType) {
  // We can't just discard the return value for a record type with a
  // complex destructor or a non-trivially copyable type.
  if (const RecordType *RT =
          ReturnType.getCanonicalType()->getAs<RecordType>()) {
    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      return ClassDecl->hasTrivialDestructor();
  }
  return ReturnType.isTriviallyCopyableType(Context);
}

void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
                                                 bool HasOptnone,
                                                 bool AttrOnCallSite,
                                                 llvm::AttrBuilder &FuncAttrs) {
  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
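  // For example (flag mapping as implemented below): -Os arrives here as
  // OptimizeSize == 1 and adds `optsize`; -Oz arrives as OptimizeSize == 2
  // and adds both `optsize` and `minsize`.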
  if (!HasOptnone) {
    if (CodeGenOpts.OptimizeSize)
      FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
    if (CodeGenOpts.OptimizeSize == 2)
      FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  }

  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.IndirectTlsSegRefs)
    FuncAttrs.addAttribute("indirect-tls-seg-refs");
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name))
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
    if (!CodeGenOpts.TrapFuncName.empty())
      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
  } else {
    StringRef FpKind;
    switch (CodeGenOpts.getFramePointer()) {
    case CodeGenOptions::FramePointerKind::None:
      FpKind = "none";
      break;
    case CodeGenOptions::FramePointerKind::NonLeaf:
      FpKind = "non-leaf";
      break;
    case CodeGenOptions::FramePointerKind::All:
      FpKind = "all";
      break;
    }
    FuncAttrs.addAttribute("frame-pointer", FpKind);

    if (CodeGenOpts.LessPreciseFPMAD)
      FuncAttrs.addAttribute("less-precise-fpmad", "true");

    if (CodeGenOpts.NullPointerIsValid)
      FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);

    if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
      FuncAttrs.addAttribute("denormal-fp-math",
                             CodeGenOpts.FPDenormalMode.str());
    if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
      FuncAttrs.addAttribute(
          "denormal-fp-math-f32",
          CodeGenOpts.FP32DenormalMode.str());
    }

    if (LangOpts.getFPExceptionMode() == LangOptions::FPE_Ignore)
      FuncAttrs.addAttribute("no-trapping-math", "true");

    // Strict (compliant) code is the default, so only add this attribute to
    // indicate that we are trying to work around a problem case.
    if (!CodeGenOpts.StrictFloatCastOverflow)
      FuncAttrs.addAttribute("strict-float-cast-overflow", "false");

    // TODO: Are these all needed?
    // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
    if (LangOpts.NoHonorInfs)
      FuncAttrs.addAttribute("no-infs-fp-math", "true");
    if (LangOpts.NoHonorNaNs)
      FuncAttrs.addAttribute("no-nans-fp-math", "true");
    if (LangOpts.ApproxFunc)
      FuncAttrs.addAttribute("approx-func-fp-math", "true");
    if (LangOpts.UnsafeFPMath)
      FuncAttrs.addAttribute("unsafe-fp-math", "true");
    if (CodeGenOpts.SoftFloat)
      FuncAttrs.addAttribute("use-soft-float", "true");
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));
    if (LangOpts.NoSignedZero)
      FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");

    // TODO: Reciprocal estimate codegen options should apply to instructions?
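    // For example (hypothetical flag value): -mrecip=divf,vec-sqrtd would
    // arrive here as {"divf", "vec-sqrtd"} and be joined into
    // "reciprocal-estimates"="divf,vec-sqrtd" on the function below.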
    const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
    if (!Recips.empty())
      FuncAttrs.addAttribute("reciprocal-estimates",
                             llvm::join(Recips, ","));

    if (!CodeGenOpts.PreferVectorWidth.empty() &&
        CodeGenOpts.PreferVectorWidth != "none")
      FuncAttrs.addAttribute("prefer-vector-width",
                             CodeGenOpts.PreferVectorWidth);

    if (CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("stackrealign");
    if (CodeGenOpts.Backchain)
      FuncAttrs.addAttribute("backchain");
    if (CodeGenOpts.EnableSegmentedStacks)
      FuncAttrs.addAttribute("split-stack");

    if (CodeGenOpts.SpeculativeLoadHardening)
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
  }

  if (getLangOpts().assumeFunctionsAreConvergent()) {
    // Conservatively, mark all functions and calls in CUDA and OpenCL as
    // convergent (meaning, they may call an intrinsically convergent op, such
    // as __syncthreads() / barrier(), and so can't have certain optimizations
    // applied around them). LLVM will remove this attribute where it safely
    // can.
    FuncAttrs.addAttribute(llvm::Attribute::Convergent);
  }

  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
    // Exceptions aren't supported in CUDA device code.
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
  }

  for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
    StringRef Var, Value;
    std::tie(Var, Value) = Attr.split('=');
    FuncAttrs.addAttribute(Var, Value);
  }
}

void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
  llvm::AttrBuilder FuncAttrs;
  getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
                               /* AttrOnCallSite = */ false, FuncAttrs);
  // TODO: call GetCPUAndFeaturesAttributes?
  F.addFnAttrs(FuncAttrs);
}

void CodeGenModule::addDefaultFunctionDefinitionAttributes(
    llvm::AttrBuilder &attrs) {
  getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
                               /*for call*/ false, attrs);
  GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
}

static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
                                   const LangOptions &LangOpts,
                                   const NoBuiltinAttr *NBA = nullptr) {
  auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
    SmallString<32> AttributeName;
    AttributeName += "no-builtin-";
    AttributeName += BuiltinName;
    FuncAttrs.addAttribute(AttributeName);
  };

  // First, handle the language options passed through -fno-builtin.
  if (LangOpts.NoBuiltin) {
    // -fno-builtin disables them all.
    FuncAttrs.addAttribute("no-builtins");
    return;
  }

  // Then, add attributes for builtins specified through -fno-builtin-<name>.
  llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);

  // Now, let's check the __attribute__((no_builtin("..."))) attribute added
  // to the source.
  if (!NBA)
    return;

  // If there is a wildcard in the builtin names specified through the
  // attribute, disable them all.
  if (llvm::is_contained(NBA->builtinNames(), "*")) {
    FuncAttrs.addAttribute("no-builtins");
    return;
  }

  // And last, add the rest of the builtin names.
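  // For example (hypothetical attribute use):
  // __attribute__((no_builtin("memcpy"))) on the function definition yields
  // a "no-builtin-memcpy" function attribute here.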
  llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
}

static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
                             const llvm::DataLayout &DL, const ABIArgInfo &AI,
                             bool CheckCoerce = true) {
  llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
  if (AI.getKind() == ABIArgInfo::Indirect)
    return true;
  if (AI.getKind() == ABIArgInfo::Extend)
    return true;
  if (!DL.typeSizeEqualsStoreSize(Ty))
    // TODO: This will result in a modest amount of values not marked noundef
    // when they could be. We care about values that *invisibly* contain undef
    // bits from the perspective of LLVM IR.
    return false;
  if (CheckCoerce && AI.canHaveCoerceToType()) {
    llvm::Type *CoerceTy = AI.getCoerceToType();
    if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
                                  DL.getTypeSizeInBits(Ty)))
      // If we're coercing to a type with a greater size than the canonical
      // one, we're introducing new undef bits.
      // Coercing to a type of smaller or equal size is ok, as we know that
      // there's no internal padding (typeSizeEqualsStoreSize).
      return false;
  }
  if (QTy->isExtIntType())
    return true;
  if (QTy->isReferenceType())
    return true;
  if (QTy->isNullPtrType())
    return false;
  if (QTy->isMemberPointerType())
    // TODO: Some member pointers are `noundef`, but it depends on the ABI. For
    // now, never mark them.
    return false;
  if (QTy->isScalarType()) {
    if (const ComplexType *Complex = dyn_cast<ComplexType>(QTy))
      return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false);
    return true;
  }
  if (const VectorType *Vector = dyn_cast<VectorType>(QTy))
    return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false);
  if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
    return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false);
  if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))
    return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false);

  // TODO: Some structs may be `noundef`, in specific situations.
  return false;
}

/// Construct the IR attribute list of a function or call.
///
/// When adding an attribute, please consider where it should be handled:
///
/// - getDefaultFunctionAttributes is for attributes that are essentially
///   part of the global target configuration (but perhaps can be
///   overridden on a per-function basis).  Adding attributes there
///   will cause them to also be set in frontends that build on Clang's
///   target-configuration logic, as well as for code defined in library
///   modules such as CUDA's libdevice.
///
/// - ConstructAttributeList builds on top of getDefaultFunctionAttributes
///   and adds declaration-specific, convention-specific, and
///   frontend-specific logic.  The last is of particular importance:
///   attributes that restrict how the frontend generates code must be
///   added here rather than getDefaultFunctionAttributes.
///
void CodeGenModule::ConstructAttributeList(StringRef Name,
                                           const CGFunctionInfo &FI,
                                           CGCalleeInfo CalleeInfo,
                                           llvm::AttributeList &AttrList,
                                           unsigned &CallingConv,
                                           bool AttrOnCallSite, bool IsThunk) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;

  // Collect function IR attributes from the CC lowering.
  // We'll collect the parameter and result attributes later.
  CallingConv = FI.getEffectiveCallingConvention();
  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
  if (FI.isCmseNSCall())
    FuncAttrs.addAttribute("cmse_nonsecure_call");

  // Collect function IR attributes from the callee prototype if we have one.
  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
                                     CalleeInfo.getCalleeFunctionProtoType());

  const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();

  // Attach assumption attributes to the declaration. If this is a call
  // site, attach assumptions from the caller to the call as well.
  AddAttributesFromAssumes(FuncAttrs, TargetDecl);

  bool HasOptnone = false;
  // The NoBuiltinAttr attached to the target FunctionDecl.
  const NoBuiltinAttr *NBA = nullptr;

  // Collect function IR attributes based on declaration-specific
  // information.
  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<ColdAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Cold);
    if (TargetDecl->hasAttr<HotAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Hot);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
    if (TargetDecl->hasAttr<ConvergentAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Convergent);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      AddAttributesFromFunctionProtoType(
          getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
      if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
        // A sane operator new returns a non-aliasing pointer.
        auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
        if (getCodeGenOpts().AssumeSaneOperatorNew &&
            (Kind == OO_New || Kind == OO_Array_New))
          RetAttrs.addAttribute(llvm::Attribute::NoAlias);
      }
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      const bool IsVirtualCall = MD && MD->isVirtual();
      // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
      // virtual function. These attributes are not inherited by overriding
      // functions.
      if (!(AttrOnCallSite && IsVirtualCall)) {
        if (Fn->isNoReturn())
          FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
        NBA = Fn->getAttr<NoBuiltinAttr>();
      }
      // Only place nomerge attribute on call sites, never functions. This
      // allows it to work on indirect virtual function calls.
      if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
        FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
    }

    // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // gcc specifies that 'const' functions have greater restrictions than
      // 'pure' functions, so they also cannot have infinite loops.
      FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // gcc specifies that 'pure' functions cannot have infinite loops.
      FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
        !CodeGenOpts.NullPointerIsValid)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
    if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
      FuncAttrs.addAttribute("no_caller_saved_registers");
    if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
    if (TargetDecl->hasAttr<LeafAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCallback);

    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
    if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
      Optional<unsigned> NumElemsParam;
      if (AllocSize->getNumElemsParam().isValid())
        NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
      FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
                                 NumElemsParam);
    }

    if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
      if (getLangOpts().OpenCLVersion <= 120) {
        // In OpenCL v1.2, work groups are always uniform.
        FuncAttrs.addAttribute("uniform-work-group-size", "true");
      } else {
        // In OpenCL v2.0, work groups may or may not be uniform. The
        // '-cl-uniform-work-group-size' compile option gives the compiler a
        // hint that the global work-size is a multiple of the work-group size
        // specified to clEnqueueNDRangeKernel (i.e. work groups are uniform).
        FuncAttrs.addAttribute("uniform-work-group-size",
                               llvm::toStringRef(CodeGenOpts.UniformWGSize));
      }
    }
  }

  // Attach "no-builtins" attributes to:
  // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
  // * definitions: "no-builtins" or "no-builtin-<name>" only.
  // The attributes can come from:
  // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
  // * FunctionDecl attributes: __attribute__((no_builtin(...)))
  addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);

  // Collect function IR attributes based on global settings.
  getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);

  // Override some default IR attributes based on declaration-specific
  // information.
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
      FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<NoSplitStackAttr>())
      FuncAttrs.removeAttribute("split-stack");

    // Add NonLazyBind attribute to function declarations when -fno-plt
    // is used.
    // FIXME: what if we just haven't processed the function definition
    // yet, or if it's an external definition like C99 inline?
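    // For illustration (describing the effect, not code from this file):
    // with -fno-plt, a call to an undefined function is emitted against a
    // declaration marked `nonlazybind`, which lets the backend call through
    // the GOT instead of a lazily-bound PLT stub.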
    if (CodeGenOpts.NoPLT) {
      if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
        if (!Fn->isDefined() && !AttrOnCallSite) {
          FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
        }
      }
    }
  }

  // Add "sample-profile-suffix-elision-policy" attribute for internal linkage
  // functions with -funique-internal-linkage-names.
  if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
    if (isa<FunctionDecl>(TargetDecl)) {
      if (this->getFunctionLinkage(CalleeInfo.getCalleeDecl()) ==
          llvm::GlobalValue::InternalLinkage)
        FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",
                               "selected");
    }
  }

  // Collect non-call-site function IR attributes from declaration-specific
  // information.
  if (!AttrOnCallSite) {
    if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
      FuncAttrs.addAttribute("cmse_nonsecure_entry");

    // Decide whether tail calls should be disabled.
    auto shouldDisableTailCalls = [&] {
      // Should this be honored in getDefaultFunctionAttributes?
      if (CodeGenOpts.DisableTailCalls)
        return true;

      if (!TargetDecl)
        return false;

      if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
          TargetDecl->hasAttr<AnyX86InterruptAttr>())
        return true;

      if (CodeGenOpts.NoEscapingBlockTailCalls) {
        if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
          if (!BD->doesNotEscape())
            return true;
      }

      return false;
    };
    if (shouldDisableTailCalls())
      FuncAttrs.addAttribute("disable-tail-calls", "true");

    // CPU/feature overrides.  addDefaultFunctionDefinitionAttributes
    // handles these separately to set them based on the global defaults.
    GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
  }

  // Collect attributes from arguments and return values.
  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  const llvm::DataLayout &DL = getDataLayout();

  // C++ explicitly makes returning undefined values UB. C's rule only applies
  // to used values, so we never mark them noundef for now.
  bool HasStrictReturn = getLangOpts().CPlusPlus;
  if (TargetDecl && HasStrictReturn) {
    if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl))
      HasStrictReturn &= !FDecl->isExternC();
    else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl))
      // Function pointer.
      HasStrictReturn &= !VDecl->isExternC();
  }

  // We don't want to be too aggressive with the return checking, unless
  // it's explicit in the code opts or we're using an appropriate sanitizer.
  // Try to respect what the programmer intended.
  HasStrictReturn &= getCodeGenOpts().StrictReturn ||
                     !MayDropFunctionReturn(getContext(), RetTy) ||
                     getLangOpts().Sanitize.has(SanitizerKind::Memory) ||
                     getLangOpts().Sanitize.has(SanitizerKind::Return);

  // Determine if the return type could be partially undef.
  if (CodeGenOpts.EnableNoundefAttrs && HasStrictReturn) {
    if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
        DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
      RetAttrs.addAttribute(llvm::Attribute::NoUndef);
  }

  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetAI.isSignExt())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    LLVM_FALLTHROUGH;
  case ABIArgInfo::Direct:
    if (RetAI.getInReg())
      RetAttrs.addAttribute(llvm::Attribute::InReg);
    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::InAlloca:
  case ABIArgInfo::Indirect: {
    // inalloca and sret disable readnone and readonly.
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::CoerceAndExpand:
    break;

  case ABIArgInfo::Expand:
  case ABIArgInfo::IndirectAliased:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (!IsThunk) {
    // FIXME: fix this properly, https://reviews.llvm.org/D100388
    if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
      QualType PTy = RefTy->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
        RetAttrs.addDereferenceableAttr(
            getMinimumObjectSize(PTy).getQuantity());
      if (getContext().getTargetAddressSpace(PTy) == 0 &&
          !CodeGenOpts.NullPointerIsValid)
        RetAttrs.addAttribute(llvm::Attribute::NonNull);
      if (PTy->isObjectType()) {
        llvm::Align Alignment =
            getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
        RetAttrs.addAlignmentAttr(Alignment);
      }
    }
  }

  bool hasUsedSRet = false;
  SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());

  // Attach attributes to sret.
  if (IRFunctionArgs.hasSRetArg()) {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
    hasUsedSRet = true;
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
    ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
        llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
  }

  // Attach attributes to inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    llvm::AttrBuilder Attrs;
    Attrs.addInAllocaAttr(FI.getArgStruct());
    ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
        llvm::AttributeSet::get(getLLVMContext(), Attrs);
  }

  // Apply `nonnull`, `dereferenceable(N)` and `align N` to the `this`
  // argument, unless this is a thunk function.
  // FIXME: fix this properly, https://reviews.llvm.org/D100388
  if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() &&
      !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) {
    auto IRArgs = IRFunctionArgs.getIRArgs(0);

    assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");

    llvm::AttrBuilder Attrs;

    QualType ThisTy =
        FI.arg_begin()->type.castAs<PointerType>()->getPointeeType();

    if (!CodeGenOpts.NullPointerIsValid &&
        getContext().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
      Attrs.addAttribute(llvm::Attribute::NonNull);
      Attrs.addDereferenceableAttr(getMinimumObjectSize(ThisTy).getQuantity());
    } else {
      // FIXME dereferenceable should be correct here, regardless of
      // NullPointerIsValid. However, dereferenceable currently does not always
      // respect NullPointerIsValid and may imply nonnull and break the program.
      // See https://reviews.llvm.org/D66618 for discussions.
      Attrs.addDereferenceableOrNullAttr(
          getMinimumObjectSize(
              FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
              .getQuantity());
    }

    llvm::Align Alignment =
        getNaturalTypeAlignment(ThisTy, /*BaseInfo=*/nullptr,
                                /*TBAAInfo=*/nullptr, /*forPointeeType=*/true)
            .getAsAlign();
    Attrs.addAlignmentAttr(Alignment);

    ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);
  }

  unsigned ArgNo = 0;
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
                                          E = FI.arg_end();
       I != E; ++I, ++ArgNo) {
    QualType ParamType = I->type;
    const ABIArgInfo &AI = I->info;
    llvm::AttrBuilder Attrs;

    // Add attribute for padding argument, if necessary.
    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
      if (AI.getPaddingInReg()) {
        ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
            llvm::AttributeSet::get(
                getLLVMContext(),
                llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
      }
    }

    // Decide whether the argument we're handling could be partially undef.
    bool ArgNoUndef = DetermineNoUndef(ParamType, getTypes(), DL, AI);
    if (CodeGenOpts.EnableNoundefAttrs && ArgNoUndef)
      Attrs.addAttribute(llvm::Attribute::NoUndef);

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (AI.isSignExt())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else
        Attrs.addAttribute(llvm::Attribute::ZExt);
      LLVM_FALLTHROUGH;
    case ABIArgInfo::Direct:
      if (ArgNo == 0 && FI.isChainCall())
        Attrs.addAttribute(llvm::Attribute::Nest);
      else if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);
      Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));
      break;

    case ABIArgInfo::Indirect: {
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));

      auto *Decl = ParamType->getAsRecordDecl();
      if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
          Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs)
        // When calling the function, the pointer passed in will be the only
        // reference to the underlying object. Mark it accordingly.
        Attrs.addAttribute(llvm::Attribute::NoAlias);

      // TODO: We could add the byref attribute if not byval, but it would
      // require updating many testcases.

      CharUnits Align = AI.getIndirectAlign();

      // In a byval argument, it is important that the required
      // alignment of the type is honored, as LLVM might be creating a
      // *new* stack object, and needs to know what alignment to give
      // it. (Sometimes it can deduce a sensible alignment on its own,
      // but not if clang decides it must emit a packed struct, or the
      // user specifies increased alignment requirements.)
      //
      // This is different from indirect *not* byval, where the object
      // exists already, and the align attribute is purely
      // informative.
      assert(!Align.isZero());

      // For now, only add this when we have a byval argument.
      // TODO: be less lazy about updating test cases.
      if (AI.getIndirectByVal())
        Attrs.addAlignmentAttr(Align.getQuantity());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
          .removeAttribute(llvm::Attribute::ReadNone);

      break;
    }
    case ABIArgInfo::IndirectAliased: {
      CharUnits Align = AI.getIndirectAlign();
      Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));
      Attrs.addAlignmentAttr(Align.getQuantity());
      break;
    }
    case ABIArgInfo::Ignore:
    case ABIArgInfo::Expand:
    case ABIArgInfo::CoerceAndExpand:
      break;

    case ABIArgInfo::InAlloca:
      // inalloca disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
          .removeAttribute(llvm::Attribute::ReadNone);
      continue;
    }

    if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
      QualType PTy = RefTy->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
        Attrs.addDereferenceableAttr(
            getMinimumObjectSize(PTy).getQuantity());
      if (getContext().getTargetAddressSpace(PTy) == 0 &&
          !CodeGenOpts.NullPointerIsValid)
        Attrs.addAttribute(llvm::Attribute::NonNull);
      if (PTy->isObjectType()) {
        llvm::Align Alignment =
            getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
        Attrs.addAlignmentAttr(Alignment);
      }
    }

    switch (FI.getExtParameterInfo(ArgNo).getABI()) {
    case ParameterABI::Ordinary:
      break;

    case ParameterABI::SwiftIndirectResult: {
      // Add 'sret' if we haven't already used it for something, but
      // only if the result is void.
      if (!hasUsedSRet && RetTy->isVoidType()) {
        Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));
        hasUsedSRet = true;
      }

      // Add 'noalias' in either case.
      Attrs.addAttribute(llvm::Attribute::NoAlias);

      // Add 'dereferenceable' and 'alignment'.
      auto PTy = ParamType->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
        auto info = getContext().getTypeInfoInChars(PTy);
        Attrs.addDereferenceableAttr(info.Width.getQuantity());
        Attrs.addAlignmentAttr(info.Align.getAsAlign());
      }
      break;
    }

    case ParameterABI::SwiftErrorResult:
      Attrs.addAttribute(llvm::Attribute::SwiftError);
      break;

    case ParameterABI::SwiftContext:
      Attrs.addAttribute(llvm::Attribute::SwiftSelf);
      break;

    case ParameterABI::SwiftAsyncContext:
      Attrs.addAttribute(llvm::Attribute::SwiftAsync);
      break;
    }

    if (FI.getExtParameterInfo(ArgNo).isNoEscape())
      Attrs.addAttribute(llvm::Attribute::NoCapture);

    if (Attrs.hasAttributes()) {
      unsigned FirstIRArg, NumIRArgs;
      std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      for (unsigned i = 0; i < NumIRArgs; i++)
        ArgAttrs[FirstIRArg + i] =
            llvm::AttributeSet::get(getLLVMContext(), Attrs);
    }
  }
  assert(ArgNo == FI.arg_size());

  AttrList = llvm::AttributeList::get(
      getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
      llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}

/// Returns the attribute (either a parameter attribute or a function
/// attribute) that declares argument ArgNo to be non-null.
static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
                                         QualType ArgType, unsigned ArgNo) {
  // FIXME: __attribute__((nonnull)) can also be applied to:
  //   - references to pointers, where the pointee is known to be
  //     nonnull (apparently a Clang extension)
  //   - transparent unions containing pointers
  // In the former case, LLVM IR cannot represent the constraint. In
  // the latter case, we have no guarantee that the transparent union
  // is in fact passed as a pointer.
  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
    return nullptr;
  // First, check attribute on parameter itself.
  if (PVD) {
    if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
      return ParmNNAttr;
  }
  // Check function attributes.
  if (!FD)
    return nullptr;
  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
    if (NNAttr->isNonNull(ArgNo))
      return NNAttr;
  }
  return nullptr;
}

namespace {
struct CopyBackSwiftError final : EHScopeStack::Cleanup {
  Address Temp;
  Address Arg;
  CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
    CGF.Builder.CreateStore(errorValue, Arg);
  }
};
} // namespace

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
    // Naked functions don't have prologues.
    return;

  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getReturnType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
  assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());

  // If we're using inalloca, all the memory arguments are GEPs off of the last
  // parameter, which is a pointer to the complete memory area.
  Address ArgStruct = Address::invalid();
  if (IRFunctionArgs.hasInallocaArg()) {
    ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
                        FI.getArgStructAlignment());

    assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
  }

  // Name the struct return parameter.
  if (IRFunctionArgs.hasSRetArg()) {
    auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
    AI->setName("agg.result");
    AI->addAttr(llvm::Attribute::NoAlias);
  }

  // Track if we received the parameter as a pointer (indirect, byval, or
  // inalloca).  If we already have a pointer, EmitParmDecl doesn't need to
  // copy it into a local alloca for us.
  SmallVector<ParamValue, 16> ArgVals;
  ArgVals.reserve(Args.size());

  // Create a pointer value for every parameter declaration.  This usually
  // entails copying one or more LLVM IR arguments into an alloca.  Don't push
  // any cleanups or do anything that might unwind.  We do that separately, so
  // we can push the cleanups in the correct order for the ABI.
  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
        isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
    // We are converting from ABIArgInfo type to VarDecl type directly, unless
    // the parameter is promoted.
    // In this case we convert to the CGFunctionInfo::ArgInfo type with
    // subsequent argument demotion.
    QualType Ty = isPromoted ? info_it->type : Arg->getType();
    assert(hasScalarEvaluationKind(Ty) ==
           hasScalarEvaluationKind(Arg->getType()));

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgI.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      auto FieldIndex = ArgI.getInAllocaFieldIndex();
      Address V =
          Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
      if (ArgI.getInAllocaIndirect())
        V = Address(Builder.CreateLoad(V),
                    getContext().getTypeAlignInChars(Ty));
      ArgVals.push_back(ParamValue::forIndirect(V));
      break;
    }

    case ABIArgInfo::Indirect:
    case ABIArgInfo::IndirectAliased: {
      assert(NumIRArgs == 1);
      Address ParamAddr =
          Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());

      if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference. All we
        // need to do is realign the value, if requested. Also, if the address
        // may be aliased, copy it to ensure that the parameter variable is
        // mutable and has a unique address, as C requires.
        Address V = ParamAddr;
        if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
          Address AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          Builder.CreateMemCpy(
              AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
              ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
              llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
          V = AlignedTemp;
        }
        ArgVals.push_back(ParamValue::forIndirect(V));
      } else {
        // Load scalar value from indirect argument.
        llvm::Value *V =
            EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ParamValue::forDirect(V));
      }
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      auto AI = Fn->getArg(FirstIRArg);
      llvm::Type *LTy = ConvertType(Arg->getType());

      // Prepare parameter attributes. So far, only attributes for pointer
      // parameters are prepared. See
      // http://llvm.org/docs/LangRef.html#paramattrs.
      if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
          ArgI.getCoerceToType()->isPointerTy()) {
        assert(NumIRArgs == 1);

        if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
          // Set `nonnull` attribute if any.
          if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
                             PVD->getFunctionScopeIndex()) &&
              !CGM.getCodeGenOpts().NullPointerIsValid)
            AI->addAttr(llvm::Attribute::NonNull);

          QualType OTy = PVD->getOriginalType();
          if (const auto *ArrTy =
                  getContext().getAsConstantArrayType(OTy)) {
            // A C99 array parameter declaration with the static keyword also
            // indicates dereferenceability, and if the size is constant we can
            // use the dereferenceable attribute (which requires the size in
            // bytes).
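            // Worked example (assuming a 4-byte int): for
            //   void f(int a[static 4]);
            // the parameter gets `align 4` from the element type's natural
            // alignment and `dereferenceable(16)` from 4 bytes * 4 elements.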
            if (ArrTy->getSizeModifier() == ArrayType::Static) {
              QualType ETy = ArrTy->getElementType();
              llvm::Align Alignment =
                  CGM.getNaturalTypeAlignment(ETy).getAsAlign();
              AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
              uint64_t ArrSize = ArrTy->getSize().getZExtValue();
              if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
                  ArrSize) {
                llvm::AttrBuilder Attrs;
                Attrs.addDereferenceableAttr(
                    getContext().getTypeSizeInChars(ETy).getQuantity() *
                    ArrSize);
                AI->addAttrs(Attrs);
              } else if (getContext().getTargetInfo().getNullPointerValue(
                             ETy.getAddressSpace()) == 0 &&
                         !CGM.getCodeGenOpts().NullPointerIsValid) {
                AI->addAttr(llvm::Attribute::NonNull);
              }
            }
          } else if (const auto *ArrTy =
                         getContext().getAsVariableArrayType(OTy)) {
            // For C99 VLAs with the static keyword, we don't know the size so
            // we can't use the dereferenceable attribute, but in addrspace(0)
            // we know that it must be nonnull.
            if (ArrTy->getSizeModifier() == VariableArrayType::Static) {
              QualType ETy = ArrTy->getElementType();
              llvm::Align Alignment =
                  CGM.getNaturalTypeAlignment(ETy).getAsAlign();
              AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
              if (!getContext().getTargetAddressSpace(ETy) &&
                  !CGM.getCodeGenOpts().NullPointerIsValid)
                AI->addAttr(llvm::Attribute::NonNull);
            }
          }

          // Set `align` attribute if any.
          const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
          if (!AVAttr)
            if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
              AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
          if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
            // If the alignment-assumption sanitizer is enabled, we do *not*
            // add the alignment attribute here, but instead emit a normal
            // alignment assumption, so that the UBSan check can still fire.
            llvm::ConstantInt *AlignmentCI =
                cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
            uint64_t AlignmentInt =
                AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
            if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
              AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
              AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(
                  llvm::Align(AlignmentInt)));
            }
          }
        }

        // Set 'noalias' if an argument type has the `restrict` qualifier.
        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);
      }

      // Prepare the argument value.  If we have the trivial case, handle it
      // with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);

        // LLVM expects swifterror parameters to be used in very restricted
        // ways.  Copy the value into a less-restricted temporary.
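        // Sketch of what is emitted below (type names illustrative): for a
        // swifterror parameter %err of IR type %Err**, roughly
        //   %swifterror.temp = alloca %Err*
        //   %initial = load %Err*, %Err** %err
        //   store %Err* %initial, %Err** %swifterror.temp
        // and the body then uses %swifterror.temp; the CopyBackSwiftError
        // cleanup pushed below copies the value back into %err on exit.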
        llvm::Value *V = AI;
        if (FI.getExtParameterInfo(ArgNo).getABI()
              == ParameterABI::SwiftErrorResult) {
          QualType pointeeTy = Ty->getPointeeType();
          assert(pointeeTy->isPointerType());
          Address temp =
              CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
          Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
          llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
          Builder.CreateStore(incomingErrorValue, temp);
          V = temp.getPointer();

          // Push a cleanup to copy the value back at the end of the function.
          // The convention does not guarantee that the value will be written
          // back if the function exits with an unwind exception.
          EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
        }

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        // Because of merging of function types from multiple decls it is
        // possible for the type of an argument to not match the corresponding
        // type in the function type. Since we are codegening the callee
        // in here, add a cast to the argument type.
        llvm::Type *LTy = ConvertType(Arg->getType());
        if (V->getType() != LTy)
          V = Builder.CreateBitCast(V, LTy);

        ArgVals.push_back(ParamValue::forDirect(V));
        break;
      }

      // VLST (vector-length-specific type) arguments are coerced to VLATs
      // (vector-length-agnostic types) at the function boundary for ABI
      // consistency. If this is a VLST that was coerced to a VLAT at the
      // function boundary and the types match up, use
      // llvm.experimental.vector.extract to convert back to the original
      // VLST.
      if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
        llvm::Value *Coerced = Fn->getArg(FirstIRArg);
        if (auto *VecTyFrom =
                dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
          // If we are casting a scalable 16 x i1 predicate vector to a fixed
          // i8 vector, bitcast the source and use a vector extract.
          auto PredType =
              llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
          if (VecTyFrom == PredType &&
              VecTyTo->getElementType() == Builder.getInt8Ty()) {
            VecTyFrom = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
            Coerced = Builder.CreateBitCast(Coerced, VecTyFrom);
          }
          if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
            llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);

            assert(NumIRArgs == 1);
            Coerced->setName(Arg->getName() + ".coerce");
            ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector(
                VecTyTo, Coerced, Zero, "castFixedSve")));
            break;
          }
        }
      }

      Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
                                     Arg->getName());

      // Pointer to store into.
      Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
          STy->getNumElements() > 1) {
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::Type *DstTy = Ptr.getElementType();
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);

        Address AddrToStoreInto = Address::invalid();
        if (SrcSize <= DstSize) {
          AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
        } else {
          AddrToStoreInto =
              CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
        }

        assert(STy->getNumElements() == NumIRArgs);
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          auto AI = Fn->getArg(FirstIRArg + i);
          AI->setName(Arg->getName() + ".coerce" + Twine(i));
          Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
          Builder.CreateStore(AI, EltPtr);
        }

        if (SrcSize > DstSize) {
          Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
        }

      } else {
        // Simple case, just do a coerced store of the argument into the
        // alloca.
        assert(NumIRArgs == 1);
        auto AI = Fn->getArg(FirstIRArg);
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
        llvm::Value *V =
            EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ParamValue::forDirect(V));
      } else {
        ArgVals.push_back(ParamValue::forIndirect(Alloca));
      }
      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      // Reconstruct into a temporary.
      Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
      ArgVals.push_back(ParamValue::forIndirect(alloca));

      auto coercionType = ArgI.getCoerceAndExpandType();
      alloca = Builder.CreateElementBitCast(alloca, coercionType);

      unsigned argIndex = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
          continue;

        auto eltAddr = Builder.CreateStructGEP(alloca, i);
        auto elt = Fn->getArg(argIndex++);
        Builder.CreateStore(elt, eltAddr);
      }
      assert(argIndex == FirstIRArg + NumIRArgs);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
      LValue LV = MakeAddrLValue(Alloca, Ty);
      ArgVals.push_back(ParamValue::forIndirect(Alloca));

      auto FnArgIter = Fn->arg_begin() + FirstIRArg;
      ExpandTypeFromArgs(Ty, LV, FnArgIter);
      assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
      for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
        auto AI = Fn->getArg(FirstIRArg + i);
        AI->setName(Arg->getName() + "." + Twine(i));
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      // Initialize the local variable appropriately.
2988 if (!hasScalarEvaluationKind(Ty)) {
2989 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2990 } else {
2991 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2992 ArgVals.push_back(ParamValue::forDirect(U));
2993 }
2994 break;
2995 }
2996 }
2997
2998 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2999 for (int I = Args.size() - 1; I >= 0; --I)
3000 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
3001 } else {
3002 for (unsigned I = 0, E = Args.size(); I != E; ++I)
3003 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
3004 }
3005 }
3006
3007 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
3008 while (insn->use_empty()) {
3009 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
3010 if (!bitcast) return;
3011
3012 // This is "safe" because we would have used a ConstantExpr otherwise.
3013 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
3014 bitcast->eraseFromParent();
3015 }
3016 }
3017
3018 /// Try to emit a fused autorelease of a return result.
3019 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
3020 llvm::Value *result) {
3021 // We must be immediately following the cast.
3022 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
3023 if (BB->empty()) return nullptr;
3024 if (&BB->back() != result) return nullptr;
3025
3026 llvm::Type *resultType = result->getType();
3027
3028 // result is in a BasicBlock and is therefore an Instruction.
3029 llvm::Instruction *generator = cast<llvm::Instruction>(result);
3030
3031 SmallVector<llvm::Instruction *, 4> InstsToKill;
3032
3033 // Look for:
3034 // %generator = bitcast %type1* %generator2 to %type2*
3035 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3036 // We would have emitted this as a constant if the operand weren't
3037 // an Instruction.
3038 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
3039
3040 // Require the generator to be immediately followed by the cast.
3041 if (generator->getNextNode() != bitcast)
3042 return nullptr;
3043
3044 InstsToKill.push_back(bitcast);
3045 }
3046
3047 // Look for:
3048 // %generator = call i8* @objc_retain(i8* %originalResult)
3049 // or
3050 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
3051 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3052 if (!call) return nullptr;
3053
3054 bool doRetainAutorelease;
3055
3056 if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
3057 doRetainAutorelease = true;
3058 } else if (call->getCalledOperand() ==
3059 CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
3060 doRetainAutorelease = false;
3061
3062 // If we emitted an assembly marker for this call (and the
3063 // ARCEntrypoints field should have been set if so), go looking
3064 // for that call. If we can't find it, we can't do this
3065 // optimization. But it should always be the immediately previous
3066 // instruction, unless we needed bitcasts around the call.
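// In IR terms, the sequence we expect here is roughly (a sketch; the marker
// instruction is target-specific, e.g. a no-op inline-asm call):
//   call void asm sideeffect "<marker>", ""()
//   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %x)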
3067 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { 3068 llvm::Instruction *prev = call->getPrevNode(); 3069 assert(prev); 3070 if (isa<llvm::BitCastInst>(prev)) { 3071 prev = prev->getPrevNode(); 3072 assert(prev); 3073 } 3074 assert(isa<llvm::CallInst>(prev)); 3075 assert(cast<llvm::CallInst>(prev)->getCalledOperand() == 3076 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); 3077 InstsToKill.push_back(prev); 3078 } 3079 } else { 3080 return nullptr; 3081 } 3082 3083 result = call->getArgOperand(0); 3084 InstsToKill.push_back(call); 3085 3086 // Keep killing bitcasts, for sanity. Note that we no longer care 3087 // about precise ordering as long as there's exactly one use. 3088 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 3089 if (!bitcast->hasOneUse()) break; 3090 InstsToKill.push_back(bitcast); 3091 result = bitcast->getOperand(0); 3092 } 3093 3094 // Delete all the unnecessary instructions, from latest to earliest. 3095 for (auto *I : InstsToKill) 3096 I->eraseFromParent(); 3097 3098 // Do the fused retain/autorelease if we were asked to. 3099 if (doRetainAutorelease) 3100 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 3101 3102 // Cast back to the result type. 3103 return CGF.Builder.CreateBitCast(result, resultType); 3104 } 3105 3106 /// If this is a +1 of the value of an immutable 'self', remove it. 3107 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 3108 llvm::Value *result) { 3109 // This is only applicable to a method with an immutable 'self'. 3110 const ObjCMethodDecl *method = 3111 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 3112 if (!method) return nullptr; 3113 const VarDecl *self = method->getSelfDecl(); 3114 if (!self->getType().isConstQualified()) return nullptr; 3115 3116 // Look for a retain call. 3117 llvm::CallInst *retainCall = 3118 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 3119 if (!retainCall || retainCall->getCalledOperand() != 3120 CGF.CGM.getObjCEntrypoints().objc_retain) 3121 return nullptr; 3122 3123 // Look for an ordinary load of 'self'. 3124 llvm::Value *retainedValue = retainCall->getArgOperand(0); 3125 llvm::LoadInst *load = 3126 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 3127 if (!load || load->isAtomic() || load->isVolatile() || 3128 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer()) 3129 return nullptr; 3130 3131 // Okay! Burn it all down. This relies for correctness on the 3132 // assumption that the retain is emitted as part of the return and 3133 // that thereafter everything is used "linearly". 3134 llvm::Type *resultType = result->getType(); 3135 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 3136 assert(retainCall->use_empty()); 3137 retainCall->eraseFromParent(); 3138 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 3139 3140 return CGF.Builder.CreateBitCast(load, resultType); 3141 } 3142 3143 /// Emit an ARC autorelease of the result of a function. 3144 /// 3145 /// \return the value to actually return from the function 3146 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 3147 llvm::Value *result) { 3148 // If we're returning 'self', kill the initial retain. This is a 3149 // heuristic attempt to "encourage correctness" in the really unfortunate 3150 // case where we have a return of self during a dealloc and we desperately 3151 // need to avoid the possible autorelease. 
3152 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
3153 return self;
3154
3155 // At -O0, try to emit a fused retain/autorelease.
3156 if (CGF.shouldUseFusedARCCalls())
3157 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
3158 return fused;
3159
3160 return CGF.EmitARCAutoreleaseReturnValue(result);
3161 }
3162
3163 /// Heuristically search for a dominating store to the return-value slot.
3164 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
3165 // Check whether a User is a store whose pointer operand is the ReturnValue.
3166 // We are looking for stores to the ReturnValue, not for stores of the
3167 // ReturnValue to some other location.
3168 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
3169 auto *SI = dyn_cast<llvm::StoreInst>(U);
3170 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
3171 return nullptr;
3172 // These aren't actually possible for non-coerced returns, and we
3173 // only care about non-coerced returns on this code path.
3174 assert(!SI->isAtomic() && !SI->isVolatile());
3175 return SI;
3176 };
3177 // If there are multiple uses of the return-value slot, just check
3178 // for something immediately preceding the IP. Sometimes this can
3179 // happen with how we generate implicit returns; it can also happen
3180 // with noreturn cleanups.
3181 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
3182 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3183 if (IP->empty()) return nullptr;
3184 llvm::Instruction *I = &IP->back();
3185
3186 // Skip lifetime markers
3187 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
3188 IE = IP->rend();
3189 II != IE; ++II) {
3190 if (llvm::IntrinsicInst *Intrinsic =
3191 dyn_cast<llvm::IntrinsicInst>(&*II)) {
3192 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
3193 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
3194 ++II;
3195 if (II == IE)
3196 break;
3197 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
3198 continue;
3199 }
3200 }
3201 I = &*II;
3202 break;
3203 }
3204
3205 return GetStoreIfValid(I);
3206 }
3207
3208 llvm::StoreInst *store =
3209 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
3210 if (!store) return nullptr;
3211
3212 // Now do a quick-and-dirty dominance check: just walk up the
3213 // single-predecessor chain from the current insertion point.
3214 llvm::BasicBlock *StoreBB = store->getParent();
3215 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3216 while (IP != StoreBB) {
3217 if (!(IP = IP->getSinglePredecessor()))
3218 return nullptr;
3219 }
3220
3221 // Okay, the store's basic block dominates the insertion point; we
3222 // can do our thing.
3223 return store;
3224 }
3225
3226 // Helper functions for EmitCMSEClearRecord
3227
3228 // Set the bits corresponding to a field having width `BitWidth` and located at
3229 // offset `BitOffset` (from the least significant bit) within a storage unit of
3230 // `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
3231 // Use little-endian layout, i.e. `Bits[0]` is the LSB.
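// Worked example: with CharWidth == 8, BitOffset == 10 and BitWidth == 6, the
// field occupies bits 10..15, so only Bits[1] is updated, with the mask 0xFC
// (bits 2..7 of the second byte).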
3232 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset, 3233 int BitWidth, int CharWidth) { 3234 assert(CharWidth <= 64); 3235 assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth); 3236 3237 int Pos = 0; 3238 if (BitOffset >= CharWidth) { 3239 Pos += BitOffset / CharWidth; 3240 BitOffset = BitOffset % CharWidth; 3241 } 3242 3243 const uint64_t Used = (uint64_t(1) << CharWidth) - 1; 3244 if (BitOffset + BitWidth >= CharWidth) { 3245 Bits[Pos++] |= (Used << BitOffset) & Used; 3246 BitWidth -= CharWidth - BitOffset; 3247 BitOffset = 0; 3248 } 3249 3250 while (BitWidth >= CharWidth) { 3251 Bits[Pos++] = Used; 3252 BitWidth -= CharWidth; 3253 } 3254 3255 if (BitWidth > 0) 3256 Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset; 3257 } 3258 3259 // Set the bits corresponding to a field having width `BitWidth` and located at 3260 // offset `BitOffset` (from the least significant bit) within a storage unit of 3261 // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of 3262 // `Bits` corresponds to one target byte. Use target endian layout. 3263 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset, 3264 int StorageSize, int BitOffset, int BitWidth, 3265 int CharWidth, bool BigEndian) { 3266 3267 SmallVector<uint64_t, 8> TmpBits(StorageSize); 3268 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth); 3269 3270 if (BigEndian) 3271 std::reverse(TmpBits.begin(), TmpBits.end()); 3272 3273 for (uint64_t V : TmpBits) 3274 Bits[StorageOffset++] |= V; 3275 } 3276 3277 static void setUsedBits(CodeGenModule &, QualType, int, 3278 SmallVectorImpl<uint64_t> &); 3279 3280 // Set the bits in `Bits`, which correspond to the value representations of 3281 // the actual members of the record type `RTy`. Note that this function does 3282 // not handle base classes, virtual tables, etc, since they cannot happen in 3283 // CMSE function arguments or return. The bit mask corresponds to the target 3284 // memory layout, i.e. it's endian dependent. 3285 static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset, 3286 SmallVectorImpl<uint64_t> &Bits) { 3287 ASTContext &Context = CGM.getContext(); 3288 int CharWidth = Context.getCharWidth(); 3289 const RecordDecl *RD = RTy->getDecl()->getDefinition(); 3290 const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD); 3291 const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD); 3292 3293 int Idx = 0; 3294 for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) { 3295 const FieldDecl *F = *I; 3296 3297 if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) || 3298 F->getType()->isIncompleteArrayType()) 3299 continue; 3300 3301 if (F->isBitField()) { 3302 const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F); 3303 setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(), 3304 BFI.StorageSize / CharWidth, BFI.Offset, 3305 BFI.Size, CharWidth, 3306 CGM.getDataLayout().isBigEndian()); 3307 continue; 3308 } 3309 3310 setUsedBits(CGM, F->getType(), 3311 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits); 3312 } 3313 } 3314 3315 // Set the bits in `Bits`, which correspond to the value representations of 3316 // the elements of an array type `ATy`. 
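// The pattern for a single element is computed once into a temporary and then
// OR-ed into `Bits` at each element's offset; e.g. for `int A[2]` with 4-byte
// ints at Offset 0, the same 4-byte pattern lands at byte offsets 0 and 4.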
3317 static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy, 3318 int Offset, SmallVectorImpl<uint64_t> &Bits) { 3319 const ASTContext &Context = CGM.getContext(); 3320 3321 QualType ETy = Context.getBaseElementType(ATy); 3322 int Size = Context.getTypeSizeInChars(ETy).getQuantity(); 3323 SmallVector<uint64_t, 4> TmpBits(Size); 3324 setUsedBits(CGM, ETy, 0, TmpBits); 3325 3326 for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) { 3327 auto Src = TmpBits.begin(); 3328 auto Dst = Bits.begin() + Offset + I * Size; 3329 for (int J = 0; J < Size; ++J) 3330 *Dst++ |= *Src++; 3331 } 3332 } 3333 3334 // Set the bits in `Bits`, which correspond to the value representations of 3335 // the type `QTy`. 3336 static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset, 3337 SmallVectorImpl<uint64_t> &Bits) { 3338 if (const auto *RTy = QTy->getAs<RecordType>()) 3339 return setUsedBits(CGM, RTy, Offset, Bits); 3340 3341 ASTContext &Context = CGM.getContext(); 3342 if (const auto *ATy = Context.getAsConstantArrayType(QTy)) 3343 return setUsedBits(CGM, ATy, Offset, Bits); 3344 3345 int Size = Context.getTypeSizeInChars(QTy).getQuantity(); 3346 if (Size <= 0) 3347 return; 3348 3349 std::fill_n(Bits.begin() + Offset, Size, 3350 (uint64_t(1) << Context.getCharWidth()) - 1); 3351 } 3352 3353 static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits, 3354 int Pos, int Size, int CharWidth, 3355 bool BigEndian) { 3356 assert(Size > 0); 3357 uint64_t Mask = 0; 3358 if (BigEndian) { 3359 for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E; 3360 ++P) 3361 Mask = (Mask << CharWidth) | *P; 3362 } else { 3363 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos; 3364 do 3365 Mask = (Mask << CharWidth) | *--P; 3366 while (P != End); 3367 } 3368 return Mask; 3369 } 3370 3371 // Emit code to clear the bits in a record, which aren't a part of any user 3372 // declared member, when the record is a function return. 3373 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src, 3374 llvm::IntegerType *ITy, 3375 QualType QTy) { 3376 assert(Src->getType() == ITy); 3377 assert(ITy->getScalarSizeInBits() <= 64); 3378 3379 const llvm::DataLayout &DataLayout = CGM.getDataLayout(); 3380 int Size = DataLayout.getTypeStoreSize(ITy); 3381 SmallVector<uint64_t, 4> Bits(Size); 3382 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits); 3383 3384 int CharWidth = CGM.getContext().getCharWidth(); 3385 uint64_t Mask = 3386 buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian()); 3387 3388 return Builder.CreateAnd(Src, Mask, "cmse.clear"); 3389 } 3390 3391 // Emit code to clear the bits in a record, which aren't a part of any user 3392 // declared member, when the record is a function argument. 3393 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src, 3394 llvm::ArrayType *ATy, 3395 QualType QTy) { 3396 const llvm::DataLayout &DataLayout = CGM.getDataLayout(); 3397 int Size = DataLayout.getTypeStoreSize(ATy); 3398 SmallVector<uint64_t, 16> Bits(Size); 3399 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits); 3400 3401 // Clear each element of the LLVM array. 
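// For example, a [2 x i32] value produces two extract/and/insert triples,
// with each element's 4-byte mask built from the corresponding bytes of
// `Bits`.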
3402 int CharWidth = CGM.getContext().getCharWidth();
3403 int CharsPerElt =
3404 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3405 int MaskIndex = 0;
3406 llvm::Value *R = llvm::UndefValue::get(ATy);
3407 for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3408 uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
3409 DataLayout.isBigEndian());
3410 MaskIndex += CharsPerElt;
3411 llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3412 llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3413 R = Builder.CreateInsertValue(R, T1, I);
3414 }
3415
3416 return R;
3417 }
3418
3419 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
3420 bool EmitRetDbgLoc,
3421 SourceLocation EndLoc) {
3422 if (FI.isNoReturn()) {
3423 // Noreturn functions don't return.
3424 EmitUnreachable(EndLoc);
3425 return;
3426 }
3427
3428 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3429 // Naked functions don't have epilogues.
3430 Builder.CreateUnreachable();
3431 return;
3432 }
3433
3434 // Functions with no result always return void.
3435 if (!ReturnValue.isValid()) {
3436 Builder.CreateRetVoid();
3437 return;
3438 }
3439
3440 llvm::DebugLoc RetDbgLoc;
3441 llvm::Value *RV = nullptr;
3442 QualType RetTy = FI.getReturnType();
3443 const ABIArgInfo &RetAI = FI.getReturnInfo();
3444
3445 switch (RetAI.getKind()) {
3446 case ABIArgInfo::InAlloca:
3447 // Aggregates get evaluated directly into the destination. Sometimes we
3448 // need to return the sret value in a register, though.
3449 assert(hasAggregateEvaluationKind(RetTy));
3450 if (RetAI.getInAllocaSRet()) {
3451 llvm::Function::arg_iterator EI = CurFn->arg_end();
3452 --EI;
3453 llvm::Value *ArgStruct = &*EI;
3454 llvm::Value *SRet = Builder.CreateStructGEP(
3455 EI->getType()->getPointerElementType(), ArgStruct,
3456 RetAI.getInAllocaFieldIndex());
3457 llvm::Type *Ty =
3458 cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
3459 RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret");
3460 }
3461 break;
3462
3463 case ABIArgInfo::Indirect: {
3464 auto AI = CurFn->arg_begin();
3465 if (RetAI.isSRetAfterThis())
3466 ++AI;
3467 switch (getEvaluationKind(RetTy)) {
3468 case TEK_Complex: {
3469 ComplexPairTy RT =
3470 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
3471 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
3472 /*isInit*/ true);
3473 break;
3474 }
3475 case TEK_Aggregate:
3476 // Do nothing; aggregates get evaluated directly into the destination.
3477 break;
3478 case TEK_Scalar:
3479 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
3480 MakeNaturalAlignAddrLValue(&*AI, RetTy),
3481 /*isInit*/ true);
3482 break;
3483 }
3484 break;
3485 }
3486
3487 case ABIArgInfo::Extend:
3488 case ABIArgInfo::Direct:
3489 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
3490 RetAI.getDirectOffset() == 0) {
3491 // The internal return value temp will always have pointer-to-return-type
3492 // type; just do a load.
3493
3494 // If there is a dominating store to ReturnValue, we can elide
3495 // the load, zap the store, and usually zap the alloca.
3496 if (llvm::StoreInst *SI =
3497 findDominatingStoreToReturnValue(*this)) {
3498 // Reuse the debug location from the store unless there is
3499 // cleanup code to be emitted between the store and return
3500 // instruction.
3501 if (EmitRetDbgLoc && !AutoreleaseResult)
3502 RetDbgLoc = SI->getDebugLoc();
3503 // Get the stored value and nuke the now-dead store.
3504 RV = SI->getValueOperand();
3505 SI->eraseFromParent();
3506
3507 // Otherwise, we have to do a simple load.
3508 } else {
3509 RV = Builder.CreateLoad(ReturnValue);
3510 }
3511 } else {
3512 // If the value is offset in memory, apply the offset now.
3513 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
3514
3515 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
3516 }
3517
3518 // In ARC, end functions that return a retainable type with a call
3519 // to objc_autoreleaseReturnValue.
3520 if (AutoreleaseResult) {
3521 #ifndef NDEBUG
3522 // Type::isObjCRetainableType has to be called on a QualType that hasn't
3523 // been stripped of the typedefs, so we cannot use RetTy here. Get the
3524 // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
3525 // from CurCodeDecl or BlockInfo.
3526 QualType RT;
3527
3528 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3529 RT = FD->getReturnType();
3530 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3531 RT = MD->getReturnType();
3532 else if (isa<BlockDecl>(CurCodeDecl))
3533 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3534 else
3535 llvm_unreachable("Unexpected function/method type");
3536
3537 assert(getLangOpts().ObjCAutoRefCount &&
3538 !FI.isReturnsRetained() &&
3539 RT->isObjCRetainableType());
3540 #endif
3541 RV = emitAutoreleaseOfResult(*this, RV);
3542 }
3543
3544 break;
3545
3546 case ABIArgInfo::Ignore:
3547 break;
3548
3549 case ABIArgInfo::CoerceAndExpand: {
3550 auto coercionType = RetAI.getCoerceAndExpandType();
3551
3552 // Load all of the coerced elements out into results.
3553 llvm::SmallVector<llvm::Value*, 4> results;
3554 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
3555 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3556 auto coercedEltType = coercionType->getElementType(i);
3557 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3558 continue;
3559
3560 auto eltAddr = Builder.CreateStructGEP(addr, i);
3561 auto elt = Builder.CreateLoad(eltAddr);
3562 results.push_back(elt);
3563 }
3564
3565 // If we have one result, it's the single direct result type.
3566 if (results.size() == 1) {
3567 RV = results[0];
3568
3569 // Otherwise, we need to make a first-class aggregate.
3570 } else {
3571 // Construct a return type that lacks padding elements.
3572 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3573
3574 RV = llvm::UndefValue::get(returnType);
3575 for (unsigned i = 0, e = results.size(); i != e; ++i) {
3576 RV = Builder.CreateInsertValue(RV, results[i], i);
3577 }
3578 }
3579 break;
3580 }
3581 case ABIArgInfo::Expand:
3582 case ABIArgInfo::IndirectAliased:
3583 llvm_unreachable("Invalid ABI kind for return argument");
3584 }
3585
3586 llvm::Instruction *Ret;
3587 if (RV) {
3588 if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
3589 // For certain return types, clear padding bits, as they may reveal
3590 // sensitive information.
3591 // Small struct/union types are passed as integers.
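// A sketch, for a hypothetical struct of three chars returned in an i32 on a
// little-endian target: the epilogue would emit roughly
//   %cmse.clear = and i32 %ret, 16777215 ; clear the top (padding) byte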
3592 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType()); 3593 if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType())) 3594 RV = EmitCMSEClearRecord(RV, ITy, RetTy); 3595 } 3596 EmitReturnValueCheck(RV); 3597 Ret = Builder.CreateRet(RV); 3598 } else { 3599 Ret = Builder.CreateRetVoid(); 3600 } 3601 3602 if (RetDbgLoc) 3603 Ret->setDebugLoc(std::move(RetDbgLoc)); 3604 } 3605 3606 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) { 3607 // A current decl may not be available when emitting vtable thunks. 3608 if (!CurCodeDecl) 3609 return; 3610 3611 // If the return block isn't reachable, neither is this check, so don't emit 3612 // it. 3613 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) 3614 return; 3615 3616 ReturnsNonNullAttr *RetNNAttr = nullptr; 3617 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) 3618 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>(); 3619 3620 if (!RetNNAttr && !requiresReturnValueNullabilityCheck()) 3621 return; 3622 3623 // Prefer the returns_nonnull attribute if it's present. 3624 SourceLocation AttrLoc; 3625 SanitizerMask CheckKind; 3626 SanitizerHandler Handler; 3627 if (RetNNAttr) { 3628 assert(!requiresReturnValueNullabilityCheck() && 3629 "Cannot check nullability and the nonnull attribute"); 3630 AttrLoc = RetNNAttr->getLocation(); 3631 CheckKind = SanitizerKind::ReturnsNonnullAttribute; 3632 Handler = SanitizerHandler::NonnullReturn; 3633 } else { 3634 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl)) 3635 if (auto *TSI = DD->getTypeSourceInfo()) 3636 if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>()) 3637 AttrLoc = FTL.getReturnLoc().findNullabilityLoc(); 3638 CheckKind = SanitizerKind::NullabilityReturn; 3639 Handler = SanitizerHandler::NullabilityReturn; 3640 } 3641 3642 SanitizerScope SanScope(this); 3643 3644 // Make sure the "return" source location is valid. If we're checking a 3645 // nullability annotation, make sure the preconditions for the check are met. 3646 llvm::BasicBlock *Check = createBasicBlock("nullcheck"); 3647 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck"); 3648 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load"); 3649 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr); 3650 if (requiresReturnValueNullabilityCheck()) 3651 CanNullCheck = 3652 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition); 3653 Builder.CreateCondBr(CanNullCheck, Check, NoCheck); 3654 EmitBlock(Check); 3655 3656 // Now do the null check. 3657 llvm::Value *Cond = Builder.CreateIsNotNull(RV); 3658 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)}; 3659 llvm::Value *DynamicData[] = {SLocPtr}; 3660 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData); 3661 3662 EmitBlock(NoCheck); 3663 3664 #ifndef NDEBUG 3665 // The return location should not be used after the check has been emitted. 3666 ReturnLocation = Address::invalid(); 3667 #endif 3668 } 3669 3670 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 3671 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 3672 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 3673 } 3674 3675 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, 3676 QualType Ty) { 3677 // FIXME: Generate IR in one pass, rather than going back and fixing up these 3678 // placeholders. 
3679 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 3680 llvm::Type *IRPtrTy = IRTy->getPointerTo(); 3681 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo()); 3682 3683 // FIXME: When we generate this IR in one pass, we shouldn't need 3684 // this win32-specific alignment hack. 3685 CharUnits Align = CharUnits::fromQuantity(4); 3686 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align); 3687 3688 return AggValueSlot::forAddr(Address(Placeholder, Align), 3689 Ty.getQualifiers(), 3690 AggValueSlot::IsNotDestructed, 3691 AggValueSlot::DoesNotNeedGCBarriers, 3692 AggValueSlot::IsNotAliased, 3693 AggValueSlot::DoesNotOverlap); 3694 } 3695 3696 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 3697 const VarDecl *param, 3698 SourceLocation loc) { 3699 // StartFunction converted the ABI-lowered parameter(s) into a 3700 // local alloca. We need to turn that into an r-value suitable 3701 // for EmitCall. 3702 Address local = GetAddrOfLocalVar(param); 3703 3704 QualType type = param->getType(); 3705 3706 if (isInAllocaArgument(CGM.getCXXABI(), type)) { 3707 CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter"); 3708 } 3709 3710 // GetAddrOfLocalVar returns a pointer-to-pointer for references, 3711 // but the argument needs to be the original pointer. 3712 if (type->isReferenceType()) { 3713 args.add(RValue::get(Builder.CreateLoad(local)), type); 3714 3715 // In ARC, move out of consumed arguments so that the release cleanup 3716 // entered by StartFunction doesn't cause an over-release. This isn't 3717 // optimal -O0 code generation, but it should get cleaned up when 3718 // optimization is enabled. This also assumes that delegate calls are 3719 // performed exactly once for a set of arguments, but that should be safe. 3720 } else if (getLangOpts().ObjCAutoRefCount && 3721 param->hasAttr<NSConsumedAttr>() && 3722 type->isObjCRetainableType()) { 3723 llvm::Value *ptr = Builder.CreateLoad(local); 3724 auto null = 3725 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType())); 3726 Builder.CreateStore(null, local); 3727 args.add(RValue::get(ptr), type); 3728 3729 // For the most part, we just need to load the alloca, except that 3730 // aggregate r-values are actually pointers to temporaries. 3731 } else { 3732 args.add(convertTempToRValue(local, type, loc), type); 3733 } 3734 3735 // Deactivate the cleanup for the callee-destructed param that was pushed. 3736 if (type->isRecordType() && !CurFuncIsThunk && 3737 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() && 3738 param->needsDestruction(getContext())) { 3739 EHScopeStack::stable_iterator cleanup = 3740 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param)); 3741 assert(cleanup.isValid() && 3742 "cleanup for callee-destructed param not recorded"); 3743 // This unreachable is a temporary marker which will be removed later. 3744 llvm::Instruction *isActive = Builder.CreateUnreachable(); 3745 args.addArgCleanupDeactivation(cleanup, isActive); 3746 } 3747 } 3748 3749 static bool isProvablyNull(llvm::Value *addr) { 3750 return isa<llvm::ConstantPointerNull>(addr); 3751 } 3752 3753 /// Emit the actual writing-back of a writeback. 
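/// That is, load the current value of the temporary and store it back
/// through the original l-value, null-checking the destination first if it
/// is not provably non-null.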
3754 static void emitWriteback(CodeGenFunction &CGF, 3755 const CallArgList::Writeback &writeback) { 3756 const LValue &srcLV = writeback.Source; 3757 Address srcAddr = srcLV.getAddress(CGF); 3758 assert(!isProvablyNull(srcAddr.getPointer()) && 3759 "shouldn't have writeback for provably null argument"); 3760 3761 llvm::BasicBlock *contBB = nullptr; 3762 3763 // If the argument wasn't provably non-null, we need to null check 3764 // before doing the store. 3765 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3766 CGF.CGM.getDataLayout()); 3767 if (!provablyNonNull) { 3768 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 3769 contBB = CGF.createBasicBlock("icr.done"); 3770 3771 llvm::Value *isNull = 3772 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3773 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 3774 CGF.EmitBlock(writebackBB); 3775 } 3776 3777 // Load the value to writeback. 3778 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 3779 3780 // Cast it back, in case we're writing an id to a Foo* or something. 3781 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(), 3782 "icr.writeback-cast"); 3783 3784 // Perform the writeback. 3785 3786 // If we have a "to use" value, it's something we need to emit a use 3787 // of. This has to be carefully threaded in: if it's done after the 3788 // release it's potentially undefined behavior (and the optimizer 3789 // will ignore it), and if it happens before the retain then the 3790 // optimizer could move the release there. 3791 if (writeback.ToUse) { 3792 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 3793 3794 // Retain the new value. No need to block-copy here: the block's 3795 // being passed up the stack. 3796 value = CGF.EmitARCRetainNonBlock(value); 3797 3798 // Emit the intrinsic use here. 3799 CGF.EmitARCIntrinsicUse(writeback.ToUse); 3800 3801 // Load the old value (primitively). 3802 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); 3803 3804 // Put the new value in place (primitively). 3805 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 3806 3807 // Release the old value. 3808 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 3809 3810 // Otherwise, we can just do a normal lvalue store. 3811 } else { 3812 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 3813 } 3814 3815 // Jump to the continuation block. 3816 if (!provablyNonNull) 3817 CGF.EmitBlock(contBB); 3818 } 3819 3820 static void emitWritebacks(CodeGenFunction &CGF, 3821 const CallArgList &args) { 3822 for (const auto &I : args.writebacks()) 3823 emitWriteback(CGF, I); 3824 } 3825 3826 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, 3827 const CallArgList &CallArgs) { 3828 ArrayRef<CallArgList::CallArgCleanup> Cleanups = 3829 CallArgs.getCleanupsToDeactivate(); 3830 // Iterate in reverse to increase the likelihood of popping the cleanup. 3831 for (const auto &I : llvm::reverse(Cleanups)) { 3832 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP); 3833 I.IsActiveIP->eraseFromParent(); 3834 } 3835 } 3836 3837 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { 3838 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens())) 3839 if (uop->getOpcode() == UO_AddrOf) 3840 return uop->getSubExpr(); 3841 return nullptr; 3842 } 3843 3844 /// Emit an argument that's being passed call-by-writeback. 
That is, 3845 /// we are passing the address of an __autoreleased temporary; it 3846 /// might be copy-initialized with the current value of the given 3847 /// address, but it will definitely be copied out of after the call. 3848 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, 3849 const ObjCIndirectCopyRestoreExpr *CRE) { 3850 LValue srcLV; 3851 3852 // Make an optimistic effort to emit the address as an l-value. 3853 // This can fail if the argument expression is more complicated. 3854 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { 3855 srcLV = CGF.EmitLValue(lvExpr); 3856 3857 // Otherwise, just emit it as a scalar. 3858 } else { 3859 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr()); 3860 3861 QualType srcAddrType = 3862 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); 3863 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType); 3864 } 3865 Address srcAddr = srcLV.getAddress(CGF); 3866 3867 // The dest and src types don't necessarily match in LLVM terms 3868 // because of the crazy ObjC compatibility rules. 3869 3870 llvm::PointerType *destType = 3871 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); 3872 3873 // If the address is a constant null, just pass the appropriate null. 3874 if (isProvablyNull(srcAddr.getPointer())) { 3875 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), 3876 CRE->getType()); 3877 return; 3878 } 3879 3880 // Create the temporary. 3881 Address temp = CGF.CreateTempAlloca(destType->getElementType(), 3882 CGF.getPointerAlign(), 3883 "icr.temp"); 3884 // Loading an l-value can introduce a cleanup if the l-value is __weak, 3885 // and that cleanup will be conditional if we can't prove that the l-value 3886 // isn't null, so we need to register a dominating point so that the cleanups 3887 // system will make valid IR. 3888 CodeGenFunction::ConditionalEvaluation condEval(CGF); 3889 3890 // Zero-initialize it if we're not doing a copy-initialization. 3891 bool shouldCopy = CRE->shouldCopy(); 3892 if (!shouldCopy) { 3893 llvm::Value *null = 3894 llvm::ConstantPointerNull::get( 3895 cast<llvm::PointerType>(destType->getElementType())); 3896 CGF.Builder.CreateStore(null, temp); 3897 } 3898 3899 llvm::BasicBlock *contBB = nullptr; 3900 llvm::BasicBlock *originBB = nullptr; 3901 3902 // If the address is *not* known to be non-null, we need to switch. 3903 llvm::Value *finalArgument; 3904 3905 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3906 CGF.CGM.getDataLayout()); 3907 if (provablyNonNull) { 3908 finalArgument = temp.getPointer(); 3909 } else { 3910 llvm::Value *isNull = 3911 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3912 3913 finalArgument = CGF.Builder.CreateSelect(isNull, 3914 llvm::ConstantPointerNull::get(destType), 3915 temp.getPointer(), "icr.argument"); 3916 3917 // If we need to copy, then the load has to be conditional, which 3918 // means we need control flow. 3919 if (shouldCopy) { 3920 originBB = CGF.Builder.GetInsertBlock(); 3921 contBB = CGF.createBasicBlock("icr.cont"); 3922 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 3923 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 3924 CGF.EmitBlock(copyBB); 3925 condEval.begin(CGF); 3926 } 3927 } 3928 3929 llvm::Value *valueToUse = nullptr; 3930 3931 // Perform a copy if necessary. 
3932 if (shouldCopy) { 3933 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 3934 assert(srcRV.isScalar()); 3935 3936 llvm::Value *src = srcRV.getScalarVal(); 3937 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 3938 "icr.cast"); 3939 3940 // Use an ordinary store, not a store-to-lvalue. 3941 CGF.Builder.CreateStore(src, temp); 3942 3943 // If optimization is enabled, and the value was held in a 3944 // __strong variable, we need to tell the optimizer that this 3945 // value has to stay alive until we're doing the store back. 3946 // This is because the temporary is effectively unretained, 3947 // and so otherwise we can violate the high-level semantics. 3948 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 3949 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 3950 valueToUse = src; 3951 } 3952 } 3953 3954 // Finish the control flow if we needed it. 3955 if (shouldCopy && !provablyNonNull) { 3956 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 3957 CGF.EmitBlock(contBB); 3958 3959 // Make a phi for the value to intrinsically use. 3960 if (valueToUse) { 3961 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 3962 "icr.to-use"); 3963 phiToUse->addIncoming(valueToUse, copyBB); 3964 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 3965 originBB); 3966 valueToUse = phiToUse; 3967 } 3968 3969 condEval.end(CGF); 3970 } 3971 3972 args.addWriteback(srcLV, temp, valueToUse); 3973 args.add(RValue::get(finalArgument), CRE->getType()); 3974 } 3975 3976 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 3977 assert(!StackBase); 3978 3979 // Save the stack. 3980 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 3981 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); 3982 } 3983 3984 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 3985 if (StackBase) { 3986 // Restore the stack after the call. 3987 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 3988 CGF.Builder.CreateCall(F, StackBase); 3989 } 3990 } 3991 3992 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, 3993 SourceLocation ArgLoc, 3994 AbstractCallee AC, 3995 unsigned ParmNum) { 3996 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || 3997 SanOpts.has(SanitizerKind::NullabilityArg))) 3998 return; 3999 4000 // The param decl may be missing in a variadic function. 4001 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr; 4002 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum; 4003 4004 // Prefer the nonnull attribute if it's present. 
4005 const NonNullAttr *NNAttr = nullptr; 4006 if (SanOpts.has(SanitizerKind::NonnullAttribute)) 4007 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo); 4008 4009 bool CanCheckNullability = false; 4010 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) { 4011 auto Nullability = PVD->getType()->getNullability(getContext()); 4012 CanCheckNullability = Nullability && 4013 *Nullability == NullabilityKind::NonNull && 4014 PVD->getTypeSourceInfo(); 4015 } 4016 4017 if (!NNAttr && !CanCheckNullability) 4018 return; 4019 4020 SourceLocation AttrLoc; 4021 SanitizerMask CheckKind; 4022 SanitizerHandler Handler; 4023 if (NNAttr) { 4024 AttrLoc = NNAttr->getLocation(); 4025 CheckKind = SanitizerKind::NonnullAttribute; 4026 Handler = SanitizerHandler::NonnullArg; 4027 } else { 4028 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc(); 4029 CheckKind = SanitizerKind::NullabilityArg; 4030 Handler = SanitizerHandler::NullabilityArg; 4031 } 4032 4033 SanitizerScope SanScope(this); 4034 llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType); 4035 llvm::Constant *StaticData[] = { 4036 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc), 4037 llvm::ConstantInt::get(Int32Ty, ArgNo + 1), 4038 }; 4039 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None); 4040 } 4041 4042 // Check if the call is going to use the inalloca convention. This needs to 4043 // agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged 4044 // later, so we can't check it directly. 4045 static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, 4046 ArrayRef<QualType> ArgTypes) { 4047 // The Swift calling conventions don't go through the target-specific 4048 // argument classification, they never use inalloca. 4049 // TODO: Consider limiting inalloca use to only calling conventions supported 4050 // by MSVC. 4051 if (ExplicitCC == CC_Swift || ExplicitCC == CC_SwiftAsync) 4052 return false; 4053 if (!CGM.getTarget().getCXXABI().isMicrosoft()) 4054 return false; 4055 return llvm::any_of(ArgTypes, [&](QualType Ty) { 4056 return isInAllocaArgument(CGM.getCXXABI(), Ty); 4057 }); 4058 } 4059 4060 #ifndef NDEBUG 4061 // Determine whether the given argument is an Objective-C method 4062 // that may have type parameters in its signature. 4063 static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) { 4064 const DeclContext *dc = method->getDeclContext(); 4065 if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) { 4066 return classDecl->getTypeParamListAsWritten(); 4067 } 4068 4069 if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) { 4070 return catDecl->getTypeParamList(); 4071 } 4072 4073 return false; 4074 } 4075 #endif 4076 4077 /// EmitCallArgs - Emit call arguments for a function. 4078 void CodeGenFunction::EmitCallArgs( 4079 CallArgList &Args, PrototypeWrapper Prototype, 4080 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, 4081 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { 4082 SmallVector<QualType, 16> ArgTypes; 4083 4084 assert((ParamsToSkip == 0 || Prototype.P) && 4085 "Can't skip parameters if type info is not provided"); 4086 4087 // This variable only captures *explicitly* written conventions, not those 4088 // applied by default via command line flags or target defaults, such as 4089 // thiscall, aapcs, stdcall via -mrtd, etc. 
Computing that correctly would
4090 // require knowing if this is a C++ instance method or being able to see
4091 // unprototyped FunctionTypes.
4092 CallingConv ExplicitCC = CC_C;
4093
4094 // First, if a prototype was provided, use those argument types.
4095 bool IsVariadic = false;
4096 if (Prototype.P) {
4097 const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>();
4098 if (MD) {
4099 IsVariadic = MD->isVariadic();
4100 ExplicitCC = getCallingConventionForDecl(
4101 MD, CGM.getTarget().getTriple().isOSWindows());
4102 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4103 MD->param_type_end());
4104 } else {
4105 const auto *FPT = Prototype.P.get<const FunctionProtoType *>();
4106 IsVariadic = FPT->isVariadic();
4107 ExplicitCC = FPT->getExtInfo().getCC();
4108 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4109 FPT->param_type_end());
4110 }
4111
4112 #ifndef NDEBUG
4113 // Check that the prototyped types match the argument expression types.
4114 bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD);
4115 CallExpr::const_arg_iterator Arg = ArgRange.begin();
4116 for (QualType Ty : ArgTypes) {
4117 assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4118 assert(
4119 (isGenericMethod || Ty->isVariablyModifiedType() ||
4120 Ty.getNonReferenceType()->isObjCRetainableType() ||
4121 getContext()
4122 .getCanonicalType(Ty.getNonReferenceType())
4123 .getTypePtr() ==
4124 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4125 "type mismatch in call argument!");
4126 ++Arg;
4127 }
4128
4129 // Either we've emitted all the call args, or we have a call to a variadic
4130 // function.
4131 assert((Arg == ArgRange.end() || IsVariadic) &&
4132 "Extra arguments in non-variadic function!");
4133 #endif
4134 }
4135
4136 // If we still have any arguments, emit them using the type of the argument.
4137 for (auto *A : llvm::make_range(std::next(ArgRange.begin(), ArgTypes.size()),
4138 ArgRange.end()))
4139 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4140 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4141
4142 // We must evaluate arguments from right to left in the MS C++ ABI,
4143 // because arguments are destroyed left to right in the callee. As a special
4144 // case, there are certain language constructs that require left-to-right
4145 // evaluation, and in those cases we consider the evaluation order requirement
4146 // to trump the "destruction order is reverse construction order" guarantee.
4147 bool LeftToRight =
4148 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
4149 ? Order == EvaluationOrder::ForceLeftToRight
4150 : Order != EvaluationOrder::ForceRightToLeft;
4151
4152 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
4153 RValue EmittedArg) {
4154 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
4155 return;
4156 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
4157 if (PS == nullptr)
4158 return;
4159
4160 const auto &Context = getContext();
4161 auto SizeTy = Context.getSizeType();
4162 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
4163 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
4164 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
4165 EmittedArg.getScalarVal(),
4166 PS->isDynamic());
4167 Args.add(RValue::get(V), SizeTy);
4168 // If we're emitting args in reverse, be sure to do so with
4169 // pass_object_size, as well.
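// (The size was pushed immediately after its pointer argument, giving
// (pointer, size); storing the pair as (size, pointer) here means the final
// un-reversal of the whole list restores (pointer, size), which is the order
// the IR function expects.)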
4170 if (!LeftToRight)
4171 std::swap(Args.back(), *(&Args.back() - 1));
4172 };
4173
4174 // Insert a stack save if we're going to need any inalloca args.
4175 if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
4176 assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4177 "inalloca only supported on x86");
4178 Args.allocateArgumentMemory(*this);
4179 }
4180
4181 // Evaluate each argument in the appropriate order.
4182 size_t CallArgsStart = Args.size();
4183 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4184 unsigned Idx = LeftToRight ? I : E - I - 1;
4185 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
4186 unsigned InitialArgSize = Args.size();
4187 // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
4188 // the argument and parameter match or the objc method is parameterized.
4189 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
4190 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4191 ArgTypes[Idx]) ||
4192 (isa<ObjCMethodDecl>(AC.getDecl()) &&
4193 isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
4194 "Argument and parameter types don't match");
4195 EmitCallArg(Args, *Arg, ArgTypes[Idx]);
4196 // In particular, we depend on it being the last arg in Args, and the
4197 // objectsize bits depend on there only being one arg if !LeftToRight.
4198 assert(InitialArgSize + 1 == Args.size() &&
4199 "The code below depends on only adding one arg per EmitCallArg");
4200 (void)InitialArgSize;
4201 // Since pointer arguments are never emitted as LValues, it is safe to emit
4202 // the non-null argument check for r-values only.
4203 if (!Args.back().hasLValue()) {
4204 RValue RVArg = Args.back().getKnownRValue();
4205 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
4206 ParamsToSkip + Idx);
4207 // @llvm.objectsize should never have side-effects and shouldn't need
4208 // destruction/cleanups, so we can safely "emit" it after its arg,
4209 // regardless of right-to-leftness.
4210 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4211 }
4212 }
4213
4214 if (!LeftToRight) {
4215 // Un-reverse the arguments we just evaluated so they match up with the LLVM
4216 // IR function.
4217 std::reverse(Args.begin() + CallArgsStart, Args.end()); 4218 } 4219 } 4220 4221 namespace { 4222 4223 struct DestroyUnpassedArg final : EHScopeStack::Cleanup { 4224 DestroyUnpassedArg(Address Addr, QualType Ty) 4225 : Addr(Addr), Ty(Ty) {} 4226 4227 Address Addr; 4228 QualType Ty; 4229 4230 void Emit(CodeGenFunction &CGF, Flags flags) override { 4231 QualType::DestructionKind DtorKind = Ty.isDestructedType(); 4232 if (DtorKind == QualType::DK_cxx_destructor) { 4233 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 4234 assert(!Dtor->isTrivial()); 4235 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 4236 /*Delegating=*/false, Addr, Ty); 4237 } else { 4238 CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty)); 4239 } 4240 } 4241 }; 4242 4243 struct DisableDebugLocationUpdates { 4244 CodeGenFunction &CGF; 4245 bool disabledDebugInfo; 4246 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { 4247 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) 4248 CGF.disableDebugInfo(); 4249 } 4250 ~DisableDebugLocationUpdates() { 4251 if (disabledDebugInfo) 4252 CGF.enableDebugInfo(); 4253 } 4254 }; 4255 4256 } // end anonymous namespace 4257 4258 RValue CallArg::getRValue(CodeGenFunction &CGF) const { 4259 if (!HasLV) 4260 return RV; 4261 LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty); 4262 CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, 4263 LV.isVolatile()); 4264 IsUsed = true; 4265 return RValue::getAggregate(Copy.getAddress(CGF)); 4266 } 4267 4268 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const { 4269 LValue Dst = CGF.MakeAddrLValue(Addr, Ty); 4270 if (!HasLV && RV.isScalar()) 4271 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true); 4272 else if (!HasLV && RV.isComplex()) 4273 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true); 4274 else { 4275 auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress(); 4276 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty); 4277 // We assume that call args are never copied into subobjects. 4278 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap, 4279 HasLV ? LV.isVolatileQualified() 4280 : RV.isVolatileQualified()); 4281 } 4282 IsUsed = true; 4283 } 4284 4285 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 4286 QualType type) { 4287 DisableDebugLocationUpdates Dis(*this, E); 4288 if (const ObjCIndirectCopyRestoreExpr *CRE 4289 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 4290 assert(getLangOpts().ObjCAutoRefCount); 4291 return emitWritebackArg(*this, args, CRE); 4292 } 4293 4294 assert(type->isReferenceType() == E->isGLValue() && 4295 "reference binding to unmaterialized r-value!"); 4296 4297 if (E->isGLValue()) { 4298 assert(E->getObjectKind() == OK_Ordinary); 4299 return args.add(EmitReferenceBindingToExpr(E), type); 4300 } 4301 4302 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 4303 4304 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 4305 // However, we still have to push an EH-only cleanup in case we unwind before 4306 // we make it to the call. 4307 if (type->isRecordType() && 4308 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) { 4309 // If we're using inalloca, use the argument memory. Otherwise, use a 4310 // temporary. 
4311 AggValueSlot Slot; 4312 if (args.isUsingInAlloca()) 4313 Slot = createPlaceholderSlot(*this, type); 4314 else 4315 Slot = CreateAggTemp(type, "agg.tmp"); 4316 4317 bool DestroyedInCallee = true, NeedsEHCleanup = true; 4318 if (const auto *RD = type->getAsCXXRecordDecl()) 4319 DestroyedInCallee = RD->hasNonTrivialDestructor(); 4320 else 4321 NeedsEHCleanup = needsEHCleanup(type.isDestructedType()); 4322 4323 if (DestroyedInCallee) 4324 Slot.setExternallyDestructed(); 4325 4326 EmitAggExpr(E, Slot); 4327 RValue RV = Slot.asRValue(); 4328 args.add(RV, type); 4329 4330 if (DestroyedInCallee && NeedsEHCleanup) { 4331 // Create a no-op GEP between the placeholder and the cleanup so we can 4332 // RAUW it successfully. It also serves as a marker of the first 4333 // instruction where the cleanup is active. 4334 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(), 4335 type); 4336 // This unreachable is a temporary marker which will be removed later. 4337 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 4338 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 4339 } 4340 return; 4341 } 4342 4343 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 4344 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 4345 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 4346 assert(L.isSimple()); 4347 args.addUncopiedAggregate(L, type); 4348 return; 4349 } 4350 4351 args.add(EmitAnyExprToTemp(E), type); 4352 } 4353 4354 QualType CodeGenFunction::getVarArgType(const Expr *Arg) { 4355 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC 4356 // implicitly widens null pointer constants that are arguments to varargs 4357 // functions to pointer-sized ints. 4358 if (!getTarget().getTriple().isOSWindows()) 4359 return Arg->getType(); 4360 4361 if (Arg->getType()->isIntegerType() && 4362 getContext().getTypeSize(Arg->getType()) < 4363 getContext().getTargetInfo().getPointerWidth(0) && 4364 Arg->isNullPointerConstant(getContext(), 4365 Expr::NPC_ValueDependentIsNotNull)) { 4366 return getContext().getIntPtrType(); 4367 } 4368 4369 return Arg->getType(); 4370 } 4371 4372 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 4373 // optimizer it can aggressively ignore unwind edges. 4374 void 4375 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 4376 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 4377 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 4378 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 4379 CGM.getNoObjCARCExceptionsMetadata()); 4380 } 4381 4382 /// Emits a call to the given no-arguments nounwind runtime function. 4383 llvm::CallInst * 4384 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, 4385 const llvm::Twine &name) { 4386 return EmitNounwindRuntimeCall(callee, None, name); 4387 } 4388 4389 /// Emits a call to the given nounwind runtime function. 4390 llvm::CallInst * 4391 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, 4392 ArrayRef<llvm::Value *> args, 4393 const llvm::Twine &name) { 4394 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 4395 call->setDoesNotThrow(); 4396 return call; 4397 } 4398 4399 /// Emits a simple call (never an invoke) to the given no-arguments 4400 /// runtime function. 
4401 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, 4402 const llvm::Twine &name) { 4403 return EmitRuntimeCall(callee, None, name); 4404 } 4405 4406 // Calls which may throw must have operand bundles indicating which funclet 4407 // they are nested within. 4408 SmallVector<llvm::OperandBundleDef, 1> 4409 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) { 4410 SmallVector<llvm::OperandBundleDef, 1> BundleList; 4411 // There is no need for a funclet operand bundle if we aren't inside a 4412 // funclet. 4413 if (!CurrentFuncletPad) 4414 return BundleList; 4415 4416 // Skip intrinsics which cannot throw. 4417 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts()); 4418 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) 4419 return BundleList; 4420 4421 BundleList.emplace_back("funclet", CurrentFuncletPad); 4422 return BundleList; 4423 } 4424 4425 /// Emits a simple call (never an invoke) to the given runtime function. 4426 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, 4427 ArrayRef<llvm::Value *> args, 4428 const llvm::Twine &name) { 4429 llvm::CallInst *call = Builder.CreateCall( 4430 callee, args, getBundlesForFunclet(callee.getCallee()), name); 4431 call->setCallingConv(getRuntimeCC()); 4432 return call; 4433 } 4434 4435 /// Emits a call or invoke to the given noreturn runtime function. 4436 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke( 4437 llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) { 4438 SmallVector<llvm::OperandBundleDef, 1> BundleList = 4439 getBundlesForFunclet(callee.getCallee()); 4440 4441 if (getInvokeDest()) { 4442 llvm::InvokeInst *invoke = 4443 Builder.CreateInvoke(callee, 4444 getUnreachableBlock(), 4445 getInvokeDest(), 4446 args, 4447 BundleList); 4448 invoke->setDoesNotReturn(); 4449 invoke->setCallingConv(getRuntimeCC()); 4450 } else { 4451 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); 4452 call->setDoesNotReturn(); 4453 call->setCallingConv(getRuntimeCC()); 4454 Builder.CreateUnreachable(); 4455 } 4456 } 4457 4458 /// Emits a call or invoke instruction to the given nullary runtime function. 4459 llvm::CallBase * 4460 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, 4461 const Twine &name) { 4462 return EmitRuntimeCallOrInvoke(callee, None, name); 4463 } 4464 4465 /// Emits a call or invoke instruction to the given runtime function. 4466 llvm::CallBase * 4467 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, 4468 ArrayRef<llvm::Value *> args, 4469 const Twine &name) { 4470 llvm::CallBase *call = EmitCallOrInvoke(callee, args, name); 4471 call->setCallingConv(getRuntimeCC()); 4472 return call; 4473 } 4474 4475 /// Emits a call or invoke instruction to the given function, depending 4476 /// on the current state of the EH stack. 
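/// If there is a current invoke destination (i.e. an enclosing EH scope must
/// observe unwinding), this emits an invoke whose normal edge continues in a
/// fresh "invoke.cont" block; otherwise a plain call suffices.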
4477 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
4478                                                   ArrayRef<llvm::Value *> Args,
4479                                                   const Twine &Name) {
4480   llvm::BasicBlock *InvokeDest = getInvokeDest();
4481   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4482       getBundlesForFunclet(Callee.getCallee());
4483
4484   llvm::CallBase *Inst;
4485   if (!InvokeDest)
4486     Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
4487   else {
4488     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
4489     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4490                                 Name);
4491     EmitBlock(ContBB);
4492   }
4493
4494   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4495   // optimizer it can aggressively ignore unwind edges.
4496   if (CGM.getLangOpts().ObjCAutoRefCount)
4497     AddObjCARCExceptionMetadata(Inst);
4498
4499   return Inst;
4500 }
4501
4502 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4503                                                   llvm::Value *New) {
4504   DeferredReplacements.push_back(
4505       std::make_pair(llvm::WeakTrackingVH(Old), New));
4506 }
4507
4508 namespace {
4509
4510 /// Set the given \p NewAlign as the alignment of the return value attribute.
4511 /// If such an attribute already exists, keep the larger of the two alignments.
4512 LLVM_NODISCARD llvm::AttributeList
4513 maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4514                                 const llvm::AttributeList &Attrs,
4515                                 llvm::Align NewAlign) {
4516   llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4517   if (CurAlign >= NewAlign)
4518     return Attrs;
4519   llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4520   return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
4521       .addRetAttribute(Ctx, AlignAttr);
4522 }
4523
4524 template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
4525 protected:
4526   CodeGenFunction &CGF;
4527
4528   /// We do nothing if this is, or becomes, nullptr.
4529   const AlignedAttrTy *AA = nullptr;
4530
4531   llvm::Value *Alignment = nullptr;      // May or may not be a constant.
4532   llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.
4533
4534   AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4535       : CGF(CGF_) {
4536     if (!FuncDecl)
4537       return;
4538     AA = FuncDecl->getAttr<AlignedAttrTy>();
4539   }
4540
4541 public:
4542   /// If we can, materialize the alignment as an attribute on the return value.
4543   LLVM_NODISCARD llvm::AttributeList
4544   TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
4545     if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
4546       return Attrs;
4547     const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
4548     if (!AlignmentCI)
4549       return Attrs;
4550     // We may legitimately have non-power-of-2 alignment here.
4551     // If so, this is UB land, emit it via `@llvm.assume` instead.
4552     if (!AlignmentCI->getValue().isPowerOf2())
4553       return Attrs;
4554     llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
4555         CGF.getLLVMContext(), Attrs,
4556         llvm::Align(
4557             AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
4558     AA = nullptr; // We're done. Disallow doing anything else.
4559     return NewAttrs;
4560   }
4561
4562   /// Emit alignment assumption.
4563   /// This is a general fallback that we take if either there is an offset,
4564   /// or the alignment is variable or we are sanitizing for alignment.
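  ///
  /// In this fallback the assumption is emitted through
  /// CodeGenFunction::emitAlignmentAssumption; on recent LLVM this typically
  /// lowers to an `llvm.assume` carrying an align operand bundle, roughly
  /// (illustrative IR):
  ///   call void @llvm.assume(i1 true) ["align"(i8* %ret, i64 %alignment)]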
4565   void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
4566     if (!AA)
4567       return;
4568     CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
4569                                 AA->getLocation(), Alignment, OffsetCI);
4570     AA = nullptr; // We're done. Disallow doing anything else.
4571   }
4572 };
4573
4574 /// Helper data structure to emit `AssumeAlignedAttr`.
4575 class AssumeAlignedAttrEmitter final
4576     : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
4577 public:
4578   AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4579       : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4580     if (!AA)
4581       return;
4582     // It is guaranteed that the alignment/offset are constants.
4583     Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
4584     if (Expr *Offset = AA->getOffset()) {
4585       OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
4586       if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
4587         OffsetCI = nullptr;
4588     }
4589   }
4590 };
4591
4592 /// Helper data structure to emit `AllocAlignAttr`.
4593 class AllocAlignAttrEmitter final
4594     : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
4595 public:
4596   AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
4597                         const CallArgList &CallArgs)
4598       : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4599     if (!AA)
4600       return;
4601     // Alignment may or may not be a constant, and that is okay.
4602     Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
4603                     .getRValue(CGF)
4604                     .getScalarVal();
4605   }
4606 };
4607
4608 } // namespace
4609
4610 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
4611                                  const CGCallee &Callee,
4612                                  ReturnValueSlot ReturnValue,
4613                                  const CallArgList &CallArgs,
4614                                  llvm::CallBase **callOrInvoke, bool IsMustTail,
4615                                  SourceLocation Loc) {
4616   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
4617
4618   assert(Callee.isOrdinary() || Callee.isVirtual());
4619
4620   // Handle struct-return functions by passing a pointer to the
4621   // location that we would like to return into.
4622   QualType RetTy = CallInfo.getReturnType();
4623   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
4624
4625   llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
4626
4627   const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4628   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
4629     // We can only guarantee that a function is called from the correct
4630     // context/function based on the appropriate target attributes, so only
4631     // check when the callee has both the always_inline and target
4632     // attributes. Otherwise we could be making a conditional call after a
4633     // check for the proper CPU features (and it won't cause code generation
4634     // issues due to function-based code generation).
4635     if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
4636         TargetDecl->hasAttr<TargetAttr>())
4637       checkTargetFeatures(Loc, FD);
4638
4639     // Some architectures (such as x86-64) have the ABI changed based on
4640     // attribute-target/features. Give them a chance to diagnose.
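    // (For example, on x86-64 a 256-bit vector argument is passed in a YMM
    // register only when AVX is available, so a caller/callee mismatch in the
    // 'avx' target feature would silently change the ABI; the hook below can
    // diagnose such cases.)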
4641     CGM.getTargetCodeGenInfo().checkFunctionCallABI(
4642         CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
4643   }
4644
4645 #ifndef NDEBUG
4646   if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
4647     // For an inalloca varargs function, we don't expect CallInfo to match the
4648     // function pointer's type, because the inalloca struct will have extra
4649     // fields in it for the varargs parameters. Code later in this function
4650     // bitcasts the function pointer to the type derived from CallInfo.
4651     //
4652     // In other cases, we assert that the types match up (until pointers stop
4653     // having pointee types).
4654     llvm::Type *TypeFromVal;
4655     if (Callee.isVirtual())
4656       TypeFromVal = Callee.getVirtualFunctionType();
4657     else
4658       TypeFromVal =
4659           Callee.getFunctionPointer()->getType()->getPointerElementType();
4660     assert(IRFuncTy == TypeFromVal);
4661   }
4662 #endif
4663
4664   // 1. Set up the arguments.
4665
4666   // If we're using inalloca, insert the allocation after the stack save.
4667   // FIXME: Do this earlier rather than hacking it in here!
4668   Address ArgMemory = Address::invalid();
4669   if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
4670     const llvm::DataLayout &DL = CGM.getDataLayout();
4671     llvm::Instruction *IP = CallArgs.getStackBase();
4672     llvm::AllocaInst *AI;
4673     if (IP) {
4674       IP = IP->getNextNode();
4675       AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
4676                                 "argmem", IP);
4677     } else {
4678       AI = CreateTempAlloca(ArgStruct, "argmem");
4679     }
4680     auto Align = CallInfo.getArgStructAlignment();
4681     AI->setAlignment(Align.getAsAlign());
4682     AI->setUsedWithInAlloca(true);
4683     assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
4684     ArgMemory = Address(AI, Align);
4685   }
4686
4687   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
4688   SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
4689
4690   // If the call returns a temporary with struct return, create a temporary
4691   // alloca to hold the result, unless one is given to us.
4692   Address SRetPtr = Address::invalid();
4693   Address SRetAlloca = Address::invalid();
4694   llvm::Value *UnusedReturnSizePtr = nullptr;
4695   if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
4696     if (!ReturnValue.isNull()) {
4697       SRetPtr = ReturnValue.getValue();
4698     } else {
4699       SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
4700       if (HaveInsertPoint() && ReturnValue.isUnused()) {
4701         llvm::TypeSize size =
4702             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
4703         UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
4704       }
4705     }
4706     if (IRFunctionArgs.hasSRetArg()) {
4707       IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
4708     } else if (RetAI.isInAlloca()) {
4709       Address Addr =
4710           Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
4711       Builder.CreateStore(SRetPtr.getPointer(), Addr);
4712     }
4713   }
4714
4715   Address swiftErrorTemp = Address::invalid();
4716   Address swiftErrorArg = Address::invalid();
4717
4718   // When passing arguments using temporary allocas, we need to add the
4719   // appropriate lifetime markers. This vector keeps track of all the lifetime
4720   // markers that need to be ended right after the call.
4721   SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;
4722
4723   // Translate all of the arguments as necessary to match the IR lowering.
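  // Each Clang-level argument expands to zero or more IR arguments depending
  // on its ABIArgInfo kind: Ignore and InAlloca add none here, Indirect adds
  // one, Direct usually adds one, and Expand/CoerceAndExpand may add several.
  // The switch below handles each case in turn.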
4724   assert(CallInfo.arg_size() == CallArgs.size() &&
4725          "Mismatch between function signature & arguments.");
4726   unsigned ArgNo = 0;
4727   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
4728   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
4729        I != E; ++I, ++info_it, ++ArgNo) {
4730     const ABIArgInfo &ArgInfo = info_it->info;
4731
4732     // Insert a padding argument to ensure proper alignment.
4733     if (IRFunctionArgs.hasPaddingArg(ArgNo))
4734       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
4735           llvm::UndefValue::get(ArgInfo.getPaddingType());
4736
4737     unsigned FirstIRArg, NumIRArgs;
4738     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
4739
4740     switch (ArgInfo.getKind()) {
4741     case ABIArgInfo::InAlloca: {
4742       assert(NumIRArgs == 0);
4743       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
4744       if (I->isAggregate()) {
4745         Address Addr = I->hasLValue()
4746                            ? I->getKnownLValue().getAddress(*this)
4747                            : I->getKnownRValue().getAggregateAddress();
4748         llvm::Instruction *Placeholder =
4749             cast<llvm::Instruction>(Addr.getPointer());
4750
4751         if (!ArgInfo.getInAllocaIndirect()) {
4752           // Replace the placeholder with the appropriate argument slot GEP.
4753           CGBuilderTy::InsertPoint IP = Builder.saveIP();
4754           Builder.SetInsertPoint(Placeholder);
4755           Addr = Builder.CreateStructGEP(ArgMemory,
4756                                          ArgInfo.getInAllocaFieldIndex());
4757           Builder.restoreIP(IP);
4758         } else {
4759           // For indirect things such as overaligned structs, replace the
4760           // placeholder with a regular aggregate temporary alloca. Store the
4761           // address of this alloca into the struct.
4762           Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
4763           Address ArgSlot = Builder.CreateStructGEP(
4764               ArgMemory, ArgInfo.getInAllocaFieldIndex());
4765           Builder.CreateStore(Addr.getPointer(), ArgSlot);
4766         }
4767         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
4768       } else if (ArgInfo.getInAllocaIndirect()) {
4769         // Make a temporary alloca and store the address of it into the argument
4770         // struct.
4771         Address Addr = CreateMemTempWithoutCast(
4772             I->Ty, getContext().getTypeAlignInChars(I->Ty),
4773             "indirect-arg-temp");
4774         I->copyInto(*this, Addr);
4775         Address ArgSlot =
4776             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4777         Builder.CreateStore(Addr.getPointer(), ArgSlot);
4778       } else {
4779         // Store the RValue into the argument struct.
4780         Address Addr =
4781             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4782         unsigned AS = Addr.getType()->getPointerAddressSpace();
4783         llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
4784         // There are some cases where a trivial bitcast is not avoidable. The
4785         // definition of a type later in a translation unit may change its type
4786         // from {}* to (%struct.foo*)*.
4787         if (Addr.getType() != MemType)
4788           Addr = Builder.CreateBitCast(Addr, MemType);
4789         I->copyInto(*this, Addr);
4790       }
4791       break;
4792     }
4793
4794     case ABIArgInfo::Indirect:
4795     case ABIArgInfo::IndirectAliased: {
4796       assert(NumIRArgs == 1);
4797       if (!I->isAggregate()) {
4798         // Make a temporary alloca to pass the argument.
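        // (Illustrative IR for a scalar passed indirectly:
        //    %indirect-arg-temp = alloca i32
        //    store i32 %x, i32* %indirect-arg-temp
        //    call void @f(i32* %indirect-arg-temp))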
4799         Address Addr = CreateMemTempWithoutCast(
4800             I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
4801         IRCallArgs[FirstIRArg] = Addr.getPointer();
4802
4803         I->copyInto(*this, Addr);
4804       } else {
4805         // We want to avoid creating an unnecessary temporary+copy here;
4806         // however, we need one in three cases:
4807         // 1. If the argument is not byval, and we are required to copy the
4808         //    source. (This case doesn't occur on any common architecture.)
4809         // 2. If the argument is byval, RV is not sufficiently aligned, and
4810         //    we cannot force it to be sufficiently aligned.
4811         // 3. If the argument is byval, but RV is not located in default
4812         //    or alloca address space.
4813         Address Addr = I->hasLValue()
4814                            ? I->getKnownLValue().getAddress(*this)
4815                            : I->getKnownRValue().getAggregateAddress();
4816         llvm::Value *V = Addr.getPointer();
4817         CharUnits Align = ArgInfo.getIndirectAlign();
4818         const llvm::DataLayout *TD = &CGM.getDataLayout();
4819
4820         assert((FirstIRArg >= IRFuncTy->getNumParams() ||
4821                 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
4822                     TD->getAllocaAddrSpace()) &&
4823                "indirect argument must be in alloca address space");
4824
4825         bool NeedCopy = false;
4826
4827         if (Addr.getAlignment() < Align &&
4828             llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
4829                 Align.getAsAlign()) {
4830           NeedCopy = true;
4831         } else if (I->hasLValue()) {
4832           auto LV = I->getKnownLValue();
4833           auto AS = LV.getAddressSpace();
4834
4835           if (!ArgInfo.getIndirectByVal() ||
4836               (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
4837             NeedCopy = true;
4838           }
4839           if (!getLangOpts().OpenCL) {
4840             if ((ArgInfo.getIndirectByVal() &&
4841                  (AS != LangAS::Default &&
4842                   AS != CGM.getASTAllocaAddressSpace()))) {
4843               NeedCopy = true;
4844             }
4845           }
4846           // For OpenCL, even if RV is located in the default or alloca address
4847           // space, we don't want to perform an address space cast for it.
4848           else if ((ArgInfo.getIndirectByVal() &&
4849                     Addr.getType()->getAddressSpace() !=
4850                         IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace())) {
4851             NeedCopy = true;
4852           }
4853         }
4854
4855         if (NeedCopy) {
4856           // Create an aligned temporary, and copy to it.
4857           Address AI = CreateMemTempWithoutCast(
4858               I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
4859           IRCallArgs[FirstIRArg] = AI.getPointer();
4860
4861           // Emit lifetime markers for the temporary alloca.
4862           llvm::TypeSize ByvalTempElementSize =
4863               CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
4864           llvm::Value *LifetimeSize =
4865               EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
4866
4867           // Add cleanup code to emit the end lifetime marker after the call.
4868           if (LifetimeSize) // In case we disabled lifetime markers.
4869             CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
4870
4871           // Generate the copy.
4872           I->copyInto(*this, AI);
4873         } else {
4874           // Skip the extra memcpy call.
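          // (No temporary is needed: the argument reuses the existing memory,
          //  and we only cast the pointer into the alloca address space the
          //  indirect parameter expects, when the two differ.)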
4875 auto *T = V->getType()->getPointerElementType()->getPointerTo( 4876 CGM.getDataLayout().getAllocaAddrSpace()); 4877 IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast( 4878 *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T, 4879 true); 4880 } 4881 } 4882 break; 4883 } 4884 4885 case ABIArgInfo::Ignore: 4886 assert(NumIRArgs == 0); 4887 break; 4888 4889 case ABIArgInfo::Extend: 4890 case ABIArgInfo::Direct: { 4891 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) && 4892 ArgInfo.getCoerceToType() == ConvertType(info_it->type) && 4893 ArgInfo.getDirectOffset() == 0) { 4894 assert(NumIRArgs == 1); 4895 llvm::Value *V; 4896 if (!I->isAggregate()) 4897 V = I->getKnownRValue().getScalarVal(); 4898 else 4899 V = Builder.CreateLoad( 4900 I->hasLValue() ? I->getKnownLValue().getAddress(*this) 4901 : I->getKnownRValue().getAggregateAddress()); 4902 4903 // Implement swifterror by copying into a new swifterror argument. 4904 // We'll write back in the normal path out of the call. 4905 if (CallInfo.getExtParameterInfo(ArgNo).getABI() 4906 == ParameterABI::SwiftErrorResult) { 4907 assert(!swiftErrorTemp.isValid() && "multiple swifterror args"); 4908 4909 QualType pointeeTy = I->Ty->getPointeeType(); 4910 swiftErrorArg = 4911 Address(V, getContext().getTypeAlignInChars(pointeeTy)); 4912 4913 swiftErrorTemp = 4914 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 4915 V = swiftErrorTemp.getPointer(); 4916 cast<llvm::AllocaInst>(V)->setSwiftError(true); 4917 4918 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg); 4919 Builder.CreateStore(errorValue, swiftErrorTemp); 4920 } 4921 4922 // We might have to widen integers, but we should never truncate. 4923 if (ArgInfo.getCoerceToType() != V->getType() && 4924 V->getType()->isIntegerTy()) 4925 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType()); 4926 4927 // If the argument doesn't match, perform a bitcast to coerce it. This 4928 // can happen due to trivial type mismatches. 4929 if (FirstIRArg < IRFuncTy->getNumParams() && 4930 V->getType() != IRFuncTy->getParamType(FirstIRArg)) 4931 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg)); 4932 4933 IRCallArgs[FirstIRArg] = V; 4934 break; 4935 } 4936 4937 // FIXME: Avoid the conversion through memory if possible. 4938 Address Src = Address::invalid(); 4939 if (!I->isAggregate()) { 4940 Src = CreateMemTemp(I->Ty, "coerce"); 4941 I->copyInto(*this, Src); 4942 } else { 4943 Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this) 4944 : I->getKnownRValue().getAggregateAddress(); 4945 } 4946 4947 // If the value is offset in memory, apply the offset now. 4948 Src = emitAddressAtOffset(*this, Src, ArgInfo); 4949 4950 // Fast-isel and the optimizer generally like scalar values better than 4951 // FCAs, so we flatten them if this is safe to do for this argument. 4952 llvm::StructType *STy = 4953 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType()); 4954 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 4955 llvm::Type *SrcTy = Src.getElementType(); 4956 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); 4957 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy); 4958 4959 // If the source type is smaller than the destination type of the 4960 // coerce-to logic, copy the source value into a temp alloca the size 4961 // of the destination type to allow loading all of it. The bits past 4962 // the source value are left undef. 
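      // (E.g. a 12-byte struct coerced to { i64, i64 }: memcpy the 12 bytes
      //  into a 16-byte temporary and load both i64 elements; the top four
      //  bytes of the second element are undef.)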
4963 if (SrcSize < DstSize) { 4964 Address TempAlloca 4965 = CreateTempAlloca(STy, Src.getAlignment(), 4966 Src.getName() + ".coerce"); 4967 Builder.CreateMemCpy(TempAlloca, Src, SrcSize); 4968 Src = TempAlloca; 4969 } else { 4970 Src = Builder.CreateBitCast(Src, 4971 STy->getPointerTo(Src.getAddressSpace())); 4972 } 4973 4974 assert(NumIRArgs == STy->getNumElements()); 4975 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 4976 Address EltPtr = Builder.CreateStructGEP(Src, i); 4977 llvm::Value *LI = Builder.CreateLoad(EltPtr); 4978 IRCallArgs[FirstIRArg + i] = LI; 4979 } 4980 } else { 4981 // In the simple case, just pass the coerced loaded value. 4982 assert(NumIRArgs == 1); 4983 llvm::Value *Load = 4984 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this); 4985 4986 if (CallInfo.isCmseNSCall()) { 4987 // For certain parameter types, clear padding bits, as they may reveal 4988 // sensitive information. 4989 // Small struct/union types are passed as integer arrays. 4990 auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType()); 4991 if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType())) 4992 Load = EmitCMSEClearRecord(Load, ATy, I->Ty); 4993 } 4994 IRCallArgs[FirstIRArg] = Load; 4995 } 4996 4997 break; 4998 } 4999 5000 case ABIArgInfo::CoerceAndExpand: { 5001 auto coercionType = ArgInfo.getCoerceAndExpandType(); 5002 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 5003 5004 llvm::Value *tempSize = nullptr; 5005 Address addr = Address::invalid(); 5006 Address AllocaAddr = Address::invalid(); 5007 if (I->isAggregate()) { 5008 addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this) 5009 : I->getKnownRValue().getAggregateAddress(); 5010 5011 } else { 5012 RValue RV = I->getKnownRValue(); 5013 assert(RV.isScalar()); // complex should always just be direct 5014 5015 llvm::Type *scalarType = RV.getScalarVal()->getType(); 5016 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType); 5017 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType); 5018 5019 // Materialize to a temporary. 5020 addr = 5021 CreateTempAlloca(RV.getScalarVal()->getType(), 5022 CharUnits::fromQuantity(std::max( 5023 layout->getAlignment().value(), scalarAlign)), 5024 "tmp", 5025 /*ArraySize=*/nullptr, &AllocaAddr); 5026 tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer()); 5027 5028 Builder.CreateStore(RV.getScalarVal(), addr); 5029 } 5030 5031 addr = Builder.CreateElementBitCast(addr, coercionType); 5032 5033 unsigned IRArgPos = FirstIRArg; 5034 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 5035 llvm::Type *eltType = coercionType->getElementType(i); 5036 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; 5037 Address eltAddr = Builder.CreateStructGEP(addr, i); 5038 llvm::Value *elt = Builder.CreateLoad(eltAddr); 5039 IRCallArgs[IRArgPos++] = elt; 5040 } 5041 assert(IRArgPos == FirstIRArg + NumIRArgs); 5042 5043 if (tempSize) { 5044 EmitLifetimeEnd(tempSize, AllocaAddr.getPointer()); 5045 } 5046 5047 break; 5048 } 5049 5050 case ABIArgInfo::Expand: { 5051 unsigned IRArgPos = FirstIRArg; 5052 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos); 5053 assert(IRArgPos == FirstIRArg + NumIRArgs); 5054 break; 5055 } 5056 } 5057 } 5058 5059 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this); 5060 llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer(); 5061 5062 // If we're using inalloca, set up that argument. 
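  // (Illustrative x86 IR for an inalloca call; the exact attribute syntax
  //  varies by LLVM version:
  //    %argmem = alloca inalloca <{ %struct.A }>, align 4
  //    ...argument values are stored into %argmem via struct GEPs...
  //    call void @g(<{ %struct.A }>* inalloca %argmem))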
5063 if (ArgMemory.isValid()) { 5064 llvm::Value *Arg = ArgMemory.getPointer(); 5065 if (CallInfo.isVariadic()) { 5066 // When passing non-POD arguments by value to variadic functions, we will 5067 // end up with a variadic prototype and an inalloca call site. In such 5068 // cases, we can't do any parameter mismatch checks. Give up and bitcast 5069 // the callee. 5070 unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace(); 5071 CalleePtr = 5072 Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS)); 5073 } else { 5074 llvm::Type *LastParamTy = 5075 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1); 5076 if (Arg->getType() != LastParamTy) { 5077 #ifndef NDEBUG 5078 // Assert that these structs have equivalent element types. 5079 llvm::StructType *FullTy = CallInfo.getArgStruct(); 5080 llvm::StructType *DeclaredTy = cast<llvm::StructType>( 5081 cast<llvm::PointerType>(LastParamTy)->getElementType()); 5082 assert(DeclaredTy->getNumElements() == FullTy->getNumElements()); 5083 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(), 5084 DE = DeclaredTy->element_end(), 5085 FI = FullTy->element_begin(); 5086 DI != DE; ++DI, ++FI) 5087 assert(*DI == *FI); 5088 #endif 5089 Arg = Builder.CreateBitCast(Arg, LastParamTy); 5090 } 5091 } 5092 assert(IRFunctionArgs.hasInallocaArg()); 5093 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg; 5094 } 5095 5096 // 2. Prepare the function pointer. 5097 5098 // If the callee is a bitcast of a non-variadic function to have a 5099 // variadic function pointer type, check to see if we can remove the 5100 // bitcast. This comes up with unprototyped functions. 5101 // 5102 // This makes the IR nicer, but more importantly it ensures that we 5103 // can inline the function at -O0 if it is marked always_inline. 5104 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT, 5105 llvm::Value *Ptr) -> llvm::Function * { 5106 if (!CalleeFT->isVarArg()) 5107 return nullptr; 5108 5109 // Get underlying value if it's a bitcast 5110 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) { 5111 if (CE->getOpcode() == llvm::Instruction::BitCast) 5112 Ptr = CE->getOperand(0); 5113 } 5114 5115 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr); 5116 if (!OrigFn) 5117 return nullptr; 5118 5119 llvm::FunctionType *OrigFT = OrigFn->getFunctionType(); 5120 5121 // If the original type is variadic, or if any of the component types 5122 // disagree, we cannot remove the cast. 5123 if (OrigFT->isVarArg() || 5124 OrigFT->getNumParams() != CalleeFT->getNumParams() || 5125 OrigFT->getReturnType() != CalleeFT->getReturnType()) 5126 return nullptr; 5127 5128 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i) 5129 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i)) 5130 return nullptr; 5131 5132 return OrigFn; 5133 }; 5134 5135 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) { 5136 CalleePtr = OrigFn; 5137 IRFuncTy = OrigFn->getFunctionType(); 5138 } 5139 5140 // 3. Perform the actual call. 5141 5142 // Deactivate any cleanups that we're supposed to do immediately before 5143 // the call. 5144 if (!CallArgs.getCleanupsToDeactivate().empty()) 5145 deactivateArgCleanupsBeforeCall(*this, CallArgs); 5146 5147 // Assert that the arguments we computed match up. The IR verifier 5148 // will catch this, but this is a common enough source of problems 5149 // during IRGen changes that it's way better for debugging to catch 5150 // it ourselves here. 
5151 #ifndef NDEBUG
5152   assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
5153   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
5154     // Inalloca argument can have a different type.
5155     if (IRFunctionArgs.hasInallocaArg() &&
5156         i == IRFunctionArgs.getInallocaArgNo())
5157       continue;
5158     if (i < IRFuncTy->getNumParams())
5159       assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
5160   }
5161 #endif
5162
5163   // Update the largest vector width if any arguments have vector types.
5164   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
5165     if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
5166       LargestVectorWidth =
5167           std::max((uint64_t)LargestVectorWidth,
5168                    VT->getPrimitiveSizeInBits().getKnownMinSize());
5169   }
5170
5171   // Compute the calling convention and attributes.
5172   unsigned CallingConv;
5173   llvm::AttributeList Attrs;
5174   CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
5175                              Callee.getAbstractInfo(), Attrs, CallingConv,
5176                              /*AttrOnCallSite=*/true,
5177                              /*IsThunk=*/false);
5178
5179   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
5180     if (FD->hasAttr<StrictFPAttr>())
5181       // All calls within a strictfp function are marked strictfp
5182       Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
5183
5184   // Add the call-site nomerge attribute if it exists.
5185   if (InNoMergeAttributedStmt)
5186     Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge);
5187
5188   // Apply some call-site-specific attributes.
5189   // TODO: work this into building the attribute set.
5190
5191   // Apply always_inline to all calls within flatten functions.
5192   // FIXME: should this really take priority over __try, below?
5193   if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
5194       !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
5195     Attrs =
5196         Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
5197   }
5198
5199   // Disable inlining inside SEH __try blocks.
5200   if (isSEHTryScope()) {
5201     Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
5202   }
5203
5204   // Decide whether to use a call or an invoke.
5205   bool CannotThrow;
5206   if (currentFunctionUsesSEHTry()) {
5207     // SEH cares about asynchronous exceptions, so everything can "throw."
5208     CannotThrow = false;
5209   } else if (isCleanupPadScope() &&
5210              EHPersonality::get(*this).isMSVCXXPersonality()) {
5211     // The MSVC++ personality will implicitly terminate the program if an
5212     // exception is thrown during a cleanup outside of a try/catch.
5213     // We don't need to model anything in IR to get this behavior.
5214     CannotThrow = true;
5215   } else {
5216     // Otherwise, nounwind call sites will never throw.
5217     CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
5218
5219     if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5220       if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5221         CannotThrow = true;
5222   }
5223
5224   // If we made a temporary, be sure to clean up after ourselves. Note that we
5225   // can't depend on being inside of an ExprWithCleanups, so we need to manually
5226   // pop this cleanup later on. Being eager about this is OK, since this
5227   // temporary is 'invisible' outside of the callee.
5228   if (UnusedReturnSizePtr)
5229     pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
5230                                          UnusedReturnSizePtr);
5231
5232   llvm::BasicBlock *InvokeDest = CannotThrow ?
nullptr : getInvokeDest(); 5233 5234 SmallVector<llvm::OperandBundleDef, 1> BundleList = 5235 getBundlesForFunclet(CalleePtr); 5236 5237 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) 5238 if (FD->hasAttr<StrictFPAttr>()) 5239 // All calls within a strictfp function are marked strictfp 5240 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP); 5241 5242 AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl); 5243 Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs); 5244 5245 AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs); 5246 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs); 5247 5248 // Emit the actual call/invoke instruction. 5249 llvm::CallBase *CI; 5250 if (!InvokeDest) { 5251 CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList); 5252 } else { 5253 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); 5254 CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs, 5255 BundleList); 5256 EmitBlock(Cont); 5257 } 5258 if (callOrInvoke) 5259 *callOrInvoke = CI; 5260 5261 // If this is within a function that has the guard(nocf) attribute and is an 5262 // indirect call, add the "guard_nocf" attribute to this call to indicate that 5263 // Control Flow Guard checks should not be added, even if the call is inlined. 5264 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) { 5265 if (const auto *A = FD->getAttr<CFGuardAttr>()) { 5266 if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction()) 5267 Attrs = Attrs.addFnAttribute(getLLVMContext(), "guard_nocf"); 5268 } 5269 } 5270 5271 // Apply the attributes and calling convention. 5272 CI->setAttributes(Attrs); 5273 CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); 5274 5275 // Apply various metadata. 5276 5277 if (!CI->getType()->isVoidTy()) 5278 CI->setName("call"); 5279 5280 // Update largest vector width from the return type. 5281 if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType())) 5282 LargestVectorWidth = 5283 std::max((uint64_t)LargestVectorWidth, 5284 VT->getPrimitiveSizeInBits().getKnownMinSize()); 5285 5286 // Insert instrumentation or attach profile metadata at indirect call sites. 5287 // For more details, see the comment before the definition of 5288 // IPVK_IndirectCallTarget in InstrProfData.inc. 5289 if (!CI->getCalledFunction()) 5290 PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget, 5291 CI, CalleePtr); 5292 5293 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 5294 // optimizer it can aggressively ignore unwind edges. 5295 if (CGM.getLangOpts().ObjCAutoRefCount) 5296 AddObjCARCExceptionMetadata(CI); 5297 5298 // Set tail call kind if necessary. 5299 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) { 5300 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>()) 5301 Call->setTailCallKind(llvm::CallInst::TCK_NoTail); 5302 else if (IsMustTail) 5303 Call->setTailCallKind(llvm::CallInst::TCK_MustTail); 5304 } 5305 5306 // Add metadata for calls to MSAllocator functions 5307 if (getDebugInfo() && TargetDecl && 5308 TargetDecl->hasAttr<MSAllocatorAttr>()) 5309 getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc); 5310 5311 // Add metadata if calling an __attribute__((error(""))) or warning fn. 
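  // (For example, given:
  //    __attribute__((error("never call"))) void bad(void);
  //  a call to `bad` gets "srcloc" metadata recording this call site so the
  //  backend can point its diagnostic at the right source location.)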
5312   if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) {
5313     llvm::ConstantInt *Line =
5314         llvm::ConstantInt::get(Int32Ty, Loc.getRawEncoding());
5315     llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line);
5316     llvm::MDTuple *MDT = llvm::MDNode::get(getLLVMContext(), {MD});
5317     CI->setMetadata("srcloc", MDT);
5318   }
5319
5320   // 4. Finish the call.
5321
5322   // If the call doesn't return, finish the basic block and clear the
5323   // insertion point; this allows the rest of IRGen to discard
5324   // unreachable code.
5325   if (CI->doesNotReturn()) {
5326     if (UnusedReturnSizePtr)
5327       PopCleanupBlock();
5328
5329     // Strip away the noreturn attribute to better diagnose unreachable UB.
5330     if (SanOpts.has(SanitizerKind::Unreachable)) {
5331       // Also remove from function since CallBase::hasFnAttr additionally checks
5332       // attributes of the called function.
5333       if (auto *F = CI->getCalledFunction())
5334         F->removeFnAttr(llvm::Attribute::NoReturn);
5335       CI->removeFnAttr(llvm::Attribute::NoReturn);
5336
5337       // Avoid incompatibility with ASan which relies on the `noreturn`
5338       // attribute to insert handler calls.
5339       if (SanOpts.hasOneOf(SanitizerKind::Address |
5340                            SanitizerKind::KernelAddress)) {
5341         SanitizerScope SanScope(this);
5342         llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
5343         Builder.SetInsertPoint(CI);
5344         auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
5345         llvm::FunctionCallee Fn =
5346             CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
5347         EmitNounwindRuntimeCall(Fn);
5348       }
5349     }
5350
5351     EmitUnreachable(Loc);
5352     Builder.ClearInsertionPoint();
5353
5354     // FIXME: For now, emit a dummy basic block because expr emitters generally
5355     // are not ready to handle emitting expressions at unreachable points.
5356     EnsureInsertPoint();
5357
5358     // Return a reasonable RValue.
5359     return GetUndefRValue(RetTy);
5360   }
5361
5362   // If this is a musttail call, return immediately. We do not branch to the
5363   // epilogue in this case.
5364   if (IsMustTail) {
5365     for (auto it = EHStack.find(CurrentCleanupScopeDepth); it != EHStack.end();
5366          ++it) {
5367       EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(&*it);
5368       if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn()))
5369         CGM.ErrorUnsupported(MustTailCall, "tail call skipping over cleanups");
5370     }
5371     if (CI->getType()->isVoidTy())
5372       Builder.CreateRetVoid();
5373     else
5374       Builder.CreateRet(CI);
5375     Builder.ClearInsertionPoint();
5376     EnsureInsertPoint();
5377     return GetUndefRValue(RetTy);
5378   }
5379
5380   // Perform the swifterror writeback.
5381   if (swiftErrorTemp.isValid()) {
5382     llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
5383     Builder.CreateStore(errorResult, swiftErrorArg);
5384   }
5385
5386   // Emit any call-associated writebacks immediately. Arguably this
5387   // should happen after any return-value munging.
5388   if (CallArgs.hasWritebacks())
5389     emitWritebacks(*this, CallArgs);
5390
5391   // The stack cleanup for inalloca arguments has to run out of the normal
5392   // lexical order, so deactivate it and run it manually here.
5393   CallArgs.freeArgumentMemory(*this);
5394
5395   // Extract the return value.
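  // (The lambda below dispatches on the return ABI: sret-style results
  //  (InAlloca, Indirect, CoerceAndExpand) are loaded back out of SRetPtr,
  //  while Direct/Extend results come straight from the call instruction,
  //  possibly via a coercing store/load through a temporary.)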
5397   RValue Ret = [&] {
5398     switch (RetAI.getKind()) {
5399     case ABIArgInfo::CoerceAndExpand: {
5400       auto coercionType = RetAI.getCoerceAndExpandType();
5401
5402       Address addr = SRetPtr;
5403       addr = Builder.CreateElementBitCast(addr, coercionType);
5404
5405       assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
5406       bool requiresExtract = isa<llvm::StructType>(CI->getType());
5407
5408       unsigned unpaddedIndex = 0;
5409       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5410         llvm::Type *eltType = coercionType->getElementType(i);
5411         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
5412         Address eltAddr = Builder.CreateStructGEP(addr, i);
5413         llvm::Value *elt = CI;
5414         if (requiresExtract)
5415           elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
5416         else
5417           assert(unpaddedIndex == 0);
5418         Builder.CreateStore(elt, eltAddr);
5419       }
5420       // FALLTHROUGH
5421       LLVM_FALLTHROUGH;
5422     }
5423
5424     case ABIArgInfo::InAlloca:
5425     case ABIArgInfo::Indirect: {
5426       RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
5427       if (UnusedReturnSizePtr)
5428         PopCleanupBlock();
5429       return ret;
5430     }
5431
5432     case ABIArgInfo::Ignore:
5433       // Even if the call's result is ignored, make sure to construct an
5434       // appropriate return value for our caller.
5435       return GetUndefRValue(RetTy);
5436
5437     case ABIArgInfo::Extend:
5438     case ABIArgInfo::Direct: {
5439       llvm::Type *RetIRTy = ConvertType(RetTy);
5440       if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
5441         switch (getEvaluationKind(RetTy)) {
5442         case TEK_Complex: {
5443           llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
5444           llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
5445           return RValue::getComplex(std::make_pair(Real, Imag));
5446         }
5447         case TEK_Aggregate: {
5448           Address DestPtr = ReturnValue.getValue();
5449           bool DestIsVolatile = ReturnValue.isVolatile();
5450
5451           if (!DestPtr.isValid()) {
5452             DestPtr = CreateMemTemp(RetTy, "agg.tmp");
5453             DestIsVolatile = false;
5454           }
5455           EmitAggregateStore(CI, DestPtr, DestIsVolatile);
5456           return RValue::getAggregate(DestPtr);
5457         }
5458         case TEK_Scalar: {
5459           // If the value doesn't match, perform a bitcast to coerce it. This
5460           // can happen due to trivial type mismatches.
5461           llvm::Value *V = CI;
5462           if (V->getType() != RetIRTy)
5463             V = Builder.CreateBitCast(V, RetIRTy);
5464           return RValue::get(V);
5465         }
5466         }
5467         llvm_unreachable("bad evaluation kind");
5468       }
5469
5470       Address DestPtr = ReturnValue.getValue();
5471       bool DestIsVolatile = ReturnValue.isVolatile();
5472
5473       if (!DestPtr.isValid()) {
5474         DestPtr = CreateMemTemp(RetTy, "coerce");
5475         DestIsVolatile = false;
5476       }
5477
5478       // If the value is offset in memory, apply the offset now.
5479       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
5480       CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
5481
5482       return convertTempToRValue(DestPtr, RetTy, SourceLocation());
5483     }
5484
5485     case ABIArgInfo::Expand:
5486     case ABIArgInfo::IndirectAliased:
5487       llvm_unreachable("Invalid ABI kind for return argument");
5488     }
5489
5490     llvm_unreachable("Unhandled ABIArgInfo::Kind");
5491   }();
5492
5493   // Emit the assume_aligned check on the return value.
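  // (If TryEmitAsCallSiteAttribute could not encode the alignment as a return
  //  attribute above, these emitters fall back to emitting an llvm.assume on
  //  the returned pointer.)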
5494 if (Ret.isScalar() && TargetDecl) { 5495 AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret); 5496 AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret); 5497 } 5498 5499 // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though 5500 // we can't use the full cleanup mechanism. 5501 for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall) 5502 LifetimeEnd.Emit(*this, /*Flags=*/{}); 5503 5504 if (!ReturnValue.isExternallyDestructed() && 5505 RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct) 5506 pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(), 5507 RetTy); 5508 5509 return Ret; 5510 } 5511 5512 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const { 5513 if (isVirtual()) { 5514 const CallExpr *CE = getVirtualCallExpr(); 5515 return CGF.CGM.getCXXABI().getVirtualFunctionPointer( 5516 CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(), 5517 CE ? CE->getBeginLoc() : SourceLocation()); 5518 } 5519 5520 return *this; 5521 } 5522 5523 /* VarArg handling */ 5524 5525 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) { 5526 VAListAddr = VE->isMicrosoftABI() 5527 ? EmitMSVAListRef(VE->getSubExpr()) 5528 : EmitVAListRef(VE->getSubExpr()); 5529 QualType Ty = VE->getType(); 5530 if (VE->isMicrosoftABI()) 5531 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty); 5532 return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty); 5533 } 5534
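// (Illustrative: for `va_arg(ap, int)` EmitVAArg receives the emitted va_list
//  l-value for `ap` and returns the address of the next `int` slot, with the
//  target's ABIInfo advancing the va_list in place.)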