Lines Matching +full:cold +full:- +full:temp
1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
12 //===----------------------------------------------------------------------===//
77 // clang-format off
79 // clang-format on
91 RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
96 RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace());
102 return MD->getType()->getCanonicalTypeUnqualified()
106 /// Returns the "extra-canonicalized" return type, which discards
109 /// all parameter and return types are top-level unqualified.
111 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
120 return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
122 FTNP->getExtInfo(), {}, RequiredArgs(0));
130 assert(proto->hasExtParameterInfos());
132 assert(proto->getNumParams() + prefixArgs <= totalArgs);
140 for (const auto &ParamInfo : proto->getExtParameterInfos()) {
160 if (!FPT->hasExtParameterInfos()) {
163 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
168 // In the vast majority of cases, we'll have precisely FPT->getNumParams()
171 prefix.reserve(prefix.size() + FPT->getNumParams());
173 auto ExtInfos = FPT->getExtParameterInfos();
174 assert(ExtInfos.size() == FPT->getNumParams());
175 for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
176 prefix.push_back(FPT->getParamType(I));
195 CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
200 FTP->getExtInfo(), paramInfos, Required);
215 if (D->hasAttr<StdCallAttr>())
218 if (D->hasAttr<FastCallAttr>())
221 if (D->hasAttr<RegCallAttr>())
224 if (D->hasAttr<ThisCallAttr>())
227 if (D->hasAttr<VectorCallAttr>())
230 if (D->hasAttr<PascalAttr>())
233 if (PcsAttr *PCS = D->getAttr<PcsAttr>())
234 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
236 if (D->hasAttr<AArch64VectorPcsAttr>())
239 if (D->hasAttr<AArch64SVEPcsAttr>())
242 if (D->hasAttr<AMDGPUKernelCallAttr>())
245 if (D->hasAttr<IntelOclBiccAttr>())
248 if (D->hasAttr<MSABIAttr>())
251 if (D->hasAttr<SysVABIAttr>())
254 if (D->hasAttr<PreserveMostAttr>())
257 if (D->hasAttr<PreserveAllAttr>())
260 if (D->hasAttr<M68kRTDAttr>())
263 if (D->hasAttr<PreserveNoneAttr>())
266 if (D->hasAttr<RISCVVectorCCAttr>())
273 /// unknown C++ non-static member function of the given abstract type.
289 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
295 if (FD->hasAttr<CUDAGlobalAttr>()) {
296 const FunctionType *FT = FTy->getAs<FunctionType>();
298 FTy = FT->getCanonicalTypeUnqualified();
303 /// definition of the given C++ non-static member function. The
315 if (MD->isImplicitObjectMemberFunction()) {
330 !Inherited.getShadowDecl()->constructsVirtualBase() ||
349 if (auto Inherited = CD->getInheritedConstructor())
372 (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
375 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
396 argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
404 if (proto->hasExtParameterInfos()) {
412 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
414 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
445 FunctionType::ExtInfo Info = FPT->getExtInfo();
447 // If the prototype args are elided, we should only have ABI-specific args,
449 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
450 // ABI-specific suffix arguments are treated the same as variadic arguments.
464 if (MD->isImplicitObjectMemberFunction())
467 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
473 // non-variadic type.
475 return arrangeLLVMFunctionInfo(noProto->getReturnType(), FnInfoOpts::None,
476 std::nullopt, noProto->getExtInfo(), {},
484 /// definition of an Objective-C method.
489 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
493 /// through which to perform a send to the given Objective-C method,
495 /// the 'self' type of the method or even an Objective-C pointer type.
503 MD->isDirectMethod() ? 1 : 2);
505 if (!MD->isDirectMethod())
508 for (const auto *I : MD->parameters()) {
509 argTys.push_back(Context.getCanonicalParamType(I->getType()));
511 I->hasAttr<NoEscapeAttr>());
520 MD->hasAttr<NSReturnsRetainedAttr>())
524 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
526 return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()),
560 assert(MD->isVirtual() && "only methods have thunks");
562 CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
564 FTP->getExtInfo(), {}, RequiredArgs(1));
574 const CXXRecordDecl *RD = CD->getParent();
577 ArgTys.push_back(*FTP->param_type_begin());
578 if (RD->getNumVBases() > 0)
606 if (proto->isVariadic())
609 if (proto->hasExtParameterInfos())
628 return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
629 opts, argTypes, fnType->getExtInfo(),
636 /// target-dependent in crazy ways.
660 return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
662 proto->getExtInfo(), paramInfos,
698 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
715 FunctionType::ExtInfo info = proto->getExtInfo();
716 return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
791 // Construct the function info. We co-allocate the ArgInfos.
814 ABIArgInfo &retInfo = FI->getReturnInfo();
816 retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
818 for (auto &I : FI->arguments())
844 FI->CallingConvention = llvmCC;
845 FI->EffectiveCallingConvention = llvmCC;
846 FI->ASTCallingConvention = info.getCC();
847 FI->InstanceMethod = instanceMethod;
848 FI->ChainCall = chainCall;
849 FI->DelegateCall = delegateCall;
850 FI->CmseNSCall = info.getCmseNSCall();
851 FI->NoReturn = info.getNoReturn();
852 FI->ReturnsRetained = info.getProducesResult();
853 FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
854 FI->NoCfCheck = info.getNoCfCheck();
855 FI->Required = required;
856 FI->HasRegParm = info.getHasRegParm();
857 FI->RegParm = info.getRegParm();
858 FI->ArgStruct = nullptr;
859 FI->ArgStructAlign = 0;
860 FI->NumArgs = argTypes.size();
861 FI->HasExtParameterInfos = !paramInfos.empty();
862 FI->getArgsBuffer()[0].type = resultType;
863 FI->MaxVectorWidth = 0;
865 FI->getArgsBuffer()[i + 1].type = argTypes[i];
867 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
903 return TE->Kind == TEK_ConstantArray;
917 return TE->Kind == TEK_Record;
926 return TE->Kind == TEK_Complex;
933 return TE->Kind == TEK_None;
941 return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),
942 AT->getZExtSize());
944 if (const RecordType *RT = Ty->getAs<RecordType>()) {
947 const RecordDecl *RD = RT->getDecl();
948 assert(!RD->hasFlexibleArrayMember() &&
950 if (RD->isUnion()) {
951 // Unions can be here only in degenerate cases - all the fields are the same
956 for (const auto *FD : RD->fields()) {
957 if (FD->isZeroLengthBitField(Context))
959 assert(!FD->isBitField() &&
960 "Cannot expand structure with bit-field members.");
961 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
971 assert(!CXXRD->isDynamicClass() &&
973 llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
976 for (const auto *FD : RD->fields()) {
977 if (FD->isZeroLengthBitField(Context))
979 assert(!FD->isBitField() &&
980 "Cannot expand structure with bit-field members.");
987 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
988 return std::make_unique<ComplexExpansion>(CT->getElementType());
996 return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
1000 for (auto BS : RExp->Bases)
1001 Res += getExpansionSize(BS->getType(), Context);
1002 for (auto FD : RExp->Fields)
1003 Res += getExpansionSize(FD->getType(), Context);
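The matched getExpansionSize lines recurse over the argument type: a constant array contributes NumElts copies of its element's expansion, a record sums its bases and fields, a complex value expands to two scalars, and anything else counts as a single scalar. Below is a minimal standalone sketch of that recursion; TypeDesc and expansionSize are illustrative stand-ins, not Clang types.

#include <cstddef>
#include <vector>

// Illustrative stand-in for the ConstantArray/Record/Complex/None expansion
// kinds matched above. For Array, Members[0] is the element type; for Record,
// Members holds the (flattened) bases and fields.
struct TypeDesc {
  enum Kind { Scalar, Complex, Array, Record } K = Scalar;
  std::size_t NumElts = 0;
  std::vector<TypeDesc> Members;
};

// Count how many scalar "leaves" the type expands to.
std::size_t expansionSize(const TypeDesc &T) {
  switch (T.K) {
  case TypeDesc::Scalar:  return 1;
  case TypeDesc::Complex: return 2;   // real + imaginary part
  case TypeDesc::Array:   return T.NumElts * expansionSize(T.Members[0]);
  case TypeDesc::Record: {
    std::size_t N = 0;
    for (const TypeDesc &M : T.Members)
      N += expansionSize(M);          // bases first, then fields
    return N;
  }
  }
  return 1;
}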
1017 for (int i = 0, n = CAExp->NumElts; i < n; i++) {
1018 getExpandedTypes(CAExp->EltTy, TI);
1021 for (auto BS : RExp->Bases)
1022 getExpandedTypes(BS->getType(), TI);
1023 for (auto FD : RExp->Fields)
1024 getExpandedTypes(FD->getType(), TI);
1026 llvm::Type *EltTy = ConvertType(CExp->EltTy);
1039 for (int i = 0, n = CAE->NumElts; i < n; i++) {
1048 "Unexpected non-simple lvalue during struct expansion.");
1054 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1055 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1059 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1060 // Perform a single step derived-to-base conversion.
1062 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1064 LValue SubLV = MakeAddrLValue(Base, BS->getType());
1067 ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1069 for (auto FD : RExp->Fields) {
1072 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1089 if (Arg->getType()->isPointerTy()) {
1108 convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1109 CAExp->EltTy);
1110 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1116 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1117 // Perform a single step derived-to-base conversion.
1119 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1121 CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1124 ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1129 for (auto FD : RExp->Fields) {
1131 CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1132 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1143 "Unexpected non-scalar rvalue during struct expansion.");
1147 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1148 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1149 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1167 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1170 /// with an in-memory size smaller than DstSize.
1175 // We can't dive into a zero-element struct.
1176 if (SrcSTy->getNumElements() == 0) return SrcPtr;
1178 llvm::Type *FirstElt = SrcSTy->getElementType(0);
1201 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1205 /// This behaves as if the value were coerced through memory, so on big-endian
1206 /// targets the high bits are preserved in a truncation, while little-endian
1211 if (Val->getType() == Ty)
1214 if (isa<llvm::PointerType>(Val->getType())) {
1215 // If this is Pointer->Pointer avoid conversion to and from int.
1227 if (Val->getType() != DestIntTy) {
1230 // Preserve the high bits on big-endian targets.
1232 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1236 Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1240 Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1243 // Little-endian targets preserve the low bits. No shifts required.
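The CoerceIntOrPtrToIntOrPtr lines above behave as if the value were stored to memory and reloaded at the new width: on a big-endian target a narrowing keeps the high bits (logical shift right by the width difference, then truncate) and a widening zero-extends and shifts the value back up to the top, while a little-endian target keeps the low bits and needs no shift. A minimal standalone sketch of that arithmetic on plain 64-bit integers follows; the helper name and the assumption that both widths are at most 64 bits are illustrative, not the Clang helper itself.

#include <cstdint>

// Model an integer "coercion through memory" between SrcBits and DstBits.
uint64_t coerceThroughMemory(uint64_t Val, unsigned SrcBits, unsigned DstBits,
                             bool BigEndian) {
  if (BigEndian) {
    if (DstBits < SrcBits)
      Val >>= (SrcBits - DstBits);   // keep the high bits ("coerce.highbits")
    else if (DstBits > SrcBits)
      Val <<= (DstBits - SrcBits);   // re-position the value at the top
  }
  // Little-endian targets keep the low bits: no shift, just mask to DstBits.
  return DstBits >= 64 ? Val : (Val & ((uint64_t(1) << DstBits) - 1));
}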
1255 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1295 // FIXME: Assert that we aren't truncating non-padding bits when we have access
1308 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
1309 ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
1310 FixedSrcTy->getElementType()->isIntegerTy(8)) {
1312 FixedSrcTy->getElementType(),
1313 ScalableDstTy->getElementCount().getKnownMinValue() / 8);
1315 if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {
1344 llvm::Type *SrcTy = Src->getType();
1360 if (SrcTy->isIntegerTy() && Dst.getElementType()->isPointerTy() &&
1366 dyn_cast<llvm::StructType>(Src->getType())) {
1367 // Prefer scalar stores to first-class aggregate stores.
1369 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1377 } else if (SrcTy->isIntegerTy()) {
1390 // FIXME: Assert that we aren't truncating non-padding bits when we have access
1499 QualType ArgType = I->type;
1500 const ABIArgInfo &AI = I->info;
1513 IRArgs.NumberOfArgs = STy->getNumElements();
1573 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1574 switch (BT->getKind()) {
1590 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1591 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1592 if (BT->getKind() == BuiltinType::LongDouble)
1666 const ABIArgInfo &ArgInfo = it->info;
1695 // Fast-isel and the optimizer generally like scalar values better than
1700 assert(NumIRArgs == st->getNumElements());
1701 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1702 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1721 getExpandedTypes(it->type, ArgTypesIter);
1735 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
1749 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1750 FPT->isNothrow())
1753 unsigned SMEBits = FPT->getAArch64SMEAttributes();
1787 for (const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>())
1788 AA->getAssumption().split(Attrs, ",");
1798 // complex destructor or a non-trivially copyable type.
1800 ReturnType.getCanonicalType()->getAs<RecordType>()) {
1801 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1802 return ClassDecl->hasTrivialDestructor();
1809 // As-is, msan cannot tolerate a noundef mismatch between caller and
1810 // implementation. A mismatch is possible for e.g. indirect calls from C-caller
1822 if (FDecl->isExternC())
1826 if (VDecl->isExternC())
1839 /// Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the
1841 /// -f32 case.
1846 FuncAttrs.addAttribute("denormal-fp-math", FPDenormalMode.str());
1849 FuncAttrs.addAttribute("denormal-fp-math-f32", FP32DenormalMode.str());
1853 /// -mlink-builtin-bitcode and should not simply overwrite any existing
1866 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1877 FuncAttrs.addAttribute("indirect-tls-seg-refs");
1883 // FIXME: Look for 'BuiltinAttr' on the function rather than re-checking
1884 // the -fno-builtin-foo list.
1888 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1897 FuncAttrs.addAttribute("frame-pointer",
1903 FuncAttrs.addAttribute("less-precise-fpmad", "true");
1909 FuncAttrs.addAttribute("no-trapping-math", "true");
1912 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1914 FuncAttrs.addAttribute("no-infs-fp-math", "true");
1916 FuncAttrs.addAttribute("no-nans-fp-math", "true");
1918 FuncAttrs.addAttribute("approx-func-fp-math", "true");
1925 FuncAttrs.addAttribute("unsafe-fp-math", "true");
1927 FuncAttrs.addAttribute("use-soft-float", "true");
1928 FuncAttrs.addAttribute("stack-protector-buffer-size",
1931 FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");
1936 FuncAttrs.addAttribute("reciprocal-estimates",
1941 FuncAttrs.addAttribute("prefer-vector-width",
1949 FuncAttrs.addAttribute("split-stack");
1954 // Add zero-call-used-regs attribute.
1957 FuncAttrs.removeAttribute("zero-call-used-regs");
1960 FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg");
1963 FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr");
1966 FuncAttrs.addAttribute("zero-call-used-regs", "used-arg");
1969 FuncAttrs.addAttribute("zero-call-used-regs", "used");
1972 FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg");
1975 FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr");
1978 FuncAttrs.addAttribute("zero-call-used-regs", "all-arg");
1981 FuncAttrs.addAttribute("zero-call-used-regs", "all");
2012 /// Merges `target-features` from \TargetOpts and \F, and sets the result in
2021 auto FFeatures = F.getFnAttribute("target-features");
2031 assert(Feature[0] == '+' || Feature[0] == '-');
2045 FuncAttr.addAttribute("target-features", llvm::join(MergedFeatures, ","));
2058 FuncAttrs.addAttribute("target-cpu", TargetOpts.CPU);
2060 FuncAttrs.addAttribute("tune-cpu", TargetOpts.TuneCPU);
2067 // Do not promote "dynamic" denormal-fp-math to this translation unit's
2089 AttrsToRemove.addAttribute("denormal-fp-math");
2092 FuncAttrs.addAttribute("denormal-fp-math",
2097 AttrsToRemove.addAttribute("denormal-fp-math-f32");
2100 FuncAttrs.addAttribute("denormal-fp-math-f32",
2144 AttributeName += "no-builtin-";
2149 // First, handle the language options passed through -fno-builtin.
2151 // -fno-builtin disables them all.
2152 FuncAttrs.addAttribute("no-builtins");
2156 // Then, add attributes for builtins specified through -fno-builtin-<name>.
2166 if (llvm::is_contained(NBA->builtinNames(), "*")) {
2167 FuncAttrs.addAttribute("no-builtins");
2172 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
2199 if (QTy->isBitIntType())
2201 if (QTy->isReferenceType())
2203 if (QTy->isNullPtrType())
2205 if (QTy->isMemberPointerType())
2209 if (QTy->isScalarType()) {
2211 return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false);
2215 return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false);
2217 return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false);
2219 return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false);
2237 if (ArgNo < FD->getNumParams()) {
2238 const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
2239 if (Param && Param->hasAttr<MaybeUndefAttr>())
2251 if (!ParamType->hasFloatingRepresentation())
2254 // The promoted-to IR type also needs to support nofpclass.
2261 llvm::all_of(ST->elements(), [](llvm::Type *Ty) {
2269 /// Return the nofpclass mask that can be applied to floating-point parameters.
2294 /// - getDefaultFunctionAttributes is for attributes that are essentially
2296 /// overridden on a per-function basis). Adding attributes there
2298 /// target-configuration logic, as well as for code defined in library
2301 /// - ConstructAttributeList builds on top of getDefaultFunctionAttributes
2302 /// and adds declaration-specific, convention-specific, and
2303 /// frontend-specific logic. The last is of particular importance:
2347 // Collect function IR attributes based on declaration-specific
2351 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
2353 if (TargetDecl->hasAttr<NoThrowAttr>())
2355 if (TargetDecl->hasAttr<NoReturnAttr>())
2357 if (TargetDecl->hasAttr<ColdAttr>())
2358 FuncAttrs.addAttribute(llvm::Attribute::Cold);
2359 if (TargetDecl->hasAttr<HotAttr>())
2361 if (TargetDecl->hasAttr<NoDuplicateAttr>())
2363 if (TargetDecl->hasAttr<ConvergentAttr>())
2368 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
2369 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2370 // A sane operator new returns a non-aliasing pointer.
2371 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2377 const bool IsVirtualCall = MD && MD->isVirtual();
2381 if (Fn->isNoReturn())
2383 NBA = Fn->getAttr<NoBuiltinAttr>();
2390 if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
2395 if (TargetDecl->hasAttr<ConstAttr>()) {
2401 } else if (TargetDecl->hasAttr<PureAttr>()) {
2406 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
2410 if (TargetDecl->hasAttr<RestrictAttr>())
2412 if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
2415 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2417 if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
2419 if (TargetDecl->hasAttr<LeafAttr>())
2422 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
2423 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
2425 if (AllocSize->getNumElemsParam().isValid())
2426 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2427 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2431 if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
2434 FuncAttrs.addAttribute("uniform-work-group-size", "true");
2437 // '-cl-uniform-work-group-size' compile option gets a hint
2438 // to the compiler that the global work-size be a multiple of
2439 // the work-group size specified to clEnqueueNDRangeKernel
2442 "uniform-work-group-size",
2447 if (TargetDecl->hasAttr<CUDAGlobalAttr>() &&
2449 FuncAttrs.addAttribute("uniform-work-group-size", "true");
2451 if (TargetDecl->hasAttr<ArmLocallyStreamingAttr>())
2455 // Attach "no-builtins" attributes to:
2456 // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
2457 // * definitions: "no-builtins" or "no-builtin-<name>" only.
2459 // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
2466 // Override some default IR attributes based on declaration-specific
2469 if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
2471 if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
2473 if (TargetDecl->hasAttr<NoSplitStackAttr>())
2474 FuncAttrs.removeAttribute("split-stack");
2475 if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) {
2476 // A function "__attribute__((...))" overrides the command-line flag.
2478 TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
2479 FuncAttrs.removeAttribute("zero-call-used-regs");
2481 "zero-call-used-regs",
2485 // Add NonLazyBind attribute to function declarations when -fno-plt
2491 if (!Fn->isDefined() && !AttrOnCallSite) {
2498 // Add "sample-profile-suffix-elision-policy" attribute for internal linkage
2499 // functions with -funique-internal-linkage-names.
2502 if (!FD->isExternallyVisible())
2503 FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",
2508 // Collect non-call-site function IR attributes from declaration-specific
2511 if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
2523 if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
2524 TargetDecl->hasAttr<AnyX86InterruptAttr>())
2529 if (!BD->doesNotEscape())
2536 FuncAttrs.addAttribute("disable-tail-calls", "true");
2553 if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
2593 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2594 QualType PTy = RefTy->getPointeeType();
2595 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2601 if (PTy->isObjectType()) {
2638 !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) {
2646 FI.arg_begin()->type.getTypePtr()->getPointeeType();
2649 getTypes().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
2659 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
2676 QualType ParamType = I->type;
2677 const ABIArgInfo &AI = I->info;
2696 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2723 auto *Decl = ParamType->getAsRecordDecl();
2725 Decl->getArgPassingRestrictions() ==
2774 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2775 QualType PTy = RefTy->getPointeeType();
2776 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2782 if (PTy->isObjectType()) {
2793 if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() &&
2794 ParamType->isPointerType()) {
2795 QualType PTy = ParamType->getPointeeType();
2796 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2810 if (!hasUsedSRet && RetTy->isVoidType()) {
2819 auto PTy = ParamType->getPointeeType();
2820 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2864 llvm::Type *varType = CGF.ConvertType(var->getType());
2868 if (value->getType() == varType) return value;
2870 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2880 /// attribute), which declares argument ArgNo to be non-null.
2884 // - references to pointers, where the pointee is known to be
2886 // - transparent unions containing pointers
2890 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2894 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2900 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2901 if (NNAttr->isNonNull(ArgNo))
2909 Address Temp;
2911 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2913 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2922 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2926 // If this is an implicit-return-zero function, go ahead and
2931 if (FD->hasImplicitReturnZero()) {
2932 QualType RetTy = FD->getReturnType().getUnqualifiedType();
2943 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
2949 ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
2954 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
2955 AI->setName("agg.result");
2956 AI->addAttr(llvm::Attribute::NoAlias);
2976 const ABIArgInfo &ArgI = info_it->info;
2979 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2983 QualType Ty = isPromoted ? info_it->type : Arg->getType();
2985 hasScalarEvaluationKind(Arg->getType()));
2995 Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
3007 Fn->getArg(FirstIRArg), Ty, ArgI.getIndirectAlign(), false, nullptr,
3035 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
3046 auto AI = Fn->getArg(FirstIRArg);
3047 llvm::Type *LTy = ConvertType(Arg->getType());
3052 if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
3053 ArgI.getCoerceToType()->isPointerTy()) {
3058 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
3059 PVD->getFunctionScopeIndex()) &&
3061 AI->addAttr(llvm::Attribute::NonNull);
3063 QualType OTy = PVD->getOriginalType();
3070 if (ArrTy->getSizeModifier() == ArraySizeModifier::Static) {
3071 QualType ETy = ArrTy->getElementType();
3074 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
3075 uint64_t ArrSize = ArrTy->getZExtSize();
3076 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
3082 AI->addAttrs(Attrs);
3086 AI->addAttr(llvm::Attribute::NonNull);
3094 if (ArrTy->getSizeModifier() == ArraySizeModifier::Static) {
3095 QualType ETy = ArrTy->getElementType();
3098 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
3101 AI->addAttr(llvm::Attribute::NonNull);
3106 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
3108 if (const auto *TOTy = OTy->getAs<TypedefType>())
3109 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
3111 // If alignment-assumption sanitizer is enabled, we do *not* add
3115 cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
3117 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
3118 if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
3119 AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
3120 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(
3127 if (Arg->getType().isRestrictQualified())
3128 AI->addAttr(llvm::Attribute::NoAlias);
3139 // ways. Copy the value into a less-restricted temporary.
3143 QualType pointeeTy = Ty->getPointeeType();
3144 assert(pointeeTy->isPointerType());
3145 RawAddress temp =
3146 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
3150 Builder.CreateStore(incomingErrorValue, temp);
3151 V = temp.getPointer();
3156 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
3160 if (V->getType() != ArgI.getCoerceToType())
3170 llvm::Type *LTy = ConvertType(Arg->getType());
3171 if (V->getType() != LTy)
3183 llvm::Value *Coerced = Fn->getArg(FirstIRArg);
3185 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
3188 if (VecTyFrom->getElementType()->isIntegerTy(1) &&
3189 VecTyFrom->getElementCount().isKnownMultipleOf(8) &&
3190 VecTyTo->getElementType() == Builder.getInt8Ty()) {
3192 VecTyTo->getElementType(),
3193 VecTyFrom->getElementCount().getKnownMinValue() / 8);
3196 if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
3200 Coerced->setName(Arg->getName() + ".coerce");
3211 STy->getNumElements() > 1) {
3216 if (STy->containsHomogeneousScalableVectorTypes()) {
3218 "Only allow non-fractional movement of structure with"
3227 Arg->getName());
3232 // Fast-isel and the optimizer generally like scalar values better than
3235 STy->getNumElements() > 1) {
3240 assert(STy->containsHomogeneousScalableVectorTypes() &&
3244 "Only allow non-fractional movement of structure with"
3246 assert(STy->getNumElements() == NumIRArgs);
3249 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3250 auto *AI = Fn->getArg(FirstIRArg + i);
3251 AI->setName(Arg->getName() + ".coerce" + Twine(i));
3269 assert(STy->getNumElements() == NumIRArgs);
3270 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3271 auto AI = Fn->getArg(FirstIRArg + i);
3272 AI->setName(Arg->getName() + ".coerce" + Twine(i));
3284 auto AI = Fn->getArg(FirstIRArg);
3285 AI->setName(Arg->getName() + ".coerce");
3289 getContext().getTypeSizeInChars(Ty).getQuantity() -
3297 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
3316 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3317 llvm::Type *eltType = coercionType->getElementType(i);
3322 auto elt = Fn->getArg(argIndex++);
3337 auto FnArgIter = Fn->arg_begin() + FirstIRArg;
3339 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
3341 auto AI = Fn->getArg(FirstIRArg + i);
3342 AI->setName(Arg->getName() + "." + Twine(i));
3353 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
3361 for (int I = Args.size() - 1; I >= 0; --I)
3370 while (insn->use_empty()) {
3375 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
3376 bitcast->eraseFromParent();
3385 if (BB->empty()) return nullptr;
3386 if (&BB->back() != result) return nullptr;
3388 llvm::Type *resultType = result->getType();
3400 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
3403 if (generator->getNextNode() != bitcast)
3418 if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
3420 } else if (call->getCalledOperand() ==
3430 llvm::Instruction *prev = call->getPrevNode();
3433 prev = prev->getPrevNode();
3437 assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
3445 result = call->getArgOperand(0);
3451 if (!bitcast->hasOneUse()) break;
3453 result = bitcast->getOperand(0);
3458 I->eraseFromParent();
3475 const VarDecl *self = method->getSelfDecl();
3476 if (!self->getType().isConstQualified()) return nullptr;
3481 if (!retainCall || retainCall->getCalledOperand() !=
3486 llvm::Value *retainedValue = retainCall->getArgOperand(0);
3488 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
3489 if (!load || load->isAtomic() || load->isVolatile() ||
3490 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getBasePointer())
3496 llvm::Type *resultType = result->getType();
3498 assert(retainCall->use_empty());
3499 retainCall->eraseFromParent();
3517 // At -O0, try to emit a fused retain/autorelease.
3525 /// Heuristically search for a dominating store to the return-value slot.
3533 ReturnValuePtr](llvm::User *U) -> llvm::StoreInst * {
3535 if (!SI || SI->getPointerOperand() != ReturnValuePtr ||
3536 SI->getValueOperand()->getType() != CGF.ReturnValue.getElementType())
3538 // These aren't actually possible for non-coerced returns, and we
3539 // only care about non-coerced returns on this code path.
3541 assert(!SI->isAtomic() &&
3542 (!SI->isVolatile() || CGF.currentFunctionUsesSEHTry()));
3545 // If there are multiple uses of the return-value slot, just check
3547 // happen with how we generate implicit-returns; it can also happen
3549 if (!ReturnValuePtr->hasOneUse()) {
3551 if (IP->empty()) return nullptr;
3555 for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
3559 if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
3567 llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());
3570 // Now do a quick-and-dirty dominance check: just walk up the
3571 // single-predecessors chain from the current insertion point.
3572 llvm::BasicBlock *StoreBB = store->getParent();
3576 if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
3590 // Use little-endian layout, i.e.`Bits[0]` is the LSB.
3602 const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
3605 BitWidth -= CharWidth - BitOffset;
3611 BitWidth -= CharWidth;
3615 Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
3648 const RecordDecl *RD = RTy->getDecl()->getDefinition();
3653 for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
3656 if (F->isUnnamedBitField() || F->isZeroLengthBitField(Context) ||
3657 F->getType()->isIncompleteArrayType())
3660 if (F->isBitField()) {
3669 setUsedBits(CGM, F->getType(),
3697 if (const auto *RTy = QTy->getAs<RecordType>())
3709 (uint64_t(1) << Context.getCharWidth()) - 1);
3724 Mask = (Mask << CharWidth) | *--P;
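The setBitRange/setUsedBits/buildMultiCharMask fragments above track which bits of a returned value are actually used, via a little-endian array of storage units: Bits[0] holds the least significant CharWidth bits, a field is marked unit by unit (partial leading unit, fully covered units, partial trailing unit), and the array is later folded into a single mask by walking it from the most significant unit down. A standalone sketch of both steps follows; the names, the use of std::vector, and folding the whole array at once (rather than a per-element sub-range) are illustrative assumptions.

#include <cstdint>
#include <vector>

// Mark BitWidth bits starting at BitOffset in a little-endian array whose
// elements each hold one target "char" of CharWidth bits. The caller is
// assumed to have sized Bits large enough.
void markBits(std::vector<uint64_t> &Bits, int BitOffset, int BitWidth,
              int CharWidth) {
  int Pos = BitOffset / CharWidth;                 // first unit touched
  BitOffset %= CharWidth;
  const uint64_t Used = (uint64_t(1) << CharWidth) - 1;

  if (BitOffset + BitWidth >= CharWidth) {         // partial leading unit
    Bits[Pos++] |= (Used << BitOffset) & Used;
    BitWidth -= CharWidth - BitOffset;
    BitOffset = 0;
  }
  while (BitWidth >= CharWidth) {                  // fully covered units
    Bits[Pos++] |= Used;
    BitWidth -= CharWidth;
  }
  if (BitWidth > 0)                                // partial trailing unit
    Bits[Pos] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
}

// Fold the little-endian array into one integer, most significant unit first,
// mirroring `Mask = (Mask << CharWidth) | *--P;` in the matched line.
uint64_t buildMask(const std::vector<uint64_t> &Bits, int CharWidth) {
  uint64_t Mask = 0;
  for (auto P = Bits.end(); P != Bits.begin();)
    Mask = (Mask << CharWidth) | *--P;
  return Mask;
}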
3735 assert(Src->getType() == ITy);
3736 assert(ITy->getScalarSizeInBits() <= 64);
3741 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3758 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3763 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3766 for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3787 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3810 llvm::Function::arg_iterator EI = CurFn->arg_end();
3811 --EI;
3816 cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
3822 auto AI = CurFn->arg_begin();
3857 // The internal return value temp will always have pointer-to-return-type
3868 RetDbgLoc = SI->getDebugLoc();
3869 // Get the stored value and nuke the now-dead store.
3870 RV = SI->getValueOperand();
3871 SI->eraseFromParent();
3895 RT = FD->getReturnType();
3897 RT = MD->getReturnType();
3899 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3905 RT->isObjCRetainableType());
3921 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3922 auto coercedEltType = coercionType->getElementType(i);
3935 // Otherwise, we need to make a first-class aggregate.
3954 if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
3958 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
3969 Ret->setDebugLoc(std::move(RetDbgLoc));
3979 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
3984 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3996 AttrLoc = RetNNAttr->getLocation();
4001 if (auto *TSI = DD->getTypeSourceInfo())
4002 if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
4037 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
4050 // this win32-specific alignment hack.
4065 // StartFunction converted the ABI-lowered parameter(s) into a
4066 // local alloca. We need to turn that into an r-value suitable
4070 QualType type = param->getType();
4072 // GetAddrOfLocalVar returns a pointer-to-pointer for references,
4074 if (type->isReferenceType()) {
4078 // entered by StartFunction doesn't cause an over-release. This isn't
4079 // optimal -O0 code generation, but it should get cleaned up when
4083 param->hasAttr<NSConsumedAttr>() &&
4084 type->isObjCRetainableType()) {
4087 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
4092 // aggregate r-values are actually pointers to temporaries.
4097 // Deactivate the cleanup for the callee-destructed param that was pushed.
4098 if (type->isRecordType() && !CurFuncIsThunk &&
4099 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
4100 param->needsDestruction(getContext())) {
4104 "cleanup for callee-destructed param not recorded");
4119 /// Emit the actual writing-back of a writeback.
4129 // If the argument wasn't provably non-null, we need to null check
4147 "icr.writeback-cast");
4159 // Retain the new value. No need to block-copy here: the block's
4198 I.IsActiveIP->eraseFromParent();
4203 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
4204 if (uop->getOpcode() == UO_AddrOf)
4205 return uop->getSubExpr();
4209 /// Emit an argument that's being passed call-by-writeback. That is,
4211 /// might be copy-initialized with the current value of the given
4217 // Make an optimistic effort to emit the address as an l-value.
4219 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
4224 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
4227 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
4236 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
4238 CGF.ConvertTypeForMem(CRE->getType()->getPointeeType());
4243 CRE->getType());
4248 Address temp =
4249 CGF.CreateTempAlloca(destElemType, CGF.getPointerAlign(), "icr.temp");
4250 // Loading an l-value can introduce a cleanup if the l-value is __weak,
4251 // and that cleanup will be conditional if we can't prove that the l-value
4256 // Zero-initialize it if we're not doing a copy-initialization.
4257 bool shouldCopy = CRE->shouldCopy();
4261 CGF.Builder.CreateStore(null, temp);
4267 // If the address is *not* known to be non-null, we need to switch.
4273 finalArgument = temp.emitRawPointer(CGF);
4279 temp.emitRawPointer(CGF), "icr.argument");
4303 // Use an ordinary store, not a store-to-lvalue.
4304 CGF.Builder.CreateStore(src, temp);
4310 // and so otherwise we can violate the high-level semantics.
4324 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
4325 "icr.to-use");
4326 phiToUse->addIncoming(valueToUse, copyBB);
4327 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
4335 args.addWriteback(srcLV, temp, valueToUse);
4336 args.add(RValue::get(finalArgument), CRE->getType());
4363 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
4372 !PVD->getType()->isRecordType()) {
4373 auto Nullability = PVD->getType()->getNullability();
4376 PVD->getTypeSourceInfo();
4386 AttrLoc = NNAttr->getLocation();
4390 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4419 // The Swift calling conventions don't go through the target-specific
4433 // Determine whether the given argument is an Objective-C method
4436 const DeclContext *dc = method->getDeclContext();
4438 return classDecl->getTypeParamListAsWritten();
4442 return catDecl->getTypeParamList();
4449 /// EmitCallArgs - Emit call arguments for a function.
4461 // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would
4471 IsVariadic = MD->isVariadic();
4474 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4475 MD->param_type_end());
4478 IsVariadic = FPT->isVariadic();
4479 ExplicitCC = FPT->getExtInfo().getCC();
4480 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4481 FPT->param_type_end());
4491 (isGenericMethod || Ty->isVariablyModifiedType() ||
4492 Ty.getNonReferenceType()->isObjCRetainableType() ||
4496 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4504 "Extra arguments in non-variadic function!");
4510 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4511 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4515 // case, there are certain language constructs that require left-to-right
4527 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
4535 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
4537 PS->isDynamic());
4542 std::swap(Args.back(), *(&Args.back() - 1));
4555 unsigned Idx = LeftToRight ? I : E - I - 1;
4561 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4573 // non-null argument check for r-value only.
4576 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
4578 // @llvm.objectsize should never have side-effects and shouldn't need
4580 // regardless of right-to-leftness
4586 // Un-reverse the arguments we just evaluated so they match up with the LLVM
4604 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
4605 assert(!Dtor->isTrivial());
4665 assert(type->isReferenceType() == E->isGLValue() &&
4666 "reference binding to unmaterialized r-value!");
4668 if (E->isGLValue()) {
4669 assert(E->getObjectKind() == OK_Ordinary);
4676 // However, we still have to push an EH-only cleanup in case we unwind before
4678 if (type->isRecordType() &&
4679 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
4686 if (const auto *RD = type->getAsCXXRecordDecl())
4687 DestroyedInCallee = RD->hasNonTrivialDestructor();
4699 // Create a no-op GEP between the placeholder and the cleanup so we can
4713 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue &&
4714 !type->isArrayParameterType()) {
4715 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
4727 // functions to pointer-sized ints.
4729 return Arg->getType();
4731 if (Arg->getType()->isIntegerType() &&
4732 getContext().getTypeSize(Arg->getType()) <
4734 Arg->isNullPointerConstant(getContext(),
4739 return Arg->getType();
4748 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
4752 /// Emits a call to the given no-arguments nounwind runtime function.
4775 call->setDoesNotThrow();
4779 /// Emits a simple call (never an invoke) to the given no-arguments
4797 if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
4798 if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
4799 auto IID = CalleeFn->getIntrinsicID();
4816 call->setCallingConv(getRuntimeCC());
4818 if (CGM.shouldEmitConvergenceTokens() && call->isConvergent())
4836 invoke->setDoesNotReturn();
4837 invoke->setCallingConv(getRuntimeCC());
4840 call->setDoesNotReturn();
4841 call->setCallingConv(getRuntimeCC());
4859 call->setCallingConv(getRuntimeCC());
4899 /// such an attribute already exists, re-set it to the larger of the two options.
4926 AA = FuncDecl->getAttr<AlignedAttrTy>();
4938 // We may legitimately have non-power-of-2 alignment here.
4940 if (!AlignmentCI->getValue().isPowerOf2())
4945 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
4957 AA->getLocation(), Alignment, OffsetCI);
4971 Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
4972 if (Expr *Offset = AA->getOffset()) {
4974 if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
4990 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
5000 return VT->getPrimitiveSizeInBits().getKnownMinValue();
5002 return getMaxVectorWidth(AT->getElementType());
5006 for (auto *I : ST->elements())
5022 // Handle struct-return functions by passing a pointer to the
5037 if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
5038 (TargetDecl->hasAttr<TargetAttr>() ||
5039 (CurFuncDecl && CurFuncDecl->hasAttr<TargetAttr>())))
5043 // Some architectures (such as x86-64) have the ABI changed based on
5044 // attribute-target/features. Give them a chance to diagnose.
5059 IP = IP->getNextNode();
5066 AI->setAlignment(Align.getAsAlign());
5067 AI->setUsedWithInAlloca(true);
5068 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
5082 SRetPtr = makeNaturalAddressForPointer(CurFn->arg_begin() +
5120 const ABIArgInfo &ArgInfo = info_it->info;
5137 if (I->isAggregate()) {
5138 RawAddress Addr = I->hasLValue()
5139 ? I->getKnownLValue().getAddress()
5140 : I->getKnownRValue().getAggregateAddress();
5155 Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
5165 I->Ty, getContext().getTypeAlignInChars(I->Ty),
5166 "indirect-arg-temp");
5167 I->copyInto(*this, Addr);
5175 Addr = Addr.withElementType(ConvertTypeForMem(I->Ty));
5176 I->copyInto(*this, Addr);
5184 if (I->isAggregate()) {
5193 Address Addr = I->hasLValue()
5194 ? I->getKnownLValue().getAddress()
5195 : I->getKnownRValue().getAggregateAddress();
5199 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
5200 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
5201 TD->getAllocaAddrSpace()) &&
5210 } else if (I->hasLValue()) {
5211 auto LV = I->getKnownLValue();
5218 (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
5231 Addr.getType()->getAddressSpace() != IRFuncTy->
5232 getParamType(FirstIRArg)->getPointerAddressSpace())) {
5239 llvm::Value *V = getAsNaturalPointerTo(Addr, I->Ty);
5253 // For non-aggregate args and aggregate args meeting conditions above
5256 I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
5257 llvm::Value *Val = getAsNaturalPointerTo(AI, I->Ty);
5273 I->copyInto(*this, AI);
5284 ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
5288 if (!I->isAggregate())
5289 V = I->getKnownRValue().getScalarVal();
5292 I->hasLValue() ? I->getKnownLValue().getAddress()
5293 : I->getKnownRValue().getAggregateAddress());
5301 QualType pointeeTy = I->Ty->getPointeeType();
5306 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
5308 cast<llvm::AllocaInst>(V)->setSwiftError(true);
5315 if (ArgInfo.getCoerceToType() != V->getType() &&
5316 V->getType()->isIntegerTy())
5321 if (FirstIRArg < IRFuncTy->getNumParams() &&
5322 V->getType() != IRFuncTy->getParamType(FirstIRArg))
5323 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
5334 llvm::Type *SrcTy = ConvertTypeForMem(I->Ty);
5339 if (STy->containsHomogeneousScalableVectorTypes()) {
5341 "Only allow non-fractional movement of structure with "
5344 IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
5351 if (!I->isAggregate()) {
5352 Src = CreateMemTemp(I->Ty, "coerce");
5353 I->copyInto(*this, Src);
5355 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
5356 : I->getKnownRValue().getAggregateAddress();
5362 // Fast-isel and the optimizer generally like scalar values better than
5370 assert(STy->containsHomogeneousScalableVectorTypes() &&
5374 "Only allow non-fractional movement of structure with "
5376 assert(NumIRArgs == STy->getNumElements());
5380 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5390 // coerce-to logic, copy the source value into a temp alloca the size
5402 assert(NumIRArgs == STy->getNumElements());
5403 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5421 auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
5422 if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
5423 Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
5441 if (I->isAggregate()) {
5442 addr = I->hasLValue() ? I->getKnownLValue().getAddress()
5443 : I->getKnownRValue().getAggregateAddress();
5446 RValue RV = I->getKnownRValue();
5449 llvm::Type *scalarType = RV.getScalarVal()->getType();
5455 RV.getScalarVal()->getType(),
5456 CharUnits::fromQuantity(std::max(layout->getAlignment(), scalarAlign)),
5467 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5468 llvm::Type *eltType = coercionType->getElementType(i);
5487 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
5506 // If the callee is a bitcast of a non-variadic function to have a
5511 // can inline the function at -O0 if it is marked always_inline.
5513 llvm::Value *Ptr) -> llvm::Function * {
5514 if (!CalleeFT->isVarArg())
5519 if (CE->getOpcode() == llvm::Instruction::BitCast)
5520 Ptr = CE->getOperand(0);
5527 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
5531 if (OrigFT->isVarArg() ||
5532 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
5533 OrigFT->getReturnType() != CalleeFT->getReturnType())
5536 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
5537 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
5545 IRFuncTy = OrigFn->getFunctionType();
5560 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
5566 if (i < IRFuncTy->getNumParams())
5567 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
5574 getMaxVectorWidth(IRCallArgs[i]->getType()));
5579 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
5591 if (FD->hasAttr<StrictFPAttr>())
5595 // If -ffast-math is enabled and the function is guarded by an
5598 if (FD->hasAttr<OptimizeNoneAttr>() && getLangOpts().FastMath)
5599 CGM.AdjustMemoryAttribute(CalleePtr->getName(), Callee.getAbstractInfo(),
5602 // Add call-site nomerge attribute if exists.
5606 // Add call-site noinline attribute if exists.
5610 // Add call-site always_inline attribute if exists.
5615 // Apply some call-site-specific attributes.
5620 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
5622 !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
5648 if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5669 // Add the pointer-authentication bundle.
5673 if (FD->hasAttr<StrictFPAttr>())
5693 if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() &&
5694 CI->getCalledFunction()->getName().starts_with("_Z4sqrt")) {
5704 if (const auto *A = FD->getAttr<CFGuardAttr>()) {
5705 if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
5711 CI->setAttributes(Attrs);
5712 CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
5716 if (!CI->getType()->isVoidTy())
5717 CI->setName("call");
5719 if (CGM.shouldEmitConvergenceTokens() && CI->isConvergent())
5724 std::max(LargestVectorWidth, getMaxVectorWidth(CI->getType()));
5729 if (!CI->getCalledFunction())
5740 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
5741 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
5746 else if (!getTarget().hasFeature("pcrelative-memops")) {
5749 else if (Call->isIndirectCall())
5752 if (!cast<FunctionDecl>(TargetDecl)->isDefined())
5769 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
5775 TargetDecl->hasAttr<MSAllocatorAttr>())
5776 getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);
5779 if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) {
5784 CI->setMetadata("srcloc", MDT);
5792 if (CI->doesNotReturn()) {
5800 if (auto *F = CI->getCalledFunction())
5801 F->removeFnAttr(llvm::Attribute::NoReturn);
5802 CI->removeFnAttr(llvm::Attribute::NoReturn);
5836 if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn()))
5839 if (CI->getType()->isVoidTy())
5854 // Emit any call-associated writebacks immediately. Arguably this
5855 // should happen after any return-value munging.
5878 assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
5879 bool requiresExtract = isa<llvm::StructType>(CI->getType());
5882 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5883 llvm::Type *eltType = coercionType->getElementType(i);
5927 if (V->getType() != RetIRTy)
5940 dyn_cast<llvm::ScalableVectorType>(V->getType())) {
5941 if (FixedDstTy->getElementType() ==
5942 ScalableSrcTy->getElementType()) {
5963 // no_unique_address); omit the store for such types - as there is no
5970 llvm::TypeSize::getFixed(DestSize - RetAI.getDirectOffset()),
5992 // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
6010 CE ? CE->getBeginLoc() : SourceLocation());
6020 VAListAddr = VE->isMicrosoftABI() ? EmitMSVAListRef(VE->getSubExpr())
6021 : EmitVAListRef(VE->getSubExpr());
6022 QualType Ty = VE->getType();
6023 if (VE->isMicrosoftABI())