//===- ARM.cpp ------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class ARMABIInfo : public ABIInfo {
  ARMABIKind Kind;
  bool IsFloatABISoftFP;

public:
  ARMABIInfo(CodeGenTypes &CGT, ARMABIKind Kind) : ABIInfo(CGT), Kind(Kind) {
    setCCs();
    IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
                       CGT.getCodeGenOpts().FloatABI == ""; // default
  }

  bool isEABI() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::EABI:
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABI:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return getTarget().getTriple().isOHOSFamily();
    }
  }

  bool isEABIHF() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }

  ARMABIKind getABIKind() const { return Kind; }

  bool allowBFloatArgsAndRet() const override {
    return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
  }

private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
                                unsigned functionCallConv) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
                                  unsigned functionCallConv) const;
  ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
                                          uint64_t Members) const;
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isIllegalVectorType(QualType Ty) const;
  bool containsAnyFP16Vectors(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;

  bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  llvm::CallingConv::ID getLLVMDefaultCC() const;
  llvm::CallingConv::ID getABIDefaultCC() const;
  void setCCs();
};

class ARMSwiftABIInfo : public SwiftABIInfo {
public:
  explicit ARMSwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};

class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIKind K)
      : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {
    SwiftInfo = std::make_unique<ARMSwiftABIInfo>(CGT);
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 13;
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

    // 0-15 are the 16 integer registers.
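    // Each of them is recorded as 4 bytes wide in the DWARF EH register-size
    // table.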
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo<ARMABIInfo>().isEABI())
      return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;
    auto *Fn = cast<llvm::Function>(GV);

    if (const auto *TA = FD->getAttr<TargetAttr>()) {
      ParsedTargetAttr Attr =
          CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
      if (!Attr.BranchProtection.empty()) {
        TargetInfo::BranchProtectionInfo BPI;
        StringRef DiagMsg;
        StringRef Arch =
            Attr.CPU.empty() ? CGM.getTarget().getTargetOpts().CPU : Attr.CPU;
        if (!CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                      Arch, BPI, DiagMsg)) {
          CGM.getDiags().Report(
              D->getLocation(),
              diag::warn_target_unsupported_branch_protection_attribute)
              << Arch;
        } else {
          static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
          assert(static_cast<unsigned>(BPI.SignReturnAddr) <= 2 &&
                 "Unexpected SignReturnAddressScopeKind");
          Fn->addFnAttr(
              "sign-return-address",
              SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);

          Fn->addFnAttr("branch-target-enforcement",
                        BPI.BranchTargetEnforcement ? "true" : "false");
        }
      } else if (CGM.getLangOpts().BranchTargetEnforcement ||
                 CGM.getLangOpts().hasSignReturnAddress()) {
        // If the Branch Protection attribute is missing, validate the target
        // Architecture attribute against Branch Protection command line
        // settings.
        if (!CGM.getTarget().isBranchProtectionSupportedArch(Attr.CPU))
          CGM.getDiags().Report(
              D->getLocation(),
              diag::warn_target_unsupported_branch_protection_attribute)
              << Attr.CPU;
      }
    }

    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = ""; break;
    case ARMInterruptAttr::IRQ:     Kind = "IRQ"; break;
    case ARMInterruptAttr::FIQ:     Kind = "FIQ"; break;
    case ARMInterruptAttr::SWI:     Kind = "SWI"; break;
    case ARMInterruptAttr::ABORT:   Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF:   Kind = "UNDEF"; break;
    }

    Fn->addFnAttr("interrupt", Kind);

    ARMABIKind ABI = getABIInfo<ARMABIInfo>().getABIKind();
    if (ABI == ARMABIKind::APCS)
      return;

    // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
    // however this is not necessarily true on taking any interrupt. Instruct
    // the backend to perform a realignment as part of the function prologue.
    llvm::AttrBuilder B(Fn->getContext());
    B.addStackAlignmentAttr(8);
    Fn->addFnAttrs(B);
  }
};

class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
public:
  WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIKind K)
      : ARMTargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WindowsARMTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
} // namespace

void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!::classifyReturnType(getCXXABI(), FI, *this))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
                                            FI.getCallingConvention());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, FI.isVariadic(),
                                  FI.getCallingConvention());

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}

/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
  // The default calling convention that LLVM will infer.
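  // For example, a *-gnueabihf triple infers AAPCS_VFP here, while a plain
  // *-gnueabi triple infers AAPCS.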
  if (isEABIHF() || getTarget().getTriple().isWatchABI())
    return llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    return llvm::CallingConv::ARM_AAPCS;
  else
    return llvm::CallingConv::ARM_APCS;
}

/// Return the calling convention that our ABI would like us to use
/// as the C calling convention.
llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
  switch (getABIKind()) {
  case ARMABIKind::APCS:
    return llvm::CallingConv::ARM_APCS;
  case ARMABIKind::AAPCS:
    return llvm::CallingConv::ARM_AAPCS;
  case ARMABIKind::AAPCS_VFP:
    return llvm::CallingConv::ARM_AAPCS_VFP;
  case ARMABIKind::AAPCS16_VFP:
    return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");
}

void ARMABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);

  // Don't muddy up the IR with a ton of explicit annotations if
  // they'd just match what LLVM will infer from the triple.
  llvm::CallingConv::ID abiCC = getABIDefaultCC();
  if (abiCC != getLLVMDefaultCC())
    RuntimeCC = abiCC;
}

ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64 || Size == 128) {
    auto *ResType = llvm::FixedVectorType::get(
        llvm::Type::getInt32Ty(getVMContext()), Size / 32);
    return ABIArgInfo::getDirect(ResType);
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
                                                    const Type *Base,
                                                    uint64_t Members) const {
  assert(Base && "Base class should be set for homogeneous aggregate");
  // Base can be a floating-point or a vector.
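  // For example, struct S { float x, y, z; } is a homogeneous aggregate with
  // Base = float and Members = 3.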
  if (const VectorType *VT = Base->getAs<VectorType>()) {
    // FP16 vectors should be converted to integer vectors
    if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
      uint64_t Size = getContext().getTypeSize(VT);
      auto *NewVecTy = llvm::FixedVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), Size / 32);
      llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }
  unsigned Align = 0;
  if (getABIKind() == ARMABIKind::AAPCS ||
      getABIKind() == ARMABIKind::AAPCS_VFP) {
    // For alignment adjusted HFAs, cap the argument alignment to 8, leave it
    // default otherwise.
    Align = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
    Align = (Align > BaseAlign && Align >= 8) ? 8 : 0;
  }
  return ABIArgInfo::getDirect(nullptr, 0, nullptr, false, Align);
}

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
                                            unsigned functionCallConv) const {
  // 6.1.2.1 The following argument types are VFP CPRCs:
  //   A single-precision floating-point type (including promoted
  //   half-precision types); A double-precision floating-point type;
  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
  //   with a Base Type of a single- or double-precision floating-point type,
  //   64-bit containerized vectors or 128-bit containerized vectors with one
  //   to four Elements.
  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
      Ty = EnumTy->getDecl()->getIntegerType();
    }

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 64)
        return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (IsAAPCS_VFP) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members))
      return classifyHomogeneousAggregate(Ty, Base, Members);
  } else if (getABIKind() == ARMABIKind::AAPCS16_VFP) {
    // WatchOS does have homogeneous aggregates. Note that we intentionally use
    // this convention even for a variadic function: the backend will use GPRs
    // if needed.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
      llvm::Type *Ty =
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }

  if (getABIKind() == ARMABIKind::AAPCS16_VFP &&
      getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
    // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
    // bigger than 128-bits, they get placed in space allocated by the caller,
    // and a pointer is passed.
    return ABIArgInfo::getIndirect(
        CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
  }

  // Support byval for ARM.
  // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
  // most 8-byte. We realign the indirect argument if type alignment is bigger
  // than ABI alignment.
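  // Byval is only used for aggregates larger than 64 bytes; smaller aggregates
  // are coerced to register-sized integer arrays below.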
  uint64_t ABIAlign = 4;
  uint64_t TyAlign;
  if (getABIKind() == ARMABIKind::AAPCS_VFP ||
      getABIKind() == ARMABIKind::AAPCS) {
    TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    ABIAlign = std::clamp(TyAlign, (uint64_t)4, (uint64_t)8);
  } else {
    TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
  }
  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    assert(getABIKind() != ARMABIKind::AAPCS16_VFP && "unexpected byval");
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of
  // same size and alignment.
  if (getTarget().isRenderScriptTarget()) {
    return coerceToIntArray(Ty, getContext(), getVMContext());
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type *ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
  if (TyAlign <= 4) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  }

  return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}

static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
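  // e.g. a struct containing a single float is therefore not integer-like,
  // even though it fits in one word.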
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, we only need to verify they are "integer
    // like". We still have to disallow a subsequent non-bitfield, for example:
    //   struct { int : 0; int x }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
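    // e.g. struct { struct {} e; int x; } is therefore not integer-like.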
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}

ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
                                          unsigned functionCallConv) const {

  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // Large vector types should be returned via memory.
    if (getContext().getTypeSize(RetTy) > 128)
      return getNaturalAlignIndirect(RetTy);
    // TODO: FP16/BF16 vectors should be converted to integer vectors
    // This check is similar to isIllegalVectorType - refactor?
    if ((!getTarget().hasLegalHalfType() &&
         (VT->getElementType()->isFloat16Type() ||
          VT->getElementType()->isHalfType())) ||
        (IsFloatABISoftFP &&
         VT->getElementType()->isBFloat16Type()))
      return coerceIllegalVector(RetTy);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 64)
        return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);

    return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                                : ABIArgInfo::getDirect();
  }

  // Are we following APCS?
  if (getABIKind() == ARMABIKind::APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
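    // e.g. _Complex short (32 bits) is returned directly as an i32.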
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return getNaturalAlignIndirect(RetTy);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(RetTy, Base, Members))
      return classifyHomogeneousAggregate(RetTy, Base, Members);
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
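    // e.g. a 1-byte struct is returned as i8, a 2-byte struct as i16, and a
    // 3- or 4-byte struct as i32.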
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  } else if (Size <= 128 && getABIKind() == ARMABIKind::AAPCS16_VFP) {
    llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
    llvm::Type *CoerceTy =
        llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
    return ABIArgInfo::getDirect(CoerceTy);
  }

  return getNaturalAlignIndirect(RetTy);
}

/// isIllegalVectorType - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On targets that don't support half, fp16 or bfloat, they are expanded
    // into float, and we don't want the ABI to depend on whether or not they
    // are supported in hardware. Thus return true to coerce vectors of these
    // types into integer vectors.
    // We do not depend on hasLegalHalfType for bfloat as it is a
    // separate IR type.
    if ((!getTarget().hasLegalHalfType() &&
         (VT->getElementType()->isFloat16Type() ||
          VT->getElementType()->isHalfType())) ||
        (IsFloatABISoftFP &&
         VT->getElementType()->isBFloat16Type()))
      return true;
    if (isAndroid()) {
      // Android shipped using Clang 3.1, which supported a slightly different
      // vector ABI. The primary differences were that 3-element vector types
      // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
      // accepts that legacy behavior for Android only.
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      // NumElements should be power of 2 or equal to 3.
      if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
        return true;
    } else {
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      uint64_t Size = getContext().getTypeSize(VT);
      // NumElements should be power of 2.
      if (!llvm::isPowerOf2_32(NumElements))
        return true;
      // Size should be greater than 32 bits.
      return Size <= 32;
    }
  }
  return false;
}

/// Return true if a type contains any 16-bit floating point vectors
bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getSize().getZExtValue();
    if (NElements == 0)
      return false;
    return containsAnyFP16Vectors(AT->getElementType());
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
            return containsAnyFP16Vectors(B.getType());
          }))
        return true;

    if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
          return FD && containsAnyFP16Vectors(FD->getType());
        }))
      return true;

    return false;
  } else {
    if (const VectorType *VT = Ty->getAs<VectorType>())
      return (VT->getElementType()->isFloat16Type() ||
              VT->getElementType()->isBFloat16Type() ||
              VT->getElementType()->isHalfType());
    return false;
  }
}

bool ARMSwiftABIInfo::isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                                        unsigned NumElts) const {
  if (!llvm::isPowerOf2_32(NumElts))
    return false;
  unsigned size = CGT.getDataLayout().getTypeStoreSizeInBits(EltTy);
  if (size > 64)
    return false;
  if (VectorSize.getQuantity() != 8 &&
      (VectorSize.getQuantity() != 16 || NumElts == 1))
    return false;
  return true;
}

bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, or 64-bit or 128-bit vectors.
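  // (long double is also accepted; under AAPCS it has the same 64-bit
  // representation as double.)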
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                   uint64_t Members) const {
  return Members <= 4;
}

bool ARMABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
  // AAPCS32 says that the rule for whether something is a homogeneous
  // aggregate is applied to the output of the data layout decision. So
  // anything that doesn't affect the data layout also does not affect
  // homogeneity. In particular, zero-length bitfields don't stop a struct
  // being homogeneous.
  return true;
}

bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
                                        bool acceptHalf) const {
  // Give precedence to user-specified calling conventions.
  if (callConvention != llvm::CallingConv::C)
    return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
  else
    return (getABIKind() == ARMABIKind::AAPCS_VFP) ||
           (acceptHalf && (getABIKind() == ARMABIKind::AAPCS16_VFP));
}

Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(4);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
    auto *Load = CGF.Builder.CreateLoad(VAListAddr);
    return Address(Load, CGF.ConvertTypeForMem(Ty), SlotSize);
  }

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);

  // Use indirect if size of the illegal vector is bigger than 16 bytes.
  bool IsIndirect = false;
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
    IsIndirect = true;

    // ARMv7k passes structs bigger than 16 bytes indirectly, in space
    // allocated by the caller.
  } else if (TySize > CharUnits::fromQuantity(16) &&
             getABIKind() == ARMABIKind::AAPCS16_VFP &&
             !isHomogeneousAggregate(Ty, Base, Members)) {
    IsIndirect = true;

    // Otherwise, bound the type's ABI alignment.
    // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
    // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
    // Our callers should be prepared to handle an under-aligned address.
  } else if (getABIKind() == ARMABIKind::AAPCS_VFP ||
             getABIKind() == ARMABIKind::AAPCS) {
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
  } else if (getABIKind() == ARMABIKind::AAPCS16_VFP) {
    // ARMv7k allows type alignment up to 16 bytes.
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
  } else {
    TyAlignForABI = CharUnits::fromQuantity(4);
  }

  TypeInfoChars TyInfo(TySize, TyAlignForABI, AlignRequirementKind::None);
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, SlotSize,
                          /*AllowHigherAlign*/ true);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind Kind) {
  return std::make_unique<ARMTargetCodeGenInfo>(CGM.getTypes(), Kind);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWindowsARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind K) {
  return std::make_unique<WindowsARMTargetCodeGenInfo>(CGM.getTypes(), K);
}