//===- AArch64.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// AArch64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

/// ABI classification for AArch64. `Kind` selects between the supported
/// variants (AAPCS, DarwinPCS, Win64), which differ in integer promotion,
/// empty-record handling, aggregate alignment and va_arg lowering.
class AArch64ABIInfo : public ABIInfo {
  AArch64ABIKind Kind;

public:
  AArch64ABIInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : ABIInfo(CGT), Kind(Kind) {}

private:
  AArch64ABIKind getABIKind() const { return Kind; }
  bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; }

  ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic,
                                  unsigned CallingConvention) const;
  // Coerce a vector type with no legal AArch64 register representation
  // (see isIllegalVectorType) to one that has.
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;

  bool isIllegalVectorType(QualType Ty) const;

  // Classify the return type and every argument of the function.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!::classifyReturnType(getCXXABI(), FI, *this))
      FI.getReturnInfo() =
          classifyReturnType(FI.getReturnType(), FI.isVariadic());

    for (auto &it : FI.arguments())
      it.info = classifyArgumentType(it.type, FI.isVariadic(),
                                     FI.getCallingConvention());
  }

  Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                          CodeGenFunction &CGF) const;

  Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const;

  // Dispatch va_arg emission to the scheme matching the ABI kind.
  // SVE (scalable) types are rejected outright.
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    llvm::Type *BaseTy = CGF.ConvertType(Ty);
    if (isa<llvm::ScalableVectorType>(BaseTy))
      llvm::report_fatal_error("Passing SVE types to variadic functions is "
                               "currently not supported");

    return Kind == AArch64ABIKind::Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
           : isDarwinPCS()               ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
                                         : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
  }

  Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                      QualType Ty) const override;

  bool allowBFloatArgsAndRet() const override {
    return getTarget().hasBFloat16Type();
  }
};

/// Swift-specific ABI queries for AArch64.
class AArch64SwiftABIInfo : public SwiftABIInfo {
public:
  explicit AArch64SwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};

class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {
    SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  // DWARF register number of the stack pointer (SP is register 31).
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 31;
  }

  bool doesReturnSlotInterfereWithArgs() const override { return false; }

  // Translate a per-function "branch-protection" option from the function's
  // target attribute into the corresponding IR function attributes
  // (return-address signing and branch-target enforcement).
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    const auto *TA = FD->getAttr<TargetAttr>();
    if (TA == nullptr)
      return;

    ParsedTargetAttr Attr =
        CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
    if (Attr.BranchProtection.empty())
      return;

    TargetInfo::BranchProtectionInfo BPI;
    StringRef Error;
    // Validation is expected to succeed here; the result is only needed to
    // populate BPI (the assert documents that assumption).
    (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                   Attr.CPU, BPI, Error);
    assert(Error.empty());

    auto *Fn = cast<llvm::Function>(GV);
    // Indexed by the numeric value of BPI.SignReturnAddr.
    static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
    Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);

    if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
      Fn->addFnAttr("sign-return-address-key",
                    BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
                        ? "a_key"
                        : "b_key");
    }

    Fn->addFnAttr("branch-target-enforcement",
                  BPI.BranchTargetEnforcement ? "true" : "false");
  }

  // Permit scalarizing a struct wrapping [8 x i64] as an inline-asm operand
  // when the target has the "ls64" feature (512-bit load/store instructions).
  bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
                                llvm::Type *Ty) const override {
    if (CGF.getTarget().hasFeature("ls64")) {
      auto *ST = dyn_cast<llvm::StructType>(Ty);
      if (ST && ST->getNumElements() == 1) {
        auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
        if (AT && AT->getNumElements() == 8 &&
            AT->getElementType()->isIntegerTy(64))
          return true;
      }
    }
    return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
  }
};

/// Windows variant: adds MSVC linker option emission and stack-probe
/// attributes on top of the common AArch64 handling.
class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind K)
      : AArch64TargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  // Only definitions get the extra stack-probe attributes.
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
} // namespace

// Map a vector type with no legal AArch64 representation onto one that has:
// fixed-length SVE types become the matching scalable vectors, small generic
// vectors are passed as integers, and 64/128-bit ones as <N x i32> vectors;
// anything else goes indirect.
ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
  assert(Ty->isVectorType() && "expected vector type!");

  const auto *VT = Ty->castAs<VectorType>();
  if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
    assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    // Fixed-length predicates are coerced to <vscale x 16 x i1>.
    return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
        llvm::Type::getInt1Ty(getVMContext()), 16));
  }

  if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) {
    assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");

    // Pick the scalable vector whose element type matches; every case below
    // totals one 128-bit SVE granule (e.g. 16 x i8, 8 x i16, 4 x i32).
    const auto *BT = VT->getElementType()->castAs<BuiltinType>();
    llvm::ScalableVectorType *ResType = nullptr;
    switch (BT->getKind()) {
    default:
      llvm_unreachable("unexpected builtin type for SVE vector!");
    case BuiltinType::SChar:
    case BuiltinType::UChar:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getInt8Ty(getVMContext()), 16);
      break;
    case BuiltinType::Short:
    case BuiltinType::UShort:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getInt16Ty(getVMContext()), 8);
      break;
    case BuiltinType::Int:
    case BuiltinType::UInt:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);
      break;
    case BuiltinType::Long:
    case BuiltinType::ULong:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2);
      break;
    case BuiltinType::Half:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getHalfTy(getVMContext()), 8);
      break;
    case BuiltinType::Float:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getFloatTy(getVMContext()), 4);
      break;
    case BuiltinType::Double:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getDoubleTy(getVMContext()), 2);
      break;
    case BuiltinType::BFloat16:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getBFloatTy(getVMContext()), 8);
      break;
    }
    return ABIArgInfo::getDirect(ResType);
  }

  uint64_t Size = getContext().getTypeSize(Ty);
  // Android promotes <2 x i8> to i16, not i32
  if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
    llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64) {
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 128) {
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
    return ABIArgInfo::getDirect(ResType);
  }
  // No register mapping exists: pass indirectly via a pointer.
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

// Classify a single argument for AArch64 argument passing.
ABIArgInfo
AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic,
                                     unsigned CallingConvention) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // _BitInt wider than 128 bits does not fit in registers; pass indirectly.
    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(Ty);

    // Only DarwinPCS extends small integer arguments; AAPCS passes them
    // direct without extension.
    return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
                ? ABIArgInfo::getExtend(Ty)
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Empty records are always ignored on Darwin, but actually passed in C++ mode
  // elsewhere for GNU compatibility.
  uint64_t Size = getContext().getTypeSize(Ty);
  bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
  if (IsEmpty || Size == 0) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();

    // GNU C mode. The only argument that gets ignored is an empty one with size
    // 0.
    if (IsEmpty && Size == 0)
      return ABIArgInfo::getIgnore();
    // Non-empty (or sized) record in C++ mode: pass as a single i8.
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
                 CallingConvention == llvm::CallingConv::Win64;
  bool IsWinVariadic = IsWin64 && IsVariadic;
  // In variadic functions on Windows, all composite types are treated alike,
  // no special handling of HFAs/HVAs.
  if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
    if (Kind != AArch64ABIKind::AAPCS)
      return ABIArgInfo::getDirect(
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

    // For alignment adjusted HFAs, cap the argument alignment to 16, leave it
    // default otherwise.
    unsigned Align =
        getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
    Align = (Align > BaseAlign && Align >= 16) ? 16 : 0;
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
        nullptr, true, Align);
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(Ty, getContext(), getVMContext());
    }
    unsigned Alignment;
    if (Kind == AArch64ABIKind::AAPCS) {
      // AAPCS rounds the alignment to either 64 or 128 bits.
      Alignment = getContext().getTypeUnadjustedAlign(Ty);
      Alignment = Alignment < 128 ? 64 : 128;
    } else {
      Alignment =
          std::max(getContext().getTypeAlign(Ty),
                   (unsigned)getTarget().getPointerWidth(LangAS::Default));
    }
    Size = llvm::alignTo(Size, Alignment);

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
    return ABIArgInfo::getDirect(
        Size == Alignment ? BaseTy
                          : llvm::ArrayType::get(BaseTy, Size / Alignment));
  }

  // Larger aggregates are passed indirectly (pointer to a caller-made copy).
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

// Classify the function's return type per the AArch64 return conventions.
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
                                              bool IsVariadic) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Fixed-length SVE vectors must be coerced to their scalable equivalents.
  if (const auto *VT = RetTy->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
        VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
      return coerceIllegalVector(RetTy);
  }

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    // _BitInt wider than 128 bits is returned indirectly.
    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(RetTy);

    // Only DarwinPCS extends small integer returns.
    return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members) &&
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
        IsVariadic))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }

    if (Size <= 64 && getDataLayout().isLittleEndian()) {
      // Composite types are returned in lower bits of a 64-bit register for LE,
      // and in higher bits for BE. However, integer types are always returned
      // in lower bits for both LE and BE, and they are not rounded up to
      // 64-bits. We can skip rounding up of composite types for LE, but not for
      // BE, otherwise composite types will be indistinguishable from integer
      // types.
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
    }

    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  // Anything larger than 16 bytes is returned via an sret pointer.
  return getNaturalAlignIndirect(RetTy);
}

/// isIllegalVectorType - check whether the vector type is legal for AArch64.
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is a fixed-length SVE vector. These types are
    // represented as scalable vectors in function args/return and must be
    // coerced from fixed vectors.
    if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
        VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
      return true;

    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be power of 2.
    if (!llvm::isPowerOf2_32(NumElements))
      return true;

    // arm64_32 has to be compatible with the ARM logic here, which allows huge
    // vectors for some reason.
    llvm::Triple Triple = getTarget().getTriple();
    if (Triple.getArch() == llvm::Triple::aarch64_32 &&
        Triple.isOSBinFormatMachO())
      return Size <= 32;

    // Legal sizes are exactly 64 and 128 bits; a 128-bit vector with a single
    // element is still illegal.
    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}

// Swift ABI query: a vector is legal when its element count is a power of two
// and its total size is 8 bytes, or 16 bytes with more than one element.
bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
                                            llvm::Type *EltTy,
                                            unsigned NumElts) const {
  if (!llvm::isPowerOf2_32(NumElts))
    return false;
  if (VectorSize.getQuantity() != 8 &&
      (VectorSize.getQuantity() != 16 || NumElts == 1))
    return false;
  return true;
}

bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS64 must have base types of a floating
  // point type or a short-vector type. This is the same as the 32-bit ABI,
  // but with the difference that any floating-point type is allowed,
  // including __fp16.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Short vectors: only 64- and 128-bit vectors qualify.
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

// AAPCS64 limits homogeneous aggregates to at most four members.
bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
}

bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
    const {
  // AAPCS64 says that the rule for whether something is a homogeneous
  // aggregate is applied to the output of the data layout decision. So
  // anything that doesn't affect the data layout also does not affect
  // homogeneity. In particular, zero-length bitfields don't stop a struct
  // being homogeneous.
  return true;
}

// Emit a va_arg access following the AAPCS64 va_list scheme (PCS section B.4).
Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // Classify Ty exactly as a variadic argument would have been passed.
  ABIArgInfo AI = classifyArgumentType(Ty, /*IsVariadic=*/true,
                                       CGF.CurFnInfo->getCallingConvention());
  // Empty records are ignored for parameter passing purposes.
  if (AI.isIgnore()) {
    // Nothing was actually passed. Load the first pointer field of the
    // va_list (the B.4 layout below starts with `void *__stack`) so the
    // caller still receives a valid, correctly-typed address.
    uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
    CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
    VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
    auto *Load = CGF.Builder.CreateLoad(VAListAddr);
    return Address(Load, CGF.ConvertTypeForMem(Ty), SlotSize);
  }

  bool IsIndirect = AI.isIndirect();

  // Determine the LLVM type the argument occupies when passed: a pointer if
  // indirect, otherwise the coerced type if classification produced one.
  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  // An argument coerced to [N x T] occupies N registers of element type T.
  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  // FP/vector payloads go through the FP/SIMD register save area, everything
  // else through the general-register area.
  bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
  // Indirect arguments consume one 8-byte pointer slot regardless of size.
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // 3 is the field number of __gr_offs
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    // General registers are 8 bytes each.
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
    // Each FP/SIMD register save slot is 16 bytes.
    RegSize = 16 * NumRegs;
  }

  //=======================================
  // Find out where argument was passed
  //=======================================

  // If reg_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers, the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
  // align __gr_offs to calculate the potential address.
598*06c3fb27SDimitry Andric if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) { 599*06c3fb27SDimitry Andric int Align = TyAlign.getQuantity(); 600*06c3fb27SDimitry Andric 601*06c3fb27SDimitry Andric reg_offs = CGF.Builder.CreateAdd( 602*06c3fb27SDimitry Andric reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1), 603*06c3fb27SDimitry Andric "align_regoffs"); 604*06c3fb27SDimitry Andric reg_offs = CGF.Builder.CreateAnd( 605*06c3fb27SDimitry Andric reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align), 606*06c3fb27SDimitry Andric "aligned_regoffs"); 607*06c3fb27SDimitry Andric } 608*06c3fb27SDimitry Andric 609*06c3fb27SDimitry Andric // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list. 610*06c3fb27SDimitry Andric // The fact that this is done unconditionally reflects the fact that 611*06c3fb27SDimitry Andric // allocating an argument to the stack also uses up all the remaining 612*06c3fb27SDimitry Andric // registers of the appropriate kind. 613*06c3fb27SDimitry Andric llvm::Value *NewOffset = nullptr; 614*06c3fb27SDimitry Andric NewOffset = CGF.Builder.CreateAdd( 615*06c3fb27SDimitry Andric reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs"); 616*06c3fb27SDimitry Andric CGF.Builder.CreateStore(NewOffset, reg_offs_p); 617*06c3fb27SDimitry Andric 618*06c3fb27SDimitry Andric // Now we're in a position to decide whether this argument really was in 619*06c3fb27SDimitry Andric // registers or not. 
620*06c3fb27SDimitry Andric llvm::Value *InRegs = nullptr; 621*06c3fb27SDimitry Andric InRegs = CGF.Builder.CreateICmpSLE( 622*06c3fb27SDimitry Andric NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg"); 623*06c3fb27SDimitry Andric 624*06c3fb27SDimitry Andric CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock); 625*06c3fb27SDimitry Andric 626*06c3fb27SDimitry Andric //======================================= 627*06c3fb27SDimitry Andric // Argument was in registers 628*06c3fb27SDimitry Andric //======================================= 629*06c3fb27SDimitry Andric 630*06c3fb27SDimitry Andric // Now we emit the code for if the argument was originally passed in 631*06c3fb27SDimitry Andric // registers. First start the appropriate block: 632*06c3fb27SDimitry Andric CGF.EmitBlock(InRegBlock); 633*06c3fb27SDimitry Andric 634*06c3fb27SDimitry Andric llvm::Value *reg_top = nullptr; 635*06c3fb27SDimitry Andric Address reg_top_p = 636*06c3fb27SDimitry Andric CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p"); 637*06c3fb27SDimitry Andric reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top"); 638*06c3fb27SDimitry Andric Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs), 639*06c3fb27SDimitry Andric CGF.Int8Ty, CharUnits::fromQuantity(IsFPR ? 16 : 8)); 640*06c3fb27SDimitry Andric Address RegAddr = Address::invalid(); 641*06c3fb27SDimitry Andric llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty), *ElementTy = MemTy; 642*06c3fb27SDimitry Andric 643*06c3fb27SDimitry Andric if (IsIndirect) { 644*06c3fb27SDimitry Andric // If it's been passed indirectly (actually a struct), whatever we find from 645*06c3fb27SDimitry Andric // stored registers or on the stack will actually be a struct **. 
646*06c3fb27SDimitry Andric MemTy = llvm::PointerType::getUnqual(MemTy); 647*06c3fb27SDimitry Andric } 648*06c3fb27SDimitry Andric 649*06c3fb27SDimitry Andric const Type *Base = nullptr; 650*06c3fb27SDimitry Andric uint64_t NumMembers = 0; 651*06c3fb27SDimitry Andric bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers); 652*06c3fb27SDimitry Andric if (IsHFA && NumMembers > 1) { 653*06c3fb27SDimitry Andric // Homogeneous aggregates passed in registers will have their elements split 654*06c3fb27SDimitry Andric // and stored 16-bytes apart regardless of size (they're notionally in qN, 655*06c3fb27SDimitry Andric // qN+1, ...). We reload and store into a temporary local variable 656*06c3fb27SDimitry Andric // contiguously. 657*06c3fb27SDimitry Andric assert(!IsIndirect && "Homogeneous aggregates should be passed directly"); 658*06c3fb27SDimitry Andric auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0)); 659*06c3fb27SDimitry Andric llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0)); 660*06c3fb27SDimitry Andric llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers); 661*06c3fb27SDimitry Andric Address Tmp = CGF.CreateTempAlloca(HFATy, 662*06c3fb27SDimitry Andric std::max(TyAlign, BaseTyInfo.Align)); 663*06c3fb27SDimitry Andric 664*06c3fb27SDimitry Andric // On big-endian platforms, the value will be right-aligned in its slot. 
665*06c3fb27SDimitry Andric int Offset = 0; 666*06c3fb27SDimitry Andric if (CGF.CGM.getDataLayout().isBigEndian() && 667*06c3fb27SDimitry Andric BaseTyInfo.Width.getQuantity() < 16) 668*06c3fb27SDimitry Andric Offset = 16 - BaseTyInfo.Width.getQuantity(); 669*06c3fb27SDimitry Andric 670*06c3fb27SDimitry Andric for (unsigned i = 0; i < NumMembers; ++i) { 671*06c3fb27SDimitry Andric CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset); 672*06c3fb27SDimitry Andric Address LoadAddr = 673*06c3fb27SDimitry Andric CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset); 674*06c3fb27SDimitry Andric LoadAddr = LoadAddr.withElementType(BaseTy); 675*06c3fb27SDimitry Andric 676*06c3fb27SDimitry Andric Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i); 677*06c3fb27SDimitry Andric 678*06c3fb27SDimitry Andric llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr); 679*06c3fb27SDimitry Andric CGF.Builder.CreateStore(Elem, StoreAddr); 680*06c3fb27SDimitry Andric } 681*06c3fb27SDimitry Andric 682*06c3fb27SDimitry Andric RegAddr = Tmp.withElementType(MemTy); 683*06c3fb27SDimitry Andric } else { 684*06c3fb27SDimitry Andric // Otherwise the object is contiguous in memory. 685*06c3fb27SDimitry Andric 686*06c3fb27SDimitry Andric // It might be right-aligned in its slot. 
687*06c3fb27SDimitry Andric CharUnits SlotSize = BaseAddr.getAlignment(); 688*06c3fb27SDimitry Andric if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect && 689*06c3fb27SDimitry Andric (IsHFA || !isAggregateTypeForABI(Ty)) && 690*06c3fb27SDimitry Andric TySize < SlotSize) { 691*06c3fb27SDimitry Andric CharUnits Offset = SlotSize - TySize; 692*06c3fb27SDimitry Andric BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset); 693*06c3fb27SDimitry Andric } 694*06c3fb27SDimitry Andric 695*06c3fb27SDimitry Andric RegAddr = BaseAddr.withElementType(MemTy); 696*06c3fb27SDimitry Andric } 697*06c3fb27SDimitry Andric 698*06c3fb27SDimitry Andric CGF.EmitBranch(ContBlock); 699*06c3fb27SDimitry Andric 700*06c3fb27SDimitry Andric //======================================= 701*06c3fb27SDimitry Andric // Argument was on the stack 702*06c3fb27SDimitry Andric //======================================= 703*06c3fb27SDimitry Andric CGF.EmitBlock(OnStackBlock); 704*06c3fb27SDimitry Andric 705*06c3fb27SDimitry Andric Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p"); 706*06c3fb27SDimitry Andric llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack"); 707*06c3fb27SDimitry Andric 708*06c3fb27SDimitry Andric // Again, stack arguments may need realignment. In this case both integer and 709*06c3fb27SDimitry Andric // floating-point ones might be affected. 
710*06c3fb27SDimitry Andric if (!IsIndirect && TyAlign.getQuantity() > 8) { 711*06c3fb27SDimitry Andric int Align = TyAlign.getQuantity(); 712*06c3fb27SDimitry Andric 713*06c3fb27SDimitry Andric OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty); 714*06c3fb27SDimitry Andric 715*06c3fb27SDimitry Andric OnStackPtr = CGF.Builder.CreateAdd( 716*06c3fb27SDimitry Andric OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1), 717*06c3fb27SDimitry Andric "align_stack"); 718*06c3fb27SDimitry Andric OnStackPtr = CGF.Builder.CreateAnd( 719*06c3fb27SDimitry Andric OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align), 720*06c3fb27SDimitry Andric "align_stack"); 721*06c3fb27SDimitry Andric 722*06c3fb27SDimitry Andric OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy); 723*06c3fb27SDimitry Andric } 724*06c3fb27SDimitry Andric Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty, 725*06c3fb27SDimitry Andric std::max(CharUnits::fromQuantity(8), TyAlign)); 726*06c3fb27SDimitry Andric 727*06c3fb27SDimitry Andric // All stack slots are multiples of 8 bytes. 
728*06c3fb27SDimitry Andric CharUnits StackSlotSize = CharUnits::fromQuantity(8); 729*06c3fb27SDimitry Andric CharUnits StackSize; 730*06c3fb27SDimitry Andric if (IsIndirect) 731*06c3fb27SDimitry Andric StackSize = StackSlotSize; 732*06c3fb27SDimitry Andric else 733*06c3fb27SDimitry Andric StackSize = TySize.alignTo(StackSlotSize); 734*06c3fb27SDimitry Andric 735*06c3fb27SDimitry Andric llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize); 736*06c3fb27SDimitry Andric llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP( 737*06c3fb27SDimitry Andric CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack"); 738*06c3fb27SDimitry Andric 739*06c3fb27SDimitry Andric // Write the new value of __stack for the next call to va_arg 740*06c3fb27SDimitry Andric CGF.Builder.CreateStore(NewStack, stack_p); 741*06c3fb27SDimitry Andric 742*06c3fb27SDimitry Andric if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) && 743*06c3fb27SDimitry Andric TySize < StackSlotSize) { 744*06c3fb27SDimitry Andric CharUnits Offset = StackSlotSize - TySize; 745*06c3fb27SDimitry Andric OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset); 746*06c3fb27SDimitry Andric } 747*06c3fb27SDimitry Andric 748*06c3fb27SDimitry Andric OnStackAddr = OnStackAddr.withElementType(MemTy); 749*06c3fb27SDimitry Andric 750*06c3fb27SDimitry Andric CGF.EmitBranch(ContBlock); 751*06c3fb27SDimitry Andric 752*06c3fb27SDimitry Andric //======================================= 753*06c3fb27SDimitry Andric // Tidy up 754*06c3fb27SDimitry Andric //======================================= 755*06c3fb27SDimitry Andric CGF.EmitBlock(ContBlock); 756*06c3fb27SDimitry Andric 757*06c3fb27SDimitry Andric Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr, 758*06c3fb27SDimitry Andric OnStackBlock, "vaargs.addr"); 759*06c3fb27SDimitry Andric 760*06c3fb27SDimitry Andric if (IsIndirect) 761*06c3fb27SDimitry Andric return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy, 
762*06c3fb27SDimitry Andric TyAlign); 763*06c3fb27SDimitry Andric 764*06c3fb27SDimitry Andric return ResAddr; 765*06c3fb27SDimitry Andric } 766*06c3fb27SDimitry Andric 767*06c3fb27SDimitry Andric Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty, 768*06c3fb27SDimitry Andric CodeGenFunction &CGF) const { 769*06c3fb27SDimitry Andric // The backend's lowering doesn't support va_arg for aggregates or 770*06c3fb27SDimitry Andric // illegal vector types. Lower VAArg here for these cases and use 771*06c3fb27SDimitry Andric // the LLVM va_arg instruction for everything else. 772*06c3fb27SDimitry Andric if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty)) 773*06c3fb27SDimitry Andric return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect()); 774*06c3fb27SDimitry Andric 775*06c3fb27SDimitry Andric uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8; 776*06c3fb27SDimitry Andric CharUnits SlotSize = CharUnits::fromQuantity(PointerSize); 777*06c3fb27SDimitry Andric 778*06c3fb27SDimitry Andric // Empty records are ignored for parameter passing purposes. 779*06c3fb27SDimitry Andric if (isEmptyRecord(getContext(), Ty, true)) 780*06c3fb27SDimitry Andric return Address(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), 781*06c3fb27SDimitry Andric CGF.ConvertTypeForMem(Ty), SlotSize); 782*06c3fb27SDimitry Andric 783*06c3fb27SDimitry Andric // The size of the actual thing passed, which might end up just 784*06c3fb27SDimitry Andric // being a pointer for indirect types. 785*06c3fb27SDimitry Andric auto TyInfo = getContext().getTypeInfoInChars(Ty); 786*06c3fb27SDimitry Andric 787*06c3fb27SDimitry Andric // Arguments bigger than 16 bytes which aren't homogeneous 788*06c3fb27SDimitry Andric // aggregates should be passed indirectly. 
789*06c3fb27SDimitry Andric bool IsIndirect = false; 790*06c3fb27SDimitry Andric if (TyInfo.Width.getQuantity() > 16) { 791*06c3fb27SDimitry Andric const Type *Base = nullptr; 792*06c3fb27SDimitry Andric uint64_t Members = 0; 793*06c3fb27SDimitry Andric IsIndirect = !isHomogeneousAggregate(Ty, Base, Members); 794*06c3fb27SDimitry Andric } 795*06c3fb27SDimitry Andric 796*06c3fb27SDimitry Andric return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, 797*06c3fb27SDimitry Andric TyInfo, SlotSize, /*AllowHigherAlign*/ true); 798*06c3fb27SDimitry Andric } 799*06c3fb27SDimitry Andric 800*06c3fb27SDimitry Andric Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 801*06c3fb27SDimitry Andric QualType Ty) const { 802*06c3fb27SDimitry Andric bool IsIndirect = false; 803*06c3fb27SDimitry Andric 804*06c3fb27SDimitry Andric // Composites larger than 16 bytes are passed by reference. 805*06c3fb27SDimitry Andric if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128) 806*06c3fb27SDimitry Andric IsIndirect = true; 807*06c3fb27SDimitry Andric 808*06c3fb27SDimitry Andric return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, 809*06c3fb27SDimitry Andric CGF.getContext().getTypeInfoInChars(Ty), 810*06c3fb27SDimitry Andric CharUnits::fromQuantity(8), 811*06c3fb27SDimitry Andric /*allowHigherAlign*/ false); 812*06c3fb27SDimitry Andric } 813*06c3fb27SDimitry Andric 814*06c3fb27SDimitry Andric std::unique_ptr<TargetCodeGenInfo> 815*06c3fb27SDimitry Andric CodeGen::createAArch64TargetCodeGenInfo(CodeGenModule &CGM, 816*06c3fb27SDimitry Andric AArch64ABIKind Kind) { 817*06c3fb27SDimitry Andric return std::make_unique<AArch64TargetCodeGenInfo>(CGM.getTypes(), Kind); 818*06c3fb27SDimitry Andric } 819*06c3fb27SDimitry Andric 820*06c3fb27SDimitry Andric std::unique_ptr<TargetCodeGenInfo> 821*06c3fb27SDimitry Andric CodeGen::createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM, 822*06c3fb27SDimitry Andric AArch64ABIKind K) { 823*06c3fb27SDimitry 
Andric return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM.getTypes(), K); 824*06c3fb27SDimitry Andric } 825