//===--- SwiftCallingConv.cpp - Lowering for the Swift calling convention -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the abstract lowering for the Swift calling convention.
//
//===----------------------------------------------------------------------===//

#include "clang/CodeGen/SwiftCallingConv.h"
#include "ABIInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/Basic/TargetInfo.h"
#include <optional>

using namespace clang;
using namespace CodeGen;
using namespace swiftcall;

static const SwiftABIInfo &getSwiftABIInfo(CodeGenModule &CGM) {
  return CGM.getTargetCodeGenInfo().getSwiftABIInfo();
}

static bool isPowerOf2(unsigned n) {
  return n == (n & -n);
}

/// Given two types with the same size, try to find a common type.
static llvm::Type *getCommonType(llvm::Type *first, llvm::Type *second) {
  assert(first != second);

  // Allow pointers to merge with integers, but prefer the integer type.
  if (first->isIntegerTy()) {
    if (second->isPointerTy()) return first;
  } else if (first->isPointerTy()) {
    if (second->isIntegerTy()) return second;
    if (second->isPointerTy()) return first;

  // Allow two vectors to be merged (given that they have the same size).
  // This assumes that we never have two different vector register sets.
  } else if (auto firstVecTy = dyn_cast<llvm::VectorType>(first)) {
    if (auto secondVecTy = dyn_cast<llvm::VectorType>(second)) {
      if (auto commonTy = getCommonType(firstVecTy->getElementType(),
                                        secondVecTy->getElementType())) {
        return (commonTy == firstVecTy->getElementType() ? first : second);
      }
    }
  }

  return nullptr;
}

static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeStoreSize(type));
}

static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(type));
}

void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
  // Deal with various aggregate types as special cases:

  // Record types.
  if (auto recType = type->getAs<RecordType>()) {
    addTypedData(recType->getDecl(), begin);

  // Array types.
  } else if (type->isArrayType()) {
    // Incomplete array types (flexible array members?) don't provide
    // data to lay out, and the other cases shouldn't be possible.
    auto arrayType = CGM.getContext().getAsConstantArrayType(type);
    if (!arrayType) return;

    QualType eltType = arrayType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    for (uint64_t i = 0, e = arrayType->getZExtSize(); i != e; ++i) {
      addTypedData(eltType, begin + i * eltSize);
    }

  // Complex types.
  } else if (auto complexType = type->getAs<ComplexType>()) {
    auto eltType = complexType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    auto eltLLVMType = CGM.getTypes().ConvertType(eltType);
    addTypedData(eltLLVMType, begin, begin + eltSize);
    addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);

  // Member pointer types.
  } else if (type->getAs<MemberPointerType>()) {
    // Just add it all as opaque.
    addOpaqueData(begin, begin + CGM.getContext().getTypeSizeInChars(type));

  // Atomic types.
  } else if (const auto *atomicType = type->getAs<AtomicType>()) {
    auto valueType = atomicType->getValueType();
    auto atomicSize = CGM.getContext().getTypeSizeInChars(atomicType);
    auto valueSize = CGM.getContext().getTypeSizeInChars(valueType);

    addTypedData(atomicType->getValueType(), begin);

    // Add atomic padding.
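    // Illustrative example (not from the original source): if the value
    // type occupies 5 bytes but sizeof(_Atomic(T)) is 8, the trailing
    // padding bytes [begin + 5, begin + 8) are recorded as opaque data.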
    auto atomicPadding = atomicSize - valueSize;
    if (atomicPadding > CharUnits::Zero())
      addOpaqueData(begin + valueSize, begin + atomicSize);

  // Everything else is scalar and should not convert as an LLVM aggregate.
  } else {
    // We intentionally convert as !ForMem because we want to preserve
    // that a type was an i1.
    auto *llvmType = CGM.getTypes().ConvertType(type);
    addTypedData(llvmType, begin);
  }
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin) {
  addTypedData(record, begin, CGM.getContext().getASTRecordLayout(record));
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
                                    const ASTRecordLayout &layout) {
  // Unions are a special case.
  if (record->isUnion()) {
    for (auto *field : record->fields()) {
      if (field->isBitField()) {
        addBitFieldData(field, begin, 0);
      } else {
        addTypedData(field->getType(), begin);
      }
    }
    return;
  }

  // Note that correctness does not rely on us adding things in
  // their actual order of layout; it's just somewhat more efficient
  // for the builder.

  // With that in mind, add "early" C++ data.
  auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
  if (cxxRecord) {
    // - a v-table pointer, if the class adds its own
    if (layout.hasOwnVFPtr()) {
      addTypedData(CGM.Int8PtrTy, begin);
    }

    // - non-virtual bases
    for (auto &baseSpecifier : cxxRecord->bases()) {
      if (baseSpecifier.isVirtual()) continue;

      auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord));
    }

    // - a vbptr if the class adds its own
    if (layout.hasOwnVBPtr()) {
      addTypedData(CGM.Int8PtrTy, begin + layout.getVBPtrOffset());
    }
  }

  // Add fields.
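  // Illustrative sketch (assuming a 4-byte int): for 'struct { int a;
  // char b; }', 'a' is added at begin + 0 and 'b' at begin + 4, while a
  // bit-field would instead pass its raw bit offset to addBitFieldData,
  // which computes the covered byte range itself.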
  for (auto *field : record->fields()) {
    auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
    if (field->isBitField()) {
      addBitFieldData(field, begin, fieldOffsetInBits);
    } else {
      addTypedData(field->getType(),
               begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits));
    }
  }

  // Add "late" C++ data:
  if (cxxRecord) {
    // - virtual bases
    for (auto &vbaseSpecifier : cxxRecord->vbases()) {
      auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord));
    }
  }
}

void SwiftAggLowering::addBitFieldData(const FieldDecl *bitfield,
                                       CharUnits recordBegin,
                                       uint64_t bitfieldBitBegin) {
  assert(bitfield->isBitField());
  auto &ctx = CGM.getContext();
  auto width = bitfield->getBitWidthValue(ctx);

  // We can ignore zero-width bit-fields.
  if (width == 0) return;

  // toCharUnitsFromBits rounds down.
  CharUnits bitfieldByteBegin = ctx.toCharUnitsFromBits(bitfieldBitBegin);

  // Find the offset of the last byte that is partially occupied by the
  // bit-field; since we otherwise expect exclusive ends, the end is the
  // next byte.
  uint64_t bitfieldBitLast = bitfieldBitBegin + width - 1;
  CharUnits bitfieldByteEnd =
      ctx.toCharUnitsFromBits(bitfieldBitLast) + CharUnits::One();
  addOpaqueData(recordBegin + bitfieldByteBegin,
                recordBegin + bitfieldByteEnd);
}

void SwiftAggLowering::addTypedData(llvm::Type *type, CharUnits begin) {
  assert(type && "didn't provide type for typed data");
  addTypedData(type, begin, begin + getTypeStoreSize(CGM, type));
}

void SwiftAggLowering::addTypedData(llvm::Type *type,
                                    CharUnits begin, CharUnits end) {
  assert(type && "didn't provide type for typed data");
  assert(getTypeStoreSize(CGM, type) == end - begin);

  // Legalize vector types.
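  // Illustrative example (target-dependent, assumed): if <3 x float> is
  // not legal but <2 x float> is, a 12-byte <3 x float> at 'begin' is
  // added as a <2 x float> at begin + 0 and a float at begin + 8.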
  if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
    SmallVector<llvm::Type*, 4> componentTys;
    legalizeVectorType(CGM, end - begin, vecTy, componentTys);
    assert(componentTys.size() >= 1);

    // Walk the initial components.
    for (size_t i = 0, e = componentTys.size(); i != e - 1; ++i) {
      llvm::Type *componentTy = componentTys[i];
      auto componentSize = getTypeStoreSize(CGM, componentTy);
      assert(componentSize < end - begin);
      addLegalTypedData(componentTy, begin, begin + componentSize);
      begin += componentSize;
    }

    return addLegalTypedData(componentTys.back(), begin, end);
  }

  // Legalize integer types.
  if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
    if (!isLegalIntegerType(CGM, intTy))
      return addOpaqueData(begin, end);
  }

  // All other types should be legal.
  return addLegalTypedData(type, begin, end);
}

void SwiftAggLowering::addLegalTypedData(llvm::Type *type,
                                         CharUnits begin, CharUnits end) {
  // Require the type to be naturally aligned.
  if (!begin.isZero() && !begin.isMultipleOf(getNaturalAlignment(CGM, type))) {

    // Try splitting vector types.
    if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
      auto split = splitLegalVectorType(CGM, end - begin, vecTy);
      auto eltTy = split.first;
      auto numElts = split.second;

      auto eltSize = (end - begin) / numElts;
      assert(eltSize == getTypeStoreSize(CGM, eltTy));
      for (size_t i = 0, e = numElts; i != e; ++i) {
        addLegalTypedData(eltTy, begin, begin + eltSize);
        begin += eltSize;
      }
      assert(begin == end);
      return;
    }

    return addOpaqueData(begin, end);
  }

  addEntry(type, begin, end);
}

void SwiftAggLowering::addEntry(llvm::Type *type,
                                CharUnits begin, CharUnits end) {
  assert((!type ||
          (!isa<llvm::StructType>(type) && !isa<llvm::ArrayType>(type))) &&
         "cannot add aggregate-typed data");
  assert(!type || begin.isMultipleOf(getNaturalAlignment(CGM, type)));

  // Fast path: we can just add entries to the end.
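  // Entries is kept sorted by offset with non-overlapping ranges, so a
  // new range starting at or after the last recorded end can simply be
  // appended with no conflict handling.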
  if (Entries.empty() || Entries.back().End <= begin) {
    Entries.push_back({begin, end, type});
    return;
  }

  // Find the first existing entry that ends after the start of the new data.
  // TODO: do a binary search if Entries is big enough for it to matter.
  size_t index = Entries.size() - 1;
  while (index != 0) {
    if (Entries[index - 1].End <= begin) break;
    --index;
  }

  // The entry ends after the start of the new data.
  // If the entry starts after the end of the new data, there's no conflict.
  if (Entries[index].Begin >= end) {
    // This insertion is potentially O(n), but the way we generally build
    // these layouts makes that unlikely to matter: we'd need a union of
    // several very large types.
    Entries.insert(Entries.begin() + index, {begin, end, type});
    return;
  }

  // Otherwise, the ranges overlap.  The new range might also overlap
  // with later ranges.
restartAfterSplit:

  // Simplest case: an exact overlap.
  if (Entries[index].Begin == begin && Entries[index].End == end) {
    // If the types match exactly, great.
    if (Entries[index].Type == type) return;

    // If either type is opaque, make the entry opaque and return.
    if (Entries[index].Type == nullptr) {
      return;
    } else if (type == nullptr) {
      Entries[index].Type = nullptr;
      return;
    }

    // If they disagree in an ABI-agnostic way, just resolve the conflict
    // arbitrarily.
    if (auto entryType = getCommonType(Entries[index].Type, type)) {
      Entries[index].Type = entryType;
      return;
    }

    // Otherwise, make the entry opaque.
    Entries[index].Type = nullptr;
    return;
  }

  // Okay, we have an overlapping conflict of some sort.

  // If we have a vector type, split it.
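  // Illustrative example (not from the original source): a conflicting
  // <4 x i32> is re-added as four scalar i32 entries of 4 bytes each, so
  // the overlap can be resolved element by element.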
  if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
    auto eltTy = vecTy->getElementType();
    CharUnits eltSize =
        (end - begin) / cast<llvm::FixedVectorType>(vecTy)->getNumElements();
    assert(eltSize == getTypeStoreSize(CGM, eltTy));
    for (unsigned i = 0,
                  e = cast<llvm::FixedVectorType>(vecTy)->getNumElements();
         i != e; ++i) {
      addEntry(eltTy, begin, begin + eltSize);
      begin += eltSize;
    }
    assert(begin == end);
    return;
  }

  // If the entry is a vector type, split it and try again.
  if (Entries[index].Type && Entries[index].Type->isVectorTy()) {
    splitVectorEntry(index);
    goto restartAfterSplit;
  }

  // Okay, we have no choice but to make the existing entry opaque.

  Entries[index].Type = nullptr;

  // Stretch the start of the entry to the beginning of the range.
  if (begin < Entries[index].Begin) {
    Entries[index].Begin = begin;
    assert(index == 0 || begin >= Entries[index - 1].End);
  }

  // Stretch the end of the entry to the end of the range; but if we run
  // into the start of the next entry, just leave the range there and repeat.
  while (end > Entries[index].End) {
    assert(Entries[index].Type == nullptr);

    // If the range doesn't overlap the next entry, we're done.
    if (index == Entries.size() - 1 || end <= Entries[index + 1].Begin) {
      Entries[index].End = end;
      break;
    }

    // Otherwise, stretch to the start of the next entry.
    Entries[index].End = Entries[index + 1].Begin;

    // Continue with the next entry.
    index++;

    // This entry needs to be made opaque if it is not already.
    if (Entries[index].Type == nullptr)
      continue;

    // Split vector entries unless we completely subsume them.
    if (Entries[index].Type->isVectorTy() &&
        end < Entries[index].End) {
      splitVectorEntry(index);
    }

    // Make the entry opaque.
    Entries[index].Type = nullptr;
  }
}

/// Replace the entry of vector type at offset 'index' with a sequence
/// of its component vectors.
void SwiftAggLowering::splitVectorEntry(unsigned index) {
  auto vecTy = cast<llvm::VectorType>(Entries[index].Type);
  auto split = splitLegalVectorType(CGM, Entries[index].getWidth(), vecTy);

  auto eltTy = split.first;
  CharUnits eltSize = getTypeStoreSize(CGM, eltTy);
  auto numElts = split.second;
  Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry());

  CharUnits begin = Entries[index].Begin;
  for (unsigned i = 0; i != numElts; ++i) {
    unsigned idx = index + i;
    Entries[idx].Type = eltTy;
    Entries[idx].Begin = begin;
    Entries[idx].End = begin + eltSize;
    begin += eltSize;
  }
}

/// Given a power-of-two unit size, return the offset of the aligned unit
/// of that size which contains the given offset.
///
/// In other words, round down to the nearest multiple of the unit size.
static CharUnits getOffsetAtStartOfUnit(CharUnits offset, CharUnits unitSize) {
  assert(isPowerOf2(unitSize.getQuantity()));
  auto unitMask = ~(unitSize.getQuantity() - 1);
  return CharUnits::fromQuantity(offset.getQuantity() & unitMask);
}

static bool areBytesInSameUnit(CharUnits first, CharUnits second,
                               CharUnits chunkSize) {
  return getOffsetAtStartOfUnit(first, chunkSize)
      == getOffsetAtStartOfUnit(second, chunkSize);
}

static bool isMergeableEntryType(llvm::Type *type) {
  // Opaquely-typed memory is always mergeable.
  if (type == nullptr) return true;

  // Pointers and integers are always mergeable.  In theory we should not
  // merge pointers, but (1) it doesn't currently matter in practice because
  // the chunk size is never greater than the size of a pointer and (2)
  // Swift IRGen uses integer types for a lot of things that are "really"
  // just storing pointers (like std::optional<SomePointer>).  If we ever
  // have a target that would otherwise combine pointers, we should put some
  // effort into fixing those cases in Swift IRGen and then call out pointer
  // types here.

  // Floating-point and vector types should never be merged.
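  // Illustrative example (assuming an 8-byte chunk): two i32 entries at
  // offsets 0 and 4 may be merged and later emitted as a single i64,
  // whereas two floats at the same offsets stay separate entries.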
  // Most such types are too large and highly-aligned to ever trigger merging
  // in practice, but it's important for the rule to cover at least 'half'
  // and 'float', as well as things like small vectors of 'i1' or 'i8'.
  return (!type->isFloatingPointTy() && !type->isVectorTy());
}

bool SwiftAggLowering::shouldMergeEntries(const StorageEntry &first,
                                          const StorageEntry &second,
                                          CharUnits chunkSize) {
  // Only merge entries that overlap the same chunk.  We test this first
  // despite being a bit more expensive because this is the condition that
  // tends to prevent merging.
  if (!areBytesInSameUnit(first.End - CharUnits::One(), second.Begin,
                          chunkSize))
    return false;

  return (isMergeableEntryType(first.Type) &&
          isMergeableEntryType(second.Type));
}

void SwiftAggLowering::finish() {
  if (Entries.empty()) {
    Finished = true;
    return;
  }

  // We logically split the layout down into a series of chunks of this size,
  // which is generally the size of a pointer.
  const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);

  // First pass: if two entries should be merged, make them both opaque
  // and stretch one to meet the next.
  // Also, remember if there are any opaque entries.
  bool hasOpaqueEntries = (Entries[0].Type == nullptr);
  for (size_t i = 1, e = Entries.size(); i != e; ++i) {
    if (shouldMergeEntries(Entries[i - 1], Entries[i], chunkSize)) {
      Entries[i - 1].Type = nullptr;
      Entries[i].Type = nullptr;
      Entries[i - 1].End = Entries[i].Begin;
      hasOpaqueEntries = true;

    } else if (Entries[i].Type == nullptr) {
      hasOpaqueEntries = true;
    }
  }

  // The rest of the algorithm leaves non-opaque entries alone, so if we
  // have no opaque entries, we're done.
  if (!hasOpaqueEntries) {
    Finished = true;
    return;
  }

  // Okay, move the entries to a temporary and rebuild Entries.
  auto orig = std::move(Entries);
  assert(Entries.empty());

  for (size_t i = 0, e = orig.size(); i != e; ++i) {
    // Just copy over non-opaque entries.
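    // Opaque runs are handled below. Illustrative walk-through (assuming
    // an 8-byte chunk): an opaque range [1, 3) lies entirely within the
    // chunk [0, 8); unit sizes 1 and 2 fail to cover it, so the smallest
    // aligned unit that does is [0, 4), which becomes an i32 entry.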
    if (orig[i].Type != nullptr) {
      Entries.push_back(orig[i]);
      continue;
    }

    // Scan forward to determine the full extent of the next opaque range.
    // We know from the first pass that only contiguous ranges will overlap
    // the same aligned chunk.
    auto begin = orig[i].Begin;
    auto end = orig[i].End;
    while (i + 1 != e &&
           orig[i + 1].Type == nullptr &&
           end == orig[i + 1].Begin) {
      end = orig[i + 1].End;
      i++;
    }

    // Add an entry per intersected chunk.
    do {
      // Find the smallest aligned storage unit in the maximal aligned
      // storage unit containing 'begin' that contains all the bytes in
      // the intersection between the range and this chunk.
      CharUnits localBegin = begin;
      CharUnits chunkBegin = getOffsetAtStartOfUnit(localBegin, chunkSize);
      CharUnits chunkEnd = chunkBegin + chunkSize;
      CharUnits localEnd = std::min(end, chunkEnd);

      // Just do a simple loop over ever-increasing unit sizes.
      CharUnits unitSize = CharUnits::One();
      CharUnits unitBegin, unitEnd;
      for (; ; unitSize *= 2) {
        assert(unitSize <= chunkSize);
        unitBegin = getOffsetAtStartOfUnit(localBegin, unitSize);
        unitEnd = unitBegin + unitSize;
        if (unitEnd >= localEnd) break;
      }

      // Add an entry for this unit.
      auto entryTy =
        llvm::IntegerType::get(CGM.getLLVMContext(),
                               CGM.getContext().toBits(unitSize));
      Entries.push_back({unitBegin, unitEnd, entryTy});

      // The next chunk starts where this chunk left off.
      begin = localEnd;
    } while (begin != end);
  }

  // Okay, finally finished.
  Finished = true;
}

void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const {
  assert(Finished && "haven't yet finished lowering");

  for (auto &entry : Entries) {
    callback(entry.Begin, entry.End, entry.Type);
  }
}

std::pair<llvm::StructType*, llvm::Type*>
SwiftAggLowering::getCoerceAndExpandTypes() const {
  assert(Finished && "haven't yet finished lowering");

  auto &ctx = CGM.getLLVMContext();

  if (Entries.empty()) {
    auto type = llvm::StructType::get(ctx);
    return { type, type };
  }

  SmallVector<llvm::Type*, 8> elts;
  CharUnits lastEnd = CharUnits::Zero();
  bool hasPadding = false;
  bool packed = false;
  for (auto &entry : Entries) {
    if (entry.Begin != lastEnd) {
      auto paddingSize = entry.Begin - lastEnd;
      assert(!paddingSize.isNegative());

      auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
                                          paddingSize.getQuantity());
      elts.push_back(padding);
      hasPadding = true;
    }

    if (!packed && !entry.Begin.isMultipleOf(CharUnits::fromQuantity(
            CGM.getDataLayout().getABITypeAlign(entry.Type))))
      packed = true;

    elts.push_back(entry.Type);

    lastEnd = entry.Begin + getTypeAllocSize(CGM, entry.Type);
    assert(entry.End <= lastEnd);
  }

  // We don't need to adjust 'packed' to deal with possible tail padding
  // because we never do that kind of access through the coercion type.
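  // Illustrative example (not from the original source): entries
  // { i32 at offset 0, i32 at offset 8 } yield the coercion type
  // { i32, [4 x i8], i32 } and the unpadded type { i32, i32 }.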
  auto coercionType = llvm::StructType::get(ctx, elts, packed);

  llvm::Type *unpaddedType = coercionType;
  if (hasPadding) {
    elts.clear();
    for (auto &entry : Entries) {
      elts.push_back(entry.Type);
    }
    if (elts.size() == 1) {
      unpaddedType = elts[0];
    } else {
      unpaddedType = llvm::StructType::get(ctx, elts, /*packed*/ false);
    }
  } else if (Entries.size() == 1) {
    unpaddedType = Entries[0].Type;
  }

  return { coercionType, unpaddedType };
}

bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
  assert(Finished && "haven't yet finished lowering");

  // Empty types don't need to be passed indirectly.
  if (Entries.empty()) return false;

  // Avoid copying the array of types when there's just a single element.
  if (Entries.size() == 1) {
    return getSwiftABIInfo(CGM).shouldPassIndirectly(Entries.back().Type,
                                                     asReturnValue);
  }

  SmallVector<llvm::Type*, 8> componentTys;
  componentTys.reserve(Entries.size());
  for (auto &entry : Entries) {
    componentTys.push_back(entry.Type);
  }
  return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue);
}

bool swiftcall::shouldPassIndirectly(CodeGenModule &CGM,
                                     ArrayRef<llvm::Type*> componentTys,
                                     bool asReturnValue) {
  return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue);
}

CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) {
  // Currently always the size of an ordinary pointer.
  return CGM.getContext().toCharUnitsFromBits(
      CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default));
}

CharUnits swiftcall::getNaturalAlignment(CodeGenModule &CGM, llvm::Type *type) {
  // For Swift's purposes, this is always just the store size of the type
  // rounded up to a power of 2.
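  // Illustrative example: a type with a 12-byte store size gets a natural
  // alignment of 16 (the next power of 2), regardless of its possibly
  // smaller ABI alignment in the data layout.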
  auto size = (unsigned long long) getTypeStoreSize(CGM, type).getQuantity();
  size = llvm::bit_ceil(size);
  assert(CGM.getDataLayout().getABITypeAlign(type) <= size);
  return CharUnits::fromQuantity(size);
}

bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
                                   llvm::IntegerType *intTy) {
  auto size = intTy->getBitWidth();
  switch (size) {
  case 1:
  case 8:
  case 16:
  case 32:
  case 64:
    // Just assume that the above are always legal.
    return true;

  case 128:
    return CGM.getContext().getTargetInfo().hasInt128Type();

  default:
    return false;
  }
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::VectorType *vectorTy) {
  return isLegalVectorType(
      CGM, vectorSize, vectorTy->getElementType(),
      cast<llvm::FixedVectorType>(vectorTy)->getNumElements());
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::Type *eltTy, unsigned numElts) {
  assert(numElts > 1 && "illegal vector length");
  return getSwiftABIInfo(CGM).isLegalVectorType(vectorSize, eltTy, numElts);
}

std::pair<llvm::Type*, unsigned>
swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                llvm::VectorType *vectorTy) {
  auto numElts = cast<llvm::FixedVectorType>(vectorTy)->getNumElements();
  auto eltTy = vectorTy->getElementType();

  // Try to split the vector type in half.
  if (numElts >= 4 && isPowerOf2(numElts)) {
    if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
      return {llvm::FixedVectorType::get(eltTy, numElts / 2), 2};
  }

  return {eltTy, numElts};
}

void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
                                   llvm::VectorType *origVectorTy,
                                   llvm::SmallVectorImpl<llvm::Type*> &components) {
  // If it's already a legal vector type, use it.
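  // Otherwise it is broken into smaller legal subvectors or, failing
  // that, into individual scalar elements; e.g. (illustrative,
  // target-dependent) a <16 x i8> might come back as two <8 x i8>
  // components if only 8-byte vectors are legal.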
  if (isLegalVectorType(CGM, origVectorSize, origVectorTy)) {
    components.push_back(origVectorTy);
    return;
  }

  // Try to split the vector into legal subvectors.
  auto numElts = cast<llvm::FixedVectorType>(origVectorTy)->getNumElements();
  auto eltTy = origVectorTy->getElementType();
  assert(numElts != 1);

  // The largest size that we're still considering making subvectors of.
  // Always a power of 2.
  unsigned logCandidateNumElts = llvm::Log2_32(numElts);
  unsigned candidateNumElts = 1U << logCandidateNumElts;
  assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);

  // Minor optimization: don't check the legality of this exact size twice.
  if (candidateNumElts == numElts) {
    logCandidateNumElts--;
    candidateNumElts >>= 1;
  }

  CharUnits eltSize = (origVectorSize / numElts);
  CharUnits candidateSize = eltSize * candidateNumElts;

  // The sensibility of this algorithm relies on the fact that we never
  // have a legal non-power-of-2 vector size without having the power of 2
  // also be legal.
  while (logCandidateNumElts > 0) {
    assert(candidateNumElts == 1U << logCandidateNumElts);
    assert(candidateNumElts <= numElts);
    assert(candidateSize == eltSize * candidateNumElts);

    // Skip illegal vector sizes.
    if (!isLegalVectorType(CGM, candidateSize, eltTy, candidateNumElts)) {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
      continue;
    }

    // Add the right number of vectors of this size.
    auto numVecs = numElts >> logCandidateNumElts;
    components.append(numVecs,
                      llvm::FixedVectorType::get(eltTy, candidateNumElts));
    numElts -= (numVecs << logCandidateNumElts);

    if (numElts == 0) return;

    // It's possible that the number of elements remaining will be legal.
    // This can happen with e.g. <7 x float> when <3 x float> is legal.
    // This only needs to be separately checked if it's not a power of 2.
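    // Illustrative continuation of the case above: a <7 x float> first
    // contributes one <4 x float>; if the remaining <3 x float> is legal,
    // it is emitted directly rather than being split further.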
    if (numElts > 2 && !isPowerOf2(numElts) &&
        isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
      components.push_back(llvm::FixedVectorType::get(eltTy, numElts));
      return;
    }

    // Bring candidateNumElts down to something no larger than numElts.
    do {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
    } while (candidateNumElts > numElts);
  }

  // Otherwise, just append a bunch of individual elements.
  components.append(numElts, eltTy);
}

bool swiftcall::mustPassRecordIndirectly(CodeGenModule &CGM,
                                         const RecordDecl *record) {
  // FIXME: should we not rely on the standard computation in Sema, just in
  // case we want to diverge from the platform ABI (e.g. on targets where
  // that uses the MSVC rule)?
  return !record->canPassInRegisters();
}

static ABIArgInfo classifyExpandedType(SwiftAggLowering &lowering,
                                       bool forReturn,
                                       CharUnits alignmentForIndirect) {
  if (lowering.empty()) {
    return ABIArgInfo::getIgnore();
  } else if (lowering.shouldPassIndirectly(forReturn)) {
    return ABIArgInfo::getIndirect(alignmentForIndirect, /*byval*/ false);
  } else {
    auto types = lowering.getCoerceAndExpandTypes();
    return ABIArgInfo::getCoerceAndExpand(types.first, types.second);
  }
}

static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
                               bool forReturn) {
  if (auto recordType = dyn_cast<RecordType>(type)) {
    auto record = recordType->getDecl();
    auto &layout = CGM.getContext().getASTRecordLayout(record);

    if (mustPassRecordIndirectly(CGM, record))
      return ABIArgInfo::getIndirect(layout.getAlignment(), /*byval*/ false);

    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
    lowering.finish();

    return classifyExpandedType(lowering, forReturn, layout.getAlignment());
  }

  // Just assume that all of our target ABIs can support returning at least
  // two integer or floating-point values.
  if (isa<ComplexType>(type)) {
    return (forReturn ? ABIArgInfo::getDirect() : ABIArgInfo::getExpand());
  }

  // Vector types may need to be legalized.
  if (isa<VectorType>(type)) {
    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(type, CharUnits::Zero());
    lowering.finish();

    CharUnits alignment = CGM.getContext().getTypeAlignInChars(type);
    return classifyExpandedType(lowering, forReturn, alignment);
  }

  // Member pointer types need to be expanded, but it's a simple form of
  // expansion that 'Direct' can handle.  Note that CanBeFlattened should be
  // true for this to work.

  // 'void' needs to be ignored.
  if (type->isVoidType()) {
    return ABIArgInfo::getIgnore();
  }

  // Everything else can be passed directly.
  return ABIArgInfo::getDirect();
}

ABIArgInfo swiftcall::classifyReturnType(CodeGenModule &CGM, CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ true);
}

ABIArgInfo swiftcall::classifyArgumentType(CodeGenModule &CGM,
                                           CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ false);
}

void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  auto &retInfo = FI.getReturnInfo();
  retInfo = classifyReturnType(CGM, FI.getReturnType());

  for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) {
    auto &argInfo = FI.arg_begin()[i];
    argInfo.info = classifyArgumentType(CGM, argInfo.type);
  }
}

// Is swifterror lowered to a register by the target ABI?
bool swiftcall::isSwiftErrorLoweredInRegister(CodeGenModule &CGM) {
  return getSwiftABIInfo(CGM).isSwiftErrorInRegister();
}