1e5dd7070Spatrick //===--- SwiftCallingConv.cpp - Lowering for the Swift calling convention -===//
2e5dd7070Spatrick //
3e5dd7070Spatrick // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4e5dd7070Spatrick // See https://llvm.org/LICENSE.txt for license information.
5e5dd7070Spatrick // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6e5dd7070Spatrick //
7e5dd7070Spatrick //===----------------------------------------------------------------------===//
8e5dd7070Spatrick //
9e5dd7070Spatrick // Implementation of the abstract lowering for the Swift calling convention.
10e5dd7070Spatrick //
11e5dd7070Spatrick //===----------------------------------------------------------------------===//
12e5dd7070Spatrick
13e5dd7070Spatrick #include "clang/CodeGen/SwiftCallingConv.h"
14*12c85518Srobert #include "ABIInfo.h"
15e5dd7070Spatrick #include "CodeGenModule.h"
16e5dd7070Spatrick #include "TargetInfo.h"
17*12c85518Srobert #include "clang/Basic/TargetInfo.h"
18*12c85518Srobert #include <optional>
19e5dd7070Spatrick
20e5dd7070Spatrick using namespace clang;
21e5dd7070Spatrick using namespace CodeGen;
22e5dd7070Spatrick using namespace swiftcall;
23e5dd7070Spatrick
getSwiftABIInfo(CodeGenModule & CGM)24e5dd7070Spatrick static const SwiftABIInfo &getSwiftABIInfo(CodeGenModule &CGM) {
25*12c85518Srobert return CGM.getTargetCodeGenInfo().getSwiftABIInfo();
26e5dd7070Spatrick }
27e5dd7070Spatrick
/// Return true if \p n is a power of two.
/// Note: like the bit-trick it replaces (n == (n & -n)), this also
/// returns true for n == 0.
static bool isPowerOf2(unsigned n) {
  // A power of two has exactly one set bit, so clearing the lowest set
  // bit must leave zero.
  return (n & (n - 1)) == 0;
}
31e5dd7070Spatrick
32e5dd7070Spatrick /// Given two types with the same size, try to find a common type.
getCommonType(llvm::Type * first,llvm::Type * second)33e5dd7070Spatrick static llvm::Type *getCommonType(llvm::Type *first, llvm::Type *second) {
34e5dd7070Spatrick assert(first != second);
35e5dd7070Spatrick
36e5dd7070Spatrick // Allow pointers to merge with integers, but prefer the integer type.
37e5dd7070Spatrick if (first->isIntegerTy()) {
38e5dd7070Spatrick if (second->isPointerTy()) return first;
39e5dd7070Spatrick } else if (first->isPointerTy()) {
40e5dd7070Spatrick if (second->isIntegerTy()) return second;
41e5dd7070Spatrick if (second->isPointerTy()) return first;
42e5dd7070Spatrick
43e5dd7070Spatrick // Allow two vectors to be merged (given that they have the same size).
44e5dd7070Spatrick // This assumes that we never have two different vector register sets.
45e5dd7070Spatrick } else if (auto firstVecTy = dyn_cast<llvm::VectorType>(first)) {
46e5dd7070Spatrick if (auto secondVecTy = dyn_cast<llvm::VectorType>(second)) {
47e5dd7070Spatrick if (auto commonTy = getCommonType(firstVecTy->getElementType(),
48e5dd7070Spatrick secondVecTy->getElementType())) {
49e5dd7070Spatrick return (commonTy == firstVecTy->getElementType() ? first : second);
50e5dd7070Spatrick }
51e5dd7070Spatrick }
52e5dd7070Spatrick }
53e5dd7070Spatrick
54e5dd7070Spatrick return nullptr;
55e5dd7070Spatrick }
56e5dd7070Spatrick
getTypeStoreSize(CodeGenModule & CGM,llvm::Type * type)57e5dd7070Spatrick static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type) {
58e5dd7070Spatrick return CharUnits::fromQuantity(CGM.getDataLayout().getTypeStoreSize(type));
59e5dd7070Spatrick }
60e5dd7070Spatrick
getTypeAllocSize(CodeGenModule & CGM,llvm::Type * type)61e5dd7070Spatrick static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type) {
62e5dd7070Spatrick return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(type));
63e5dd7070Spatrick }
64e5dd7070Spatrick
addTypedData(QualType type,CharUnits begin)65e5dd7070Spatrick void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
66e5dd7070Spatrick // Deal with various aggregate types as special cases:
67e5dd7070Spatrick
68e5dd7070Spatrick // Record types.
69e5dd7070Spatrick if (auto recType = type->getAs<RecordType>()) {
70e5dd7070Spatrick addTypedData(recType->getDecl(), begin);
71e5dd7070Spatrick
72e5dd7070Spatrick // Array types.
73e5dd7070Spatrick } else if (type->isArrayType()) {
74e5dd7070Spatrick // Incomplete array types (flexible array members?) don't provide
75e5dd7070Spatrick // data to lay out, and the other cases shouldn't be possible.
76e5dd7070Spatrick auto arrayType = CGM.getContext().getAsConstantArrayType(type);
77e5dd7070Spatrick if (!arrayType) return;
78e5dd7070Spatrick
79e5dd7070Spatrick QualType eltType = arrayType->getElementType();
80e5dd7070Spatrick auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
81e5dd7070Spatrick for (uint64_t i = 0, e = arrayType->getSize().getZExtValue(); i != e; ++i) {
82e5dd7070Spatrick addTypedData(eltType, begin + i * eltSize);
83e5dd7070Spatrick }
84e5dd7070Spatrick
85e5dd7070Spatrick // Complex types.
86e5dd7070Spatrick } else if (auto complexType = type->getAs<ComplexType>()) {
87e5dd7070Spatrick auto eltType = complexType->getElementType();
88e5dd7070Spatrick auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
89e5dd7070Spatrick auto eltLLVMType = CGM.getTypes().ConvertType(eltType);
90e5dd7070Spatrick addTypedData(eltLLVMType, begin, begin + eltSize);
91e5dd7070Spatrick addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);
92e5dd7070Spatrick
93e5dd7070Spatrick // Member pointer types.
94e5dd7070Spatrick } else if (type->getAs<MemberPointerType>()) {
95e5dd7070Spatrick // Just add it all as opaque.
96e5dd7070Spatrick addOpaqueData(begin, begin + CGM.getContext().getTypeSizeInChars(type));
97e5dd7070Spatrick
98a9ac8606Spatrick // Atomic types.
99a9ac8606Spatrick } else if (const auto *atomicType = type->getAs<AtomicType>()) {
100a9ac8606Spatrick auto valueType = atomicType->getValueType();
101a9ac8606Spatrick auto atomicSize = CGM.getContext().getTypeSizeInChars(atomicType);
102a9ac8606Spatrick auto valueSize = CGM.getContext().getTypeSizeInChars(valueType);
103a9ac8606Spatrick
104a9ac8606Spatrick addTypedData(atomicType->getValueType(), begin);
105a9ac8606Spatrick
106a9ac8606Spatrick // Add atomic padding.
107a9ac8606Spatrick auto atomicPadding = atomicSize - valueSize;
108a9ac8606Spatrick if (atomicPadding > CharUnits::Zero())
109a9ac8606Spatrick addOpaqueData(begin + valueSize, begin + atomicSize);
110a9ac8606Spatrick
111e5dd7070Spatrick // Everything else is scalar and should not convert as an LLVM aggregate.
112e5dd7070Spatrick } else {
113e5dd7070Spatrick // We intentionally convert as !ForMem because we want to preserve
114e5dd7070Spatrick // that a type was an i1.
115a9ac8606Spatrick auto *llvmType = CGM.getTypes().ConvertType(type);
116e5dd7070Spatrick addTypedData(llvmType, begin);
117e5dd7070Spatrick }
118e5dd7070Spatrick }
119e5dd7070Spatrick
addTypedData(const RecordDecl * record,CharUnits begin)120e5dd7070Spatrick void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin) {
121e5dd7070Spatrick addTypedData(record, begin, CGM.getContext().getASTRecordLayout(record));
122e5dd7070Spatrick }
123e5dd7070Spatrick
addTypedData(const RecordDecl * record,CharUnits begin,const ASTRecordLayout & layout)124e5dd7070Spatrick void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
125e5dd7070Spatrick const ASTRecordLayout &layout) {
126e5dd7070Spatrick // Unions are a special case.
127e5dd7070Spatrick if (record->isUnion()) {
128*12c85518Srobert for (auto *field : record->fields()) {
129e5dd7070Spatrick if (field->isBitField()) {
130e5dd7070Spatrick addBitFieldData(field, begin, 0);
131e5dd7070Spatrick } else {
132e5dd7070Spatrick addTypedData(field->getType(), begin);
133e5dd7070Spatrick }
134e5dd7070Spatrick }
135e5dd7070Spatrick return;
136e5dd7070Spatrick }
137e5dd7070Spatrick
138e5dd7070Spatrick // Note that correctness does not rely on us adding things in
139e5dd7070Spatrick // their actual order of layout; it's just somewhat more efficient
140e5dd7070Spatrick // for the builder.
141e5dd7070Spatrick
142e5dd7070Spatrick // With that in mind, add "early" C++ data.
143e5dd7070Spatrick auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
144e5dd7070Spatrick if (cxxRecord) {
145e5dd7070Spatrick // - a v-table pointer, if the class adds its own
146e5dd7070Spatrick if (layout.hasOwnVFPtr()) {
147e5dd7070Spatrick addTypedData(CGM.Int8PtrTy, begin);
148e5dd7070Spatrick }
149e5dd7070Spatrick
150e5dd7070Spatrick // - non-virtual bases
151e5dd7070Spatrick for (auto &baseSpecifier : cxxRecord->bases()) {
152e5dd7070Spatrick if (baseSpecifier.isVirtual()) continue;
153e5dd7070Spatrick
154e5dd7070Spatrick auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
155e5dd7070Spatrick addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord));
156e5dd7070Spatrick }
157e5dd7070Spatrick
158e5dd7070Spatrick // - a vbptr if the class adds its own
159e5dd7070Spatrick if (layout.hasOwnVBPtr()) {
160e5dd7070Spatrick addTypedData(CGM.Int8PtrTy, begin + layout.getVBPtrOffset());
161e5dd7070Spatrick }
162e5dd7070Spatrick }
163e5dd7070Spatrick
164e5dd7070Spatrick // Add fields.
165*12c85518Srobert for (auto *field : record->fields()) {
166e5dd7070Spatrick auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
167e5dd7070Spatrick if (field->isBitField()) {
168e5dd7070Spatrick addBitFieldData(field, begin, fieldOffsetInBits);
169e5dd7070Spatrick } else {
170e5dd7070Spatrick addTypedData(field->getType(),
171e5dd7070Spatrick begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits));
172e5dd7070Spatrick }
173e5dd7070Spatrick }
174e5dd7070Spatrick
175e5dd7070Spatrick // Add "late" C++ data:
176e5dd7070Spatrick if (cxxRecord) {
177e5dd7070Spatrick // - virtual bases
178e5dd7070Spatrick for (auto &vbaseSpecifier : cxxRecord->vbases()) {
179e5dd7070Spatrick auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
180e5dd7070Spatrick addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord));
181e5dd7070Spatrick }
182e5dd7070Spatrick }
183e5dd7070Spatrick }
184e5dd7070Spatrick
addBitFieldData(const FieldDecl * bitfield,CharUnits recordBegin,uint64_t bitfieldBitBegin)185e5dd7070Spatrick void SwiftAggLowering::addBitFieldData(const FieldDecl *bitfield,
186e5dd7070Spatrick CharUnits recordBegin,
187e5dd7070Spatrick uint64_t bitfieldBitBegin) {
188e5dd7070Spatrick assert(bitfield->isBitField());
189e5dd7070Spatrick auto &ctx = CGM.getContext();
190e5dd7070Spatrick auto width = bitfield->getBitWidthValue(ctx);
191e5dd7070Spatrick
192e5dd7070Spatrick // We can ignore zero-width bit-fields.
193e5dd7070Spatrick if (width == 0) return;
194e5dd7070Spatrick
195e5dd7070Spatrick // toCharUnitsFromBits rounds down.
196e5dd7070Spatrick CharUnits bitfieldByteBegin = ctx.toCharUnitsFromBits(bitfieldBitBegin);
197e5dd7070Spatrick
198e5dd7070Spatrick // Find the offset of the last byte that is partially occupied by the
199e5dd7070Spatrick // bit-field; since we otherwise expect exclusive ends, the end is the
200e5dd7070Spatrick // next byte.
201e5dd7070Spatrick uint64_t bitfieldBitLast = bitfieldBitBegin + width - 1;
202e5dd7070Spatrick CharUnits bitfieldByteEnd =
203e5dd7070Spatrick ctx.toCharUnitsFromBits(bitfieldBitLast) + CharUnits::One();
204e5dd7070Spatrick addOpaqueData(recordBegin + bitfieldByteBegin,
205e5dd7070Spatrick recordBegin + bitfieldByteEnd);
206e5dd7070Spatrick }
207e5dd7070Spatrick
addTypedData(llvm::Type * type,CharUnits begin)208e5dd7070Spatrick void SwiftAggLowering::addTypedData(llvm::Type *type, CharUnits begin) {
209e5dd7070Spatrick assert(type && "didn't provide type for typed data");
210e5dd7070Spatrick addTypedData(type, begin, begin + getTypeStoreSize(CGM, type));
211e5dd7070Spatrick }
212e5dd7070Spatrick
addTypedData(llvm::Type * type,CharUnits begin,CharUnits end)213e5dd7070Spatrick void SwiftAggLowering::addTypedData(llvm::Type *type,
214e5dd7070Spatrick CharUnits begin, CharUnits end) {
215e5dd7070Spatrick assert(type && "didn't provide type for typed data");
216e5dd7070Spatrick assert(getTypeStoreSize(CGM, type) == end - begin);
217e5dd7070Spatrick
218e5dd7070Spatrick // Legalize vector types.
219e5dd7070Spatrick if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
220e5dd7070Spatrick SmallVector<llvm::Type*, 4> componentTys;
221e5dd7070Spatrick legalizeVectorType(CGM, end - begin, vecTy, componentTys);
222e5dd7070Spatrick assert(componentTys.size() >= 1);
223e5dd7070Spatrick
224e5dd7070Spatrick // Walk the initial components.
225e5dd7070Spatrick for (size_t i = 0, e = componentTys.size(); i != e - 1; ++i) {
226e5dd7070Spatrick llvm::Type *componentTy = componentTys[i];
227e5dd7070Spatrick auto componentSize = getTypeStoreSize(CGM, componentTy);
228e5dd7070Spatrick assert(componentSize < end - begin);
229e5dd7070Spatrick addLegalTypedData(componentTy, begin, begin + componentSize);
230e5dd7070Spatrick begin += componentSize;
231e5dd7070Spatrick }
232e5dd7070Spatrick
233e5dd7070Spatrick return addLegalTypedData(componentTys.back(), begin, end);
234e5dd7070Spatrick }
235e5dd7070Spatrick
236e5dd7070Spatrick // Legalize integer types.
237e5dd7070Spatrick if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
238e5dd7070Spatrick if (!isLegalIntegerType(CGM, intTy))
239e5dd7070Spatrick return addOpaqueData(begin, end);
240e5dd7070Spatrick }
241e5dd7070Spatrick
242e5dd7070Spatrick // All other types should be legal.
243e5dd7070Spatrick return addLegalTypedData(type, begin, end);
244e5dd7070Spatrick }
245e5dd7070Spatrick
addLegalTypedData(llvm::Type * type,CharUnits begin,CharUnits end)246e5dd7070Spatrick void SwiftAggLowering::addLegalTypedData(llvm::Type *type,
247e5dd7070Spatrick CharUnits begin, CharUnits end) {
248e5dd7070Spatrick // Require the type to be naturally aligned.
249e5dd7070Spatrick if (!begin.isZero() && !begin.isMultipleOf(getNaturalAlignment(CGM, type))) {
250e5dd7070Spatrick
251e5dd7070Spatrick // Try splitting vector types.
252e5dd7070Spatrick if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
253e5dd7070Spatrick auto split = splitLegalVectorType(CGM, end - begin, vecTy);
254e5dd7070Spatrick auto eltTy = split.first;
255e5dd7070Spatrick auto numElts = split.second;
256e5dd7070Spatrick
257e5dd7070Spatrick auto eltSize = (end - begin) / numElts;
258e5dd7070Spatrick assert(eltSize == getTypeStoreSize(CGM, eltTy));
259e5dd7070Spatrick for (size_t i = 0, e = numElts; i != e; ++i) {
260e5dd7070Spatrick addLegalTypedData(eltTy, begin, begin + eltSize);
261e5dd7070Spatrick begin += eltSize;
262e5dd7070Spatrick }
263e5dd7070Spatrick assert(begin == end);
264e5dd7070Spatrick return;
265e5dd7070Spatrick }
266e5dd7070Spatrick
267e5dd7070Spatrick return addOpaqueData(begin, end);
268e5dd7070Spatrick }
269e5dd7070Spatrick
270e5dd7070Spatrick addEntry(type, begin, end);
271e5dd7070Spatrick }
272e5dd7070Spatrick
/// Add a scalar entry of the given type (nullptr meaning opaque data)
/// covering [begin, end) to the Entries list, which is kept sorted by
/// offset and non-overlapping.  Conflicts with existing entries are
/// resolved by merging types, splitting vectors, or degrading to opaque.
void SwiftAggLowering::addEntry(llvm::Type *type,
                                CharUnits begin, CharUnits end) {
  assert((!type ||
          (!isa<llvm::StructType>(type) && !isa<llvm::ArrayType>(type))) &&
         "cannot add aggregate-typed data");
  assert(!type || begin.isMultipleOf(getNaturalAlignment(CGM, type)));

  // Fast path: we can just add entries to the end.
  if (Entries.empty() || Entries.back().End <= begin) {
    Entries.push_back({begin, end, type});
    return;
  }

  // Find the first existing entry that ends after the start of the new data.
  // TODO: do a binary search if Entries is big enough for it to matter.
  size_t index = Entries.size() - 1;
  while (index != 0) {
    if (Entries[index - 1].End <= begin) break;
    --index;
  }

  // The entry ends after the start of the new data.
  // If the entry starts after the end of the new data, there's no conflict.
  if (Entries[index].Begin >= end) {
    // This insertion is potentially O(n), but the way we generally build
    // these layouts makes that unlikely to matter: we'd need a union of
    // several very large types.
    Entries.insert(Entries.begin() + index, {begin, end, type});
    return;
  }

  // Otherwise, the ranges overlap.  The new range might also overlap
  // with later ranges.
  // (Control returns here after splitting a conflicting vector entry
  // in place, so the overlap can be re-examined element-by-element.)
restartAfterSplit:

  // Simplest case: an exact overlap.
  if (Entries[index].Begin == begin && Entries[index].End == end) {
    // If the types match exactly, great.
    if (Entries[index].Type == type) return;

    // If either type is opaque, make the entry opaque and return.
    if (Entries[index].Type == nullptr) {
      return;
    } else if (type == nullptr) {
      Entries[index].Type = nullptr;
      return;
    }

    // If they disagree in an ABI-agnostic way, just resolve the conflict
    // arbitrarily.
    if (auto entryType = getCommonType(Entries[index].Type, type)) {
      Entries[index].Type = entryType;
      return;
    }

    // Otherwise, make the entry opaque.
    Entries[index].Type = nullptr;
    return;
  }

  // Okay, we have an overlapping conflict of some sort.

  // If we have a vector type, split it into its elements and re-add each,
  // so that element-granularity overlaps can resolve individually.
  if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
    auto eltTy = vecTy->getElementType();
    CharUnits eltSize =
        (end - begin) / cast<llvm::FixedVectorType>(vecTy)->getNumElements();
    assert(eltSize == getTypeStoreSize(CGM, eltTy));
    for (unsigned i = 0,
                  e = cast<llvm::FixedVectorType>(vecTy)->getNumElements();
         i != e; ++i) {
      addEntry(eltTy, begin, begin + eltSize);
      begin += eltSize;
    }
    assert(begin == end);
    return;
  }

  // If the entry is a vector type, split it and try again.
  if (Entries[index].Type && Entries[index].Type->isVectorTy()) {
    splitVectorEntry(index);
    goto restartAfterSplit;
  }

  // Okay, we have no choice but to make the existing entry opaque.

  Entries[index].Type = nullptr;

  // Stretch the start of the entry to the beginning of the range.
  if (begin < Entries[index].Begin) {
    Entries[index].Begin = begin;
    assert(index == 0 || begin >= Entries[index - 1].End);
  }

  // Stretch the end of the entry to the end of the range; but if we run
  // into the start of the next entry, just leave the range there and repeat.
  while (end > Entries[index].End) {
    assert(Entries[index].Type == nullptr);

    // If the range doesn't overlap the next entry, we're done.
    if (index == Entries.size() - 1 || end <= Entries[index + 1].Begin) {
      Entries[index].End = end;
      break;
    }

    // Otherwise, stretch to the start of the next entry.
    Entries[index].End = Entries[index + 1].Begin;

    // Continue with the next entry.
    index++;

    // This entry needs to be made opaque if it is not already.
    if (Entries[index].Type == nullptr)
      continue;

    // Split vector entries unless we completely subsume them.
    if (Entries[index].Type->isVectorTy() &&
        end < Entries[index].End) {
      splitVectorEntry(index);
    }

    // Make the entry opaque.
    Entries[index].Type = nullptr;
  }
}
398e5dd7070Spatrick
399e5dd7070Spatrick /// Replace the entry of vector type at offset 'index' with a sequence
400e5dd7070Spatrick /// of its component vectors.
splitVectorEntry(unsigned index)401e5dd7070Spatrick void SwiftAggLowering::splitVectorEntry(unsigned index) {
402e5dd7070Spatrick auto vecTy = cast<llvm::VectorType>(Entries[index].Type);
403e5dd7070Spatrick auto split = splitLegalVectorType(CGM, Entries[index].getWidth(), vecTy);
404e5dd7070Spatrick
405e5dd7070Spatrick auto eltTy = split.first;
406e5dd7070Spatrick CharUnits eltSize = getTypeStoreSize(CGM, eltTy);
407e5dd7070Spatrick auto numElts = split.second;
408e5dd7070Spatrick Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry());
409e5dd7070Spatrick
410e5dd7070Spatrick CharUnits begin = Entries[index].Begin;
411e5dd7070Spatrick for (unsigned i = 0; i != numElts; ++i) {
412e5dd7070Spatrick Entries[index].Type = eltTy;
413e5dd7070Spatrick Entries[index].Begin = begin;
414e5dd7070Spatrick Entries[index].End = begin + eltSize;
415e5dd7070Spatrick begin += eltSize;
416e5dd7070Spatrick }
417e5dd7070Spatrick }
418e5dd7070Spatrick
419e5dd7070Spatrick /// Given a power-of-two unit size, return the offset of the aligned unit
420e5dd7070Spatrick /// of that size which contains the given offset.
421e5dd7070Spatrick ///
422e5dd7070Spatrick /// In other words, round down to the nearest multiple of the unit size.
getOffsetAtStartOfUnit(CharUnits offset,CharUnits unitSize)423e5dd7070Spatrick static CharUnits getOffsetAtStartOfUnit(CharUnits offset, CharUnits unitSize) {
424e5dd7070Spatrick assert(isPowerOf2(unitSize.getQuantity()));
425e5dd7070Spatrick auto unitMask = ~(unitSize.getQuantity() - 1);
426e5dd7070Spatrick return CharUnits::fromQuantity(offset.getQuantity() & unitMask);
427e5dd7070Spatrick }
428e5dd7070Spatrick
areBytesInSameUnit(CharUnits first,CharUnits second,CharUnits chunkSize)429e5dd7070Spatrick static bool areBytesInSameUnit(CharUnits first, CharUnits second,
430e5dd7070Spatrick CharUnits chunkSize) {
431e5dd7070Spatrick return getOffsetAtStartOfUnit(first, chunkSize)
432e5dd7070Spatrick == getOffsetAtStartOfUnit(second, chunkSize);
433e5dd7070Spatrick }
434e5dd7070Spatrick
isMergeableEntryType(llvm::Type * type)435e5dd7070Spatrick static bool isMergeableEntryType(llvm::Type *type) {
436e5dd7070Spatrick // Opaquely-typed memory is always mergeable.
437e5dd7070Spatrick if (type == nullptr) return true;
438e5dd7070Spatrick
439e5dd7070Spatrick // Pointers and integers are always mergeable. In theory we should not
440e5dd7070Spatrick // merge pointers, but (1) it doesn't currently matter in practice because
441e5dd7070Spatrick // the chunk size is never greater than the size of a pointer and (2)
442e5dd7070Spatrick // Swift IRGen uses integer types for a lot of things that are "really"
443*12c85518Srobert // just storing pointers (like std::optional<SomePointer>). If we ever have a
444e5dd7070Spatrick // target that would otherwise combine pointers, we should put some effort
445e5dd7070Spatrick // into fixing those cases in Swift IRGen and then call out pointer types
446e5dd7070Spatrick // here.
447e5dd7070Spatrick
448e5dd7070Spatrick // Floating-point and vector types should never be merged.
449e5dd7070Spatrick // Most such types are too large and highly-aligned to ever trigger merging
450e5dd7070Spatrick // in practice, but it's important for the rule to cover at least 'half'
451e5dd7070Spatrick // and 'float', as well as things like small vectors of 'i1' or 'i8'.
452e5dd7070Spatrick return (!type->isFloatingPointTy() && !type->isVectorTy());
453e5dd7070Spatrick }
454e5dd7070Spatrick
shouldMergeEntries(const StorageEntry & first,const StorageEntry & second,CharUnits chunkSize)455e5dd7070Spatrick bool SwiftAggLowering::shouldMergeEntries(const StorageEntry &first,
456e5dd7070Spatrick const StorageEntry &second,
457e5dd7070Spatrick CharUnits chunkSize) {
458e5dd7070Spatrick // Only merge entries that overlap the same chunk. We test this first
459e5dd7070Spatrick // despite being a bit more expensive because this is the condition that
460e5dd7070Spatrick // tends to prevent merging.
461e5dd7070Spatrick if (!areBytesInSameUnit(first.End - CharUnits::One(), second.Begin,
462e5dd7070Spatrick chunkSize))
463e5dd7070Spatrick return false;
464e5dd7070Spatrick
465e5dd7070Spatrick return (isMergeableEntryType(first.Type) &&
466e5dd7070Spatrick isMergeableEntryType(second.Type));
467e5dd7070Spatrick }
468e5dd7070Spatrick
/// Finalize the lowering: merge mergeable neighboring entries into opaque
/// ranges, then re-chunk every opaque range into aligned integer-typed
/// entries.  After this the entry list is immutable and can be enumerated.
void SwiftAggLowering::finish() {
  // An empty layout needs no processing.
  if (Entries.empty()) {
    Finished = true;
    return;
  }

  // We logically split the layout down into a series of chunks of this size,
  // which is generally the size of a pointer.
  const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);

  // First pass: if two entries should be merged, make them both opaque
  // and stretch one to meet the next.
  // Also, remember if there are any opaque entries.
  bool hasOpaqueEntries = (Entries[0].Type == nullptr);
  for (size_t i = 1, e = Entries.size(); i != e; ++i) {
    if (shouldMergeEntries(Entries[i - 1], Entries[i], chunkSize)) {
      Entries[i - 1].Type = nullptr;
      Entries[i].Type = nullptr;
      Entries[i - 1].End = Entries[i].Begin;
      hasOpaqueEntries = true;

    } else if (Entries[i].Type == nullptr) {
      hasOpaqueEntries = true;
    }
  }

  // The rest of the algorithm leaves non-opaque entries alone, so if we
  // have no opaque entries, we're done.
  if (!hasOpaqueEntries) {
    Finished = true;
    return;
  }

  // Okay, move the entries to a temporary and rebuild Entries.
  auto orig = std::move(Entries);
  assert(Entries.empty());

  for (size_t i = 0, e = orig.size(); i != e; ++i) {
    // Just copy over non-opaque entries.
    if (orig[i].Type != nullptr) {
      Entries.push_back(orig[i]);
      continue;
    }

    // Scan forward to determine the full extent of the next opaque range.
    // We know from the first pass that only contiguous ranges will overlap
    // the same aligned chunk.
    // (Note this loop deliberately advances the outer index 'i' past the
    // consumed opaque entries.)
    auto begin = orig[i].Begin;
    auto end = orig[i].End;
    while (i + 1 != e &&
           orig[i + 1].Type == nullptr &&
           end == orig[i + 1].Begin) {
      end = orig[i + 1].End;
      i++;
    }

    // Add an entry per intersected chunk.
    do {
      // Find the smallest aligned storage unit in the maximal aligned
      // storage unit containing 'begin' that contains all the bytes in
      // the intersection between the range and this chunk.
      CharUnits localBegin = begin;
      CharUnits chunkBegin = getOffsetAtStartOfUnit(localBegin, chunkSize);
      CharUnits chunkEnd = chunkBegin + chunkSize;
      CharUnits localEnd = std::min(end, chunkEnd);

      // Just do a simple loop over ever-increasing unit sizes.
      CharUnits unitSize = CharUnits::One();
      CharUnits unitBegin, unitEnd;
      for (; ; unitSize *= 2) {
        assert(unitSize <= chunkSize);
        unitBegin = getOffsetAtStartOfUnit(localBegin, unitSize);
        unitEnd = unitBegin + unitSize;
        if (unitEnd >= localEnd) break;
      }

      // Add an integer entry sized to the chosen aligned unit.
      auto entryTy =
        llvm::IntegerType::get(CGM.getLLVMContext(),
                               CGM.getContext().toBits(unitSize));
      Entries.push_back({unitBegin, unitEnd, entryTy});

      // The next chunk starts where this chunk left off.
      begin = localEnd;
    } while (begin != end);
  }

  // Okay, finally finished.
  Finished = true;
}
559e5dd7070Spatrick
enumerateComponents(EnumerationCallback callback) const560e5dd7070Spatrick void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const {
561e5dd7070Spatrick assert(Finished && "haven't yet finished lowering");
562e5dd7070Spatrick
563e5dd7070Spatrick for (auto &entry : Entries) {
564e5dd7070Spatrick callback(entry.Begin, entry.End, entry.Type);
565e5dd7070Spatrick }
566e5dd7070Spatrick }
567e5dd7070Spatrick
568e5dd7070Spatrick std::pair<llvm::StructType*, llvm::Type*>
getCoerceAndExpandTypes() const569e5dd7070Spatrick SwiftAggLowering::getCoerceAndExpandTypes() const {
570e5dd7070Spatrick assert(Finished && "haven't yet finished lowering");
571e5dd7070Spatrick
572e5dd7070Spatrick auto &ctx = CGM.getLLVMContext();
573e5dd7070Spatrick
574e5dd7070Spatrick if (Entries.empty()) {
575e5dd7070Spatrick auto type = llvm::StructType::get(ctx);
576e5dd7070Spatrick return { type, type };
577e5dd7070Spatrick }
578e5dd7070Spatrick
579e5dd7070Spatrick SmallVector<llvm::Type*, 8> elts;
580e5dd7070Spatrick CharUnits lastEnd = CharUnits::Zero();
581e5dd7070Spatrick bool hasPadding = false;
582e5dd7070Spatrick bool packed = false;
583e5dd7070Spatrick for (auto &entry : Entries) {
584e5dd7070Spatrick if (entry.Begin != lastEnd) {
585e5dd7070Spatrick auto paddingSize = entry.Begin - lastEnd;
586e5dd7070Spatrick assert(!paddingSize.isNegative());
587e5dd7070Spatrick
588e5dd7070Spatrick auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
589e5dd7070Spatrick paddingSize.getQuantity());
590e5dd7070Spatrick elts.push_back(padding);
591e5dd7070Spatrick hasPadding = true;
592e5dd7070Spatrick }
593e5dd7070Spatrick
594*12c85518Srobert if (!packed && !entry.Begin.isMultipleOf(CharUnits::fromQuantity(
595*12c85518Srobert CGM.getDataLayout().getABITypeAlign(entry.Type))))
596e5dd7070Spatrick packed = true;
597e5dd7070Spatrick
598e5dd7070Spatrick elts.push_back(entry.Type);
599e5dd7070Spatrick
600e5dd7070Spatrick lastEnd = entry.Begin + getTypeAllocSize(CGM, entry.Type);
601e5dd7070Spatrick assert(entry.End <= lastEnd);
602e5dd7070Spatrick }
603e5dd7070Spatrick
604e5dd7070Spatrick // We don't need to adjust 'packed' to deal with possible tail padding
605e5dd7070Spatrick // because we never do that kind of access through the coercion type.
606e5dd7070Spatrick auto coercionType = llvm::StructType::get(ctx, elts, packed);
607e5dd7070Spatrick
608e5dd7070Spatrick llvm::Type *unpaddedType = coercionType;
609e5dd7070Spatrick if (hasPadding) {
610e5dd7070Spatrick elts.clear();
611e5dd7070Spatrick for (auto &entry : Entries) {
612e5dd7070Spatrick elts.push_back(entry.Type);
613e5dd7070Spatrick }
614e5dd7070Spatrick if (elts.size() == 1) {
615e5dd7070Spatrick unpaddedType = elts[0];
616e5dd7070Spatrick } else {
617e5dd7070Spatrick unpaddedType = llvm::StructType::get(ctx, elts, /*packed*/ false);
618e5dd7070Spatrick }
619e5dd7070Spatrick } else if (Entries.size() == 1) {
620e5dd7070Spatrick unpaddedType = Entries[0].Type;
621e5dd7070Spatrick }
622e5dd7070Spatrick
623e5dd7070Spatrick return { coercionType, unpaddedType };
624e5dd7070Spatrick }
625e5dd7070Spatrick
shouldPassIndirectly(bool asReturnValue) const626e5dd7070Spatrick bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
627e5dd7070Spatrick assert(Finished && "haven't yet finished lowering");
628e5dd7070Spatrick
629e5dd7070Spatrick // Empty types don't need to be passed indirectly.
630e5dd7070Spatrick if (Entries.empty()) return false;
631e5dd7070Spatrick
632e5dd7070Spatrick // Avoid copying the array of types when there's just a single element.
633e5dd7070Spatrick if (Entries.size() == 1) {
634*12c85518Srobert return getSwiftABIInfo(CGM).shouldPassIndirectly(Entries.back().Type,
635e5dd7070Spatrick asReturnValue);
636e5dd7070Spatrick }
637e5dd7070Spatrick
638e5dd7070Spatrick SmallVector<llvm::Type*, 8> componentTys;
639e5dd7070Spatrick componentTys.reserve(Entries.size());
640e5dd7070Spatrick for (auto &entry : Entries) {
641e5dd7070Spatrick componentTys.push_back(entry.Type);
642e5dd7070Spatrick }
643*12c85518Srobert return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue);
644e5dd7070Spatrick }
645e5dd7070Spatrick
shouldPassIndirectly(CodeGenModule & CGM,ArrayRef<llvm::Type * > componentTys,bool asReturnValue)646e5dd7070Spatrick bool swiftcall::shouldPassIndirectly(CodeGenModule &CGM,
647e5dd7070Spatrick ArrayRef<llvm::Type*> componentTys,
648e5dd7070Spatrick bool asReturnValue) {
649*12c85518Srobert return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue);
650e5dd7070Spatrick }
651e5dd7070Spatrick
getMaximumVoluntaryIntegerSize(CodeGenModule & CGM)652e5dd7070Spatrick CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) {
653e5dd7070Spatrick // Currently always the size of an ordinary pointer.
654e5dd7070Spatrick return CGM.getContext().toCharUnitsFromBits(
655*12c85518Srobert CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default));
656e5dd7070Spatrick }
657e5dd7070Spatrick
getNaturalAlignment(CodeGenModule & CGM,llvm::Type * type)658e5dd7070Spatrick CharUnits swiftcall::getNaturalAlignment(CodeGenModule &CGM, llvm::Type *type) {
659e5dd7070Spatrick // For Swift's purposes, this is always just the store size of the type
660e5dd7070Spatrick // rounded up to a power of 2.
661e5dd7070Spatrick auto size = (unsigned long long) getTypeStoreSize(CGM, type).getQuantity();
662*12c85518Srobert size = llvm::bit_ceil(size);
663*12c85518Srobert assert(CGM.getDataLayout().getABITypeAlign(type) <= size);
664e5dd7070Spatrick return CharUnits::fromQuantity(size);
665e5dd7070Spatrick }
666e5dd7070Spatrick
isLegalIntegerType(CodeGenModule & CGM,llvm::IntegerType * intTy)667e5dd7070Spatrick bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
668e5dd7070Spatrick llvm::IntegerType *intTy) {
669e5dd7070Spatrick auto size = intTy->getBitWidth();
670e5dd7070Spatrick switch (size) {
671e5dd7070Spatrick case 1:
672e5dd7070Spatrick case 8:
673e5dd7070Spatrick case 16:
674e5dd7070Spatrick case 32:
675e5dd7070Spatrick case 64:
676e5dd7070Spatrick // Just assume that the above are always legal.
677e5dd7070Spatrick return true;
678e5dd7070Spatrick
679e5dd7070Spatrick case 128:
680e5dd7070Spatrick return CGM.getContext().getTargetInfo().hasInt128Type();
681e5dd7070Spatrick
682e5dd7070Spatrick default:
683e5dd7070Spatrick return false;
684e5dd7070Spatrick }
685e5dd7070Spatrick }
686e5dd7070Spatrick
isLegalVectorType(CodeGenModule & CGM,CharUnits vectorSize,llvm::VectorType * vectorTy)687e5dd7070Spatrick bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
688e5dd7070Spatrick llvm::VectorType *vectorTy) {
689a9ac8606Spatrick return isLegalVectorType(
690a9ac8606Spatrick CGM, vectorSize, vectorTy->getElementType(),
691a9ac8606Spatrick cast<llvm::FixedVectorType>(vectorTy)->getNumElements());
692e5dd7070Spatrick }
693e5dd7070Spatrick
isLegalVectorType(CodeGenModule & CGM,CharUnits vectorSize,llvm::Type * eltTy,unsigned numElts)694e5dd7070Spatrick bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
695e5dd7070Spatrick llvm::Type *eltTy, unsigned numElts) {
696e5dd7070Spatrick assert(numElts > 1 && "illegal vector length");
697*12c85518Srobert return getSwiftABIInfo(CGM).isLegalVectorType(vectorSize, eltTy, numElts);
698e5dd7070Spatrick }
699e5dd7070Spatrick
700e5dd7070Spatrick std::pair<llvm::Type*, unsigned>
splitLegalVectorType(CodeGenModule & CGM,CharUnits vectorSize,llvm::VectorType * vectorTy)701e5dd7070Spatrick swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
702e5dd7070Spatrick llvm::VectorType *vectorTy) {
703a9ac8606Spatrick auto numElts = cast<llvm::FixedVectorType>(vectorTy)->getNumElements();
704e5dd7070Spatrick auto eltTy = vectorTy->getElementType();
705e5dd7070Spatrick
706e5dd7070Spatrick // Try to split the vector type in half.
707e5dd7070Spatrick if (numElts >= 4 && isPowerOf2(numElts)) {
708e5dd7070Spatrick if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
709ec727ea7Spatrick return {llvm::FixedVectorType::get(eltTy, numElts / 2), 2};
710e5dd7070Spatrick }
711e5dd7070Spatrick
712e5dd7070Spatrick return {eltTy, numElts};
713e5dd7070Spatrick }
714e5dd7070Spatrick
legalizeVectorType(CodeGenModule & CGM,CharUnits origVectorSize,llvm::VectorType * origVectorTy,llvm::SmallVectorImpl<llvm::Type * > & components)715e5dd7070Spatrick void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
716e5dd7070Spatrick llvm::VectorType *origVectorTy,
717e5dd7070Spatrick llvm::SmallVectorImpl<llvm::Type*> &components) {
718e5dd7070Spatrick // If it's already a legal vector type, use it.
719e5dd7070Spatrick if (isLegalVectorType(CGM, origVectorSize, origVectorTy)) {
720e5dd7070Spatrick components.push_back(origVectorTy);
721e5dd7070Spatrick return;
722e5dd7070Spatrick }
723e5dd7070Spatrick
724e5dd7070Spatrick // Try to split the vector into legal subvectors.
725a9ac8606Spatrick auto numElts = cast<llvm::FixedVectorType>(origVectorTy)->getNumElements();
726e5dd7070Spatrick auto eltTy = origVectorTy->getElementType();
727e5dd7070Spatrick assert(numElts != 1);
728e5dd7070Spatrick
729e5dd7070Spatrick // The largest size that we're still considering making subvectors of.
730e5dd7070Spatrick // Always a power of 2.
731e5dd7070Spatrick unsigned logCandidateNumElts = llvm::findLastSet(numElts, llvm::ZB_Undefined);
732e5dd7070Spatrick unsigned candidateNumElts = 1U << logCandidateNumElts;
733e5dd7070Spatrick assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);
734e5dd7070Spatrick
735e5dd7070Spatrick // Minor optimization: don't check the legality of this exact size twice.
736e5dd7070Spatrick if (candidateNumElts == numElts) {
737e5dd7070Spatrick logCandidateNumElts--;
738e5dd7070Spatrick candidateNumElts >>= 1;
739e5dd7070Spatrick }
740e5dd7070Spatrick
741e5dd7070Spatrick CharUnits eltSize = (origVectorSize / numElts);
742e5dd7070Spatrick CharUnits candidateSize = eltSize * candidateNumElts;
743e5dd7070Spatrick
744e5dd7070Spatrick // The sensibility of this algorithm relies on the fact that we never
745e5dd7070Spatrick // have a legal non-power-of-2 vector size without having the power of 2
746e5dd7070Spatrick // also be legal.
747e5dd7070Spatrick while (logCandidateNumElts > 0) {
748e5dd7070Spatrick assert(candidateNumElts == 1U << logCandidateNumElts);
749e5dd7070Spatrick assert(candidateNumElts <= numElts);
750e5dd7070Spatrick assert(candidateSize == eltSize * candidateNumElts);
751e5dd7070Spatrick
752e5dd7070Spatrick // Skip illegal vector sizes.
753e5dd7070Spatrick if (!isLegalVectorType(CGM, candidateSize, eltTy, candidateNumElts)) {
754e5dd7070Spatrick logCandidateNumElts--;
755e5dd7070Spatrick candidateNumElts /= 2;
756e5dd7070Spatrick candidateSize /= 2;
757e5dd7070Spatrick continue;
758e5dd7070Spatrick }
759e5dd7070Spatrick
760e5dd7070Spatrick // Add the right number of vectors of this size.
761e5dd7070Spatrick auto numVecs = numElts >> logCandidateNumElts;
762ec727ea7Spatrick components.append(numVecs,
763ec727ea7Spatrick llvm::FixedVectorType::get(eltTy, candidateNumElts));
764e5dd7070Spatrick numElts -= (numVecs << logCandidateNumElts);
765e5dd7070Spatrick
766e5dd7070Spatrick if (numElts == 0) return;
767e5dd7070Spatrick
768e5dd7070Spatrick // It's possible that the number of elements remaining will be legal.
769e5dd7070Spatrick // This can happen with e.g. <7 x float> when <3 x float> is legal.
770e5dd7070Spatrick // This only needs to be separately checked if it's not a power of 2.
771e5dd7070Spatrick if (numElts > 2 && !isPowerOf2(numElts) &&
772e5dd7070Spatrick isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
773ec727ea7Spatrick components.push_back(llvm::FixedVectorType::get(eltTy, numElts));
774e5dd7070Spatrick return;
775e5dd7070Spatrick }
776e5dd7070Spatrick
777e5dd7070Spatrick // Bring vecSize down to something no larger than numElts.
778e5dd7070Spatrick do {
779e5dd7070Spatrick logCandidateNumElts--;
780e5dd7070Spatrick candidateNumElts /= 2;
781e5dd7070Spatrick candidateSize /= 2;
782e5dd7070Spatrick } while (candidateNumElts > numElts);
783e5dd7070Spatrick }
784e5dd7070Spatrick
785e5dd7070Spatrick // Otherwise, just append a bunch of individual elements.
786e5dd7070Spatrick components.append(numElts, eltTy);
787e5dd7070Spatrick }
788e5dd7070Spatrick
mustPassRecordIndirectly(CodeGenModule & CGM,const RecordDecl * record)789e5dd7070Spatrick bool swiftcall::mustPassRecordIndirectly(CodeGenModule &CGM,
790e5dd7070Spatrick const RecordDecl *record) {
791e5dd7070Spatrick // FIXME: should we not rely on the standard computation in Sema, just in
792e5dd7070Spatrick // case we want to diverge from the platform ABI (e.g. on targets where
793e5dd7070Spatrick // that uses the MSVC rule)?
794e5dd7070Spatrick return !record->canPassInRegisters();
795e5dd7070Spatrick }
796e5dd7070Spatrick
classifyExpandedType(SwiftAggLowering & lowering,bool forReturn,CharUnits alignmentForIndirect)797e5dd7070Spatrick static ABIArgInfo classifyExpandedType(SwiftAggLowering &lowering,
798e5dd7070Spatrick bool forReturn,
799e5dd7070Spatrick CharUnits alignmentForIndirect) {
800e5dd7070Spatrick if (lowering.empty()) {
801e5dd7070Spatrick return ABIArgInfo::getIgnore();
802e5dd7070Spatrick } else if (lowering.shouldPassIndirectly(forReturn)) {
803e5dd7070Spatrick return ABIArgInfo::getIndirect(alignmentForIndirect, /*byval*/ false);
804e5dd7070Spatrick } else {
805e5dd7070Spatrick auto types = lowering.getCoerceAndExpandTypes();
806e5dd7070Spatrick return ABIArgInfo::getCoerceAndExpand(types.first, types.second);
807e5dd7070Spatrick }
808e5dd7070Spatrick }
809e5dd7070Spatrick
classifyType(CodeGenModule & CGM,CanQualType type,bool forReturn)810e5dd7070Spatrick static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
811e5dd7070Spatrick bool forReturn) {
812e5dd7070Spatrick if (auto recordType = dyn_cast<RecordType>(type)) {
813e5dd7070Spatrick auto record = recordType->getDecl();
814e5dd7070Spatrick auto &layout = CGM.getContext().getASTRecordLayout(record);
815e5dd7070Spatrick
816e5dd7070Spatrick if (mustPassRecordIndirectly(CGM, record))
817e5dd7070Spatrick return ABIArgInfo::getIndirect(layout.getAlignment(), /*byval*/ false);
818e5dd7070Spatrick
819e5dd7070Spatrick SwiftAggLowering lowering(CGM);
820e5dd7070Spatrick lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
821e5dd7070Spatrick lowering.finish();
822e5dd7070Spatrick
823e5dd7070Spatrick return classifyExpandedType(lowering, forReturn, layout.getAlignment());
824e5dd7070Spatrick }
825e5dd7070Spatrick
826e5dd7070Spatrick // Just assume that all of our target ABIs can support returning at least
827e5dd7070Spatrick // two integer or floating-point values.
828e5dd7070Spatrick if (isa<ComplexType>(type)) {
829e5dd7070Spatrick return (forReturn ? ABIArgInfo::getDirect() : ABIArgInfo::getExpand());
830e5dd7070Spatrick }
831e5dd7070Spatrick
832e5dd7070Spatrick // Vector types may need to be legalized.
833e5dd7070Spatrick if (isa<VectorType>(type)) {
834e5dd7070Spatrick SwiftAggLowering lowering(CGM);
835e5dd7070Spatrick lowering.addTypedData(type, CharUnits::Zero());
836e5dd7070Spatrick lowering.finish();
837e5dd7070Spatrick
838e5dd7070Spatrick CharUnits alignment = CGM.getContext().getTypeAlignInChars(type);
839e5dd7070Spatrick return classifyExpandedType(lowering, forReturn, alignment);
840e5dd7070Spatrick }
841e5dd7070Spatrick
842e5dd7070Spatrick // Member pointer types need to be expanded, but it's a simple form of
843e5dd7070Spatrick // expansion that 'Direct' can handle. Note that CanBeFlattened should be
844e5dd7070Spatrick // true for this to work.
845e5dd7070Spatrick
846e5dd7070Spatrick // 'void' needs to be ignored.
847e5dd7070Spatrick if (type->isVoidType()) {
848e5dd7070Spatrick return ABIArgInfo::getIgnore();
849e5dd7070Spatrick }
850e5dd7070Spatrick
851e5dd7070Spatrick // Everything else can be passed directly.
852e5dd7070Spatrick return ABIArgInfo::getDirect();
853e5dd7070Spatrick }
854e5dd7070Spatrick
classifyReturnType(CodeGenModule & CGM,CanQualType type)855e5dd7070Spatrick ABIArgInfo swiftcall::classifyReturnType(CodeGenModule &CGM, CanQualType type) {
856e5dd7070Spatrick return classifyType(CGM, type, /*forReturn*/ true);
857e5dd7070Spatrick }
858e5dd7070Spatrick
classifyArgumentType(CodeGenModule & CGM,CanQualType type)859e5dd7070Spatrick ABIArgInfo swiftcall::classifyArgumentType(CodeGenModule &CGM,
860e5dd7070Spatrick CanQualType type) {
861e5dd7070Spatrick return classifyType(CGM, type, /*forReturn*/ false);
862e5dd7070Spatrick }
863e5dd7070Spatrick
computeABIInfo(CodeGenModule & CGM,CGFunctionInfo & FI)864e5dd7070Spatrick void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
865e5dd7070Spatrick auto &retInfo = FI.getReturnInfo();
866e5dd7070Spatrick retInfo = classifyReturnType(CGM, FI.getReturnType());
867e5dd7070Spatrick
868e5dd7070Spatrick for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) {
869e5dd7070Spatrick auto &argInfo = FI.arg_begin()[i];
870e5dd7070Spatrick argInfo.info = classifyArgumentType(CGM, argInfo.type);
871e5dd7070Spatrick }
872e5dd7070Spatrick }
873e5dd7070Spatrick
874e5dd7070Spatrick // Is swifterror lowered to a register by the target ABI.
isSwiftErrorLoweredInRegister(CodeGenModule & CGM)875e5dd7070Spatrick bool swiftcall::isSwiftErrorLoweredInRegister(CodeGenModule &CGM) {
876e5dd7070Spatrick return getSwiftABIInfo(CGM).isSwiftErrorInRegister();
877e5dd7070Spatrick }
878