//===--- SwiftCallingConv.cpp - Lowering for the Swift calling convention -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the abstract lowering for the Swift calling convention.
//
//===----------------------------------------------------------------------===//

#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Basic/TargetInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"

using namespace clang;
using namespace CodeGen;
using namespace swiftcall;

static const SwiftABIInfo &getSwiftABIInfo(CodeGenModule &CGM) {
  return cast<SwiftABIInfo>(CGM.getTargetCodeGenInfo().getABIInfo());
}

static bool isPowerOf2(unsigned n) {
  return n == (n & -n);
}

/// Given two types with the same size, try to find a common type.
static llvm::Type *getCommonType(llvm::Type *first, llvm::Type *second) {
  assert(first != second);

  // Allow pointers to merge with integers, but prefer the integer type.
  if (first->isIntegerTy()) {
    if (second->isPointerTy()) return first;
  } else if (first->isPointerTy()) {
    if (second->isIntegerTy()) return second;
    if (second->isPointerTy()) return first;

  // Allow two vectors to be merged (given that they have the same size).
  // This assumes that we never have two different vector register sets.
  } else if (auto firstVecTy = dyn_cast<llvm::VectorType>(first)) {
    if (auto secondVecTy = dyn_cast<llvm::VectorType>(second)) {
      if (auto commonTy = getCommonType(firstVecTy->getElementType(),
                                        secondVecTy->getElementType())) {
        return (commonTy == firstVecTy->getElementType() ? first : second);
      }
    }
  }

  return nullptr;
}

static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeStoreSize(type));
}

static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(type));
}

void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
  // Deal with various aggregate types as special cases:

  // Record types.
  if (auto recType = type->getAs<RecordType>()) {
    addTypedData(recType->getDecl(), begin);

  // Array types.
  } else if (type->isArrayType()) {
    // Incomplete array types (flexible array members?) don't provide
    // data to lay out, and the other cases shouldn't be possible.
    auto arrayType = CGM.getContext().getAsConstantArrayType(type);
    if (!arrayType) return;

    QualType eltType = arrayType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    for (uint64_t i = 0, e = arrayType->getSize().getZExtValue(); i != e; ++i) {
      addTypedData(eltType, begin + i * eltSize);
    }

  // Complex types.
  } else if (auto complexType = type->getAs<ComplexType>()) {
    auto eltType = complexType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    auto eltLLVMType = CGM.getTypes().ConvertType(eltType);
    addTypedData(eltLLVMType, begin, begin + eltSize);
    addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);

  // Member pointer types.
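  // (Their representation is C++-ABI-specific, so the code below just
  //  treats the storage as opaque bytes.)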
  } else if (type->getAs<MemberPointerType>()) {
    // Just add it all as opaque.
    addOpaqueData(begin, begin + CGM.getContext().getTypeSizeInChars(type));

  // Everything else is scalar and should not convert as an LLVM aggregate.
  } else {
    // We intentionally convert as !ForMem because we want to preserve
    // that a type was an i1.
    auto llvmType = CGM.getTypes().ConvertType(type);
    addTypedData(llvmType, begin);
  }
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin) {
  addTypedData(record, begin, CGM.getContext().getASTRecordLayout(record));
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
                                    const ASTRecordLayout &layout) {
  // Unions are a special case.
  if (record->isUnion()) {
    for (auto field : record->fields()) {
      if (field->isBitField()) {
        addBitFieldData(field, begin, 0);
      } else {
        addTypedData(field->getType(), begin);
      }
    }
    return;
  }

  // Note that correctness does not rely on us adding things in
  // their actual order of layout; it's just somewhat more efficient
  // for the builder.

  // With that in mind, add "early" C++ data.
  auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
  if (cxxRecord) {
    //   - a v-table pointer, if the class adds its own
    if (layout.hasOwnVFPtr()) {
      addTypedData(CGM.Int8PtrTy, begin);
    }

    //   - non-virtual bases
    for (auto &baseSpecifier : cxxRecord->bases()) {
      if (baseSpecifier.isVirtual()) continue;

      auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord));
    }

    //   - a vbptr if the class adds its own
    if (layout.hasOwnVBPtr()) {
      addTypedData(CGM.Int8PtrTy, begin + layout.getVBPtrOffset());
    }
  }

  // Add fields.
  for (auto field : record->fields()) {
    auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
    if (field->isBitField()) {
      addBitFieldData(field, begin, fieldOffsetInBits);
    } else {
      addTypedData(field->getType(),
              begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits));
    }
  }

  // Add "late" C++ data:
  if (cxxRecord) {
    //   - virtual bases
    for (auto &vbaseSpecifier : cxxRecord->vbases()) {
      auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord));
    }
  }
}

void SwiftAggLowering::addBitFieldData(const FieldDecl *bitfield,
                                       CharUnits recordBegin,
                                       uint64_t bitfieldBitBegin) {
  assert(bitfield->isBitField());
  auto &ctx = CGM.getContext();
  auto width = bitfield->getBitWidthValue(ctx);

  // We can ignore zero-width bit-fields.
  if (width == 0) return;

  // toCharUnitsFromBits rounds down.
  CharUnits bitfieldByteBegin = ctx.toCharUnitsFromBits(bitfieldBitBegin);

  // Find the offset of the last byte that is partially occupied by the
  // bit-field; since we otherwise expect exclusive ends, the end is the
  // next byte.
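  // For example (illustrative): a 3-bit field starting at bit offset 6
  // occupies bits [6,9), so the code below adds opaque bytes [0,2)
  // relative to recordBegin.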
  uint64_t bitfieldBitLast = bitfieldBitBegin + width - 1;
  CharUnits bitfieldByteEnd =
    ctx.toCharUnitsFromBits(bitfieldBitLast) + CharUnits::One();
  addOpaqueData(recordBegin + bitfieldByteBegin,
                recordBegin + bitfieldByteEnd);
}

void SwiftAggLowering::addTypedData(llvm::Type *type, CharUnits begin) {
  assert(type && "didn't provide type for typed data");
  addTypedData(type, begin, begin + getTypeStoreSize(CGM, type));
}

void SwiftAggLowering::addTypedData(llvm::Type *type,
                                    CharUnits begin, CharUnits end) {
  assert(type && "didn't provide type for typed data");
  assert(getTypeStoreSize(CGM, type) == end - begin);

  // Legalize vector types.
  if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
    SmallVector<llvm::Type*, 4> componentTys;
    legalizeVectorType(CGM, end - begin, vecTy, componentTys);
    assert(componentTys.size() >= 1);

    // Walk the initial components.
    for (size_t i = 0, e = componentTys.size(); i != e - 1; ++i) {
      llvm::Type *componentTy = componentTys[i];
      auto componentSize = getTypeStoreSize(CGM, componentTy);
      assert(componentSize < end - begin);
      addLegalTypedData(componentTy, begin, begin + componentSize);
      begin += componentSize;
    }

    return addLegalTypedData(componentTys.back(), begin, end);
  }

  // Legalize integer types.
  if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
    if (!isLegalIntegerType(CGM, intTy))
      return addOpaqueData(begin, end);
  }

  // All other types should be legal.
  return addLegalTypedData(type, begin, end);
}

void SwiftAggLowering::addLegalTypedData(llvm::Type *type,
                                         CharUnits begin, CharUnits end) {
  // Require the type to be naturally aligned.
  if (!begin.isZero() && !begin.isMultipleOf(getNaturalAlignment(CGM, type))) {

    // Try splitting vector types.
    if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
      auto split = splitLegalVectorType(CGM, end - begin, vecTy);
      auto eltTy = split.first;
      auto numElts = split.second;

      auto eltSize = (end - begin) / numElts;
      assert(eltSize == getTypeStoreSize(CGM, eltTy));
      for (size_t i = 0, e = numElts; i != e; ++i) {
        addLegalTypedData(eltTy, begin, begin + eltSize);
        begin += eltSize;
      }
      assert(begin == end);
      return;
    }

    return addOpaqueData(begin, end);
  }

  addEntry(type, begin, end);
}

void SwiftAggLowering::addEntry(llvm::Type *type,
                                CharUnits begin, CharUnits end) {
  assert((!type ||
          (!isa<llvm::StructType>(type) && !isa<llvm::ArrayType>(type))) &&
         "cannot add aggregate-typed data");
  assert(!type || begin.isMultipleOf(getNaturalAlignment(CGM, type)));

  // Fast path: we can just add entries to the end.
  if (Entries.empty() || Entries.back().End <= begin) {
    Entries.push_back({begin, end, type});
    return;
  }

  // Find the first existing entry that ends after the start of the new data.
  // TODO: do a binary search if Entries is big enough for it to matter.
  size_t index = Entries.size() - 1;
  while (index != 0) {
    if (Entries[index - 1].End <= begin) break;
    --index;
  }

  // The entry ends after the start of the new data.
  // If the entry starts after the end of the new data, there's no conflict.
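  // In that case we can simply insert the new entry in front of it.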
  if (Entries[index].Begin >= end) {
    // This insertion is potentially O(n), but the way we generally build
    // these layouts makes that unlikely to matter: we'd need a union of
    // several very large types.
    Entries.insert(Entries.begin() + index, {begin, end, type});
    return;
  }

  // Otherwise, the ranges overlap.  The new range might also overlap
  // with later ranges.
restartAfterSplit:

  // Simplest case: an exact overlap.
  if (Entries[index].Begin == begin && Entries[index].End == end) {
    // If the types match exactly, great.
    if (Entries[index].Type == type) return;

    // If either type is opaque, make the entry opaque and return.
    if (Entries[index].Type == nullptr) {
      return;
    } else if (type == nullptr) {
      Entries[index].Type = nullptr;
      return;
    }

    // If they disagree in an ABI-agnostic way, just resolve the conflict
    // arbitrarily.
    if (auto entryType = getCommonType(Entries[index].Type, type)) {
      Entries[index].Type = entryType;
      return;
    }

    // Otherwise, make the entry opaque.
    Entries[index].Type = nullptr;
    return;
  }

  // Okay, we have an overlapping conflict of some sort.

  // If we have a vector type, split it.
  if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
    auto eltTy = vecTy->getElementType();
    CharUnits eltSize =
        (end - begin) / cast<llvm::FixedVectorType>(vecTy)->getNumElements();
    assert(eltSize == getTypeStoreSize(CGM, eltTy));
    for (unsigned i = 0,
                  e = cast<llvm::FixedVectorType>(vecTy)->getNumElements();
         i != e; ++i) {
      addEntry(eltTy, begin, begin + eltSize);
      begin += eltSize;
    }
    assert(begin == end);
    return;
  }

  // If the entry is a vector type, split it and try again.
  if (Entries[index].Type && Entries[index].Type->isVectorTy()) {
    splitVectorEntry(index);
    goto restartAfterSplit;
  }

  // Okay, we have no choice but to make the existing entry opaque.

  Entries[index].Type = nullptr;

  // Stretch the start of the entry to the beginning of the range.
  if (begin < Entries[index].Begin) {
    Entries[index].Begin = begin;
    assert(index == 0 || begin >= Entries[index - 1].End);
  }

  // Stretch the end of the entry to the end of the range; but if we run
  // into the start of the next entry, just leave the range there and repeat.
  while (end > Entries[index].End) {
    assert(Entries[index].Type == nullptr);

    // If the range doesn't overlap the next entry, we're done.
    if (index == Entries.size() - 1 || end <= Entries[index + 1].Begin) {
      Entries[index].End = end;
      break;
    }

    // Otherwise, stretch to the start of the next entry.
    Entries[index].End = Entries[index + 1].Begin;

    // Continue with the next entry.
    index++;

    // This entry needs to be made opaque if it is not already.
    if (Entries[index].Type == nullptr)
      continue;

    // Split vector entries unless we completely subsume them.
    if (Entries[index].Type->isVectorTy() &&
        end < Entries[index].End) {
      splitVectorEntry(index);
    }

    // Make the entry opaque.
    Entries[index].Type = nullptr;
  }
}

/// Replace the entry of vector type at offset 'index' with a sequence
/// of its component vectors.
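///
/// For example (illustrative): if no smaller legal subvector exists, a
/// <4 x i32> entry covering [0,16) is replaced by four i32 entries covering
/// [0,4), [4,8), [8,12), and [12,16).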
void SwiftAggLowering::splitVectorEntry(unsigned index) {
  auto vecTy = cast<llvm::VectorType>(Entries[index].Type);
  auto split = splitLegalVectorType(CGM, Entries[index].getWidth(), vecTy);

  auto eltTy = split.first;
  CharUnits eltSize = getTypeStoreSize(CGM, eltTy);
  auto numElts = split.second;
  Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry());

  CharUnits begin = Entries[index].Begin;
  for (unsigned i = 0; i != numElts; ++i) {
    // Fill in the original entry and each of the newly inserted entries.
    unsigned idx = index + i;
    Entries[idx].Type = eltTy;
    Entries[idx].Begin = begin;
    Entries[idx].End = begin + eltSize;
    begin += eltSize;
  }
}

/// Given a power-of-two unit size, return the offset of the aligned unit
/// of that size which contains the given offset.
///
/// In other words, round down to the nearest multiple of the unit size.
static CharUnits getOffsetAtStartOfUnit(CharUnits offset, CharUnits unitSize) {
  assert(isPowerOf2(unitSize.getQuantity()));
  auto unitMask = ~(unitSize.getQuantity() - 1);
  return CharUnits::fromQuantity(offset.getQuantity() & unitMask);
}

static bool areBytesInSameUnit(CharUnits first, CharUnits second,
                               CharUnits chunkSize) {
  return getOffsetAtStartOfUnit(first, chunkSize)
      == getOffsetAtStartOfUnit(second, chunkSize);
}

static bool isMergeableEntryType(llvm::Type *type) {
  // Opaquely-typed memory is always mergeable.
  if (type == nullptr) return true;

  // Pointers and integers are always mergeable.  In theory we should not
  // merge pointers, but (1) it doesn't currently matter in practice because
  // the chunk size is never greater than the size of a pointer and (2)
  // Swift IRGen uses integer types for a lot of things that are "really"
  // just storing pointers (like Optional<SomePointer>).  If we ever have a
  // target that would otherwise combine pointers, we should put some effort
  // into fixing those cases in Swift IRGen and then call out pointer types
  // here.

  // Floating-point and vector types should never be merged.
  // Most such types are too large and highly-aligned to ever trigger merging
  // in practice, but it's important for the rule to cover at least 'half'
  // and 'float', as well as things like small vectors of 'i1' or 'i8'.
  return (!type->isFloatingPointTy() && !type->isVectorTy());
}

bool SwiftAggLowering::shouldMergeEntries(const StorageEntry &first,
                                          const StorageEntry &second,
                                          CharUnits chunkSize) {
  // Only merge entries that overlap the same chunk.  We test this first
  // despite being a bit more expensive because this is the condition that
  // tends to prevent merging.
  if (!areBytesInSameUnit(first.End - CharUnits::One(), second.Begin,
                          chunkSize))
    return false;

  return (isMergeableEntryType(first.Type) &&
          isMergeableEntryType(second.Type));
}

void SwiftAggLowering::finish() {
  if (Entries.empty()) {
    Finished = true;
    return;
  }

  // We logically split the layout down into a series of chunks of this size,
  // which is generally the size of a pointer.
  const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);

  // First pass: if two entries should be merged, make them both opaque
  // and stretch one to meet the next.
  // Also, remember if there are any opaque entries.
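  // (Illustrative example, assuming an 8-byte chunk size: an i32 entry at
  //  [0,4) and opaque data at [4,5) fall in the same chunk, so both entries
  //  become opaque and are rebuilt as integer units by the second pass below.)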
  bool hasOpaqueEntries = (Entries[0].Type == nullptr);
  for (size_t i = 1, e = Entries.size(); i != e; ++i) {
    if (shouldMergeEntries(Entries[i - 1], Entries[i], chunkSize)) {
      Entries[i - 1].Type = nullptr;
      Entries[i].Type = nullptr;
      Entries[i - 1].End = Entries[i].Begin;
      hasOpaqueEntries = true;

    } else if (Entries[i].Type == nullptr) {
      hasOpaqueEntries = true;
    }
  }

  // The rest of the algorithm leaves non-opaque entries alone, so if we
  // have no opaque entries, we're done.
  if (!hasOpaqueEntries) {
    Finished = true;
    return;
  }

  // Okay, move the entries to a temporary and rebuild Entries.
  auto orig = std::move(Entries);
  assert(Entries.empty());

  for (size_t i = 0, e = orig.size(); i != e; ++i) {
    // Just copy over non-opaque entries.
    if (orig[i].Type != nullptr) {
      Entries.push_back(orig[i]);
      continue;
    }

    // Scan forward to determine the full extent of the next opaque range.
    // We know from the first pass that only contiguous ranges will overlap
    // the same aligned chunk.
    auto begin = orig[i].Begin;
    auto end = orig[i].End;
    while (i + 1 != e &&
           orig[i + 1].Type == nullptr &&
           end == orig[i + 1].Begin) {
      end = orig[i + 1].End;
      i++;
    }

    // Add an entry per intersected chunk.
    do {
      // Find the smallest aligned storage unit in the maximal aligned
      // storage unit containing 'begin' that contains all the bytes in
      // the intersection between the range and this chunk.
      CharUnits localBegin = begin;
      CharUnits chunkBegin = getOffsetAtStartOfUnit(localBegin, chunkSize);
      CharUnits chunkEnd = chunkBegin + chunkSize;
      CharUnits localEnd = std::min(end, chunkEnd);

      // Just do a simple loop over ever-increasing unit sizes.
      CharUnits unitSize = CharUnits::One();
      CharUnits unitBegin, unitEnd;
      for (; ; unitSize *= 2) {
        assert(unitSize <= chunkSize);
        unitBegin = getOffsetAtStartOfUnit(localBegin, unitSize);
        unitEnd = unitBegin + unitSize;
        if (unitEnd >= localEnd) break;
      }

      // Add an entry for this unit.
      auto entryTy =
        llvm::IntegerType::get(CGM.getLLVMContext(),
                               CGM.getContext().toBits(unitSize));
      Entries.push_back({unitBegin, unitEnd, entryTy});

      // The next chunk starts where this chunk left off.
      begin = localEnd;
    } while (begin != end);
  }

  // Okay, finally finished.
  Finished = true;
}

void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const {
  assert(Finished && "haven't yet finished lowering");

  for (auto &entry : Entries) {
    callback(entry.Begin, entry.End, entry.Type);
  }
}

std::pair<llvm::StructType*, llvm::Type*>
SwiftAggLowering::getCoerceAndExpandTypes() const {
  assert(Finished && "haven't yet finished lowering");

  auto &ctx = CGM.getLLVMContext();

  if (Entries.empty()) {
    auto type = llvm::StructType::get(ctx);
    return { type, type };
  }

  SmallVector<llvm::Type*, 8> elts;
  CharUnits lastEnd = CharUnits::Zero();
  bool hasPadding = false;
  bool packed = false;
  for (auto &entry : Entries) {
    if (entry.Begin != lastEnd) {
      auto paddingSize = entry.Begin - lastEnd;
      assert(!paddingSize.isNegative());

      auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
                                          paddingSize.getQuantity());
      elts.push_back(padding);
      hasPadding = true;
    }

    if (!packed && !entry.Begin.isMultipleOf(
          CharUnits::fromQuantity(
            CGM.getDataLayout().getABITypeAlignment(entry.Type))))
      packed = true;

    elts.push_back(entry.Type);

    lastEnd = entry.Begin + getTypeAllocSize(CGM, entry.Type);
    assert(entry.End <= lastEnd);
  }

  // We don't need to adjust 'packed' to deal with possible tail padding
  // because we never do that kind of access through the coercion type.
  auto coercionType = llvm::StructType::get(ctx, elts, packed);

  llvm::Type *unpaddedType = coercionType;
  if (hasPadding) {
    elts.clear();
    for (auto &entry : Entries) {
      elts.push_back(entry.Type);
    }
    if (elts.size() == 1) {
      unpaddedType = elts[0];
    } else {
      unpaddedType = llvm::StructType::get(ctx, elts, /*packed*/ false);
    }
  } else if (Entries.size() == 1) {
    unpaddedType = Entries[0].Type;
  }

  return { coercionType, unpaddedType };
}

bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
  assert(Finished && "haven't yet finished lowering");

  // Empty types don't need to be passed indirectly.
  if (Entries.empty()) return false;

  // Avoid copying the array of types when there's just a single element.
  if (Entries.size() == 1) {
    return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(
                                                           Entries.back().Type,
                                                             asReturnValue);
  }

  SmallVector<llvm::Type*, 8> componentTys;
  componentTys.reserve(Entries.size());
  for (auto &entry : Entries) {
    componentTys.push_back(entry.Type);
  }
  return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(componentTys,
                                                           asReturnValue);
}

bool swiftcall::shouldPassIndirectly(CodeGenModule &CGM,
                                     ArrayRef<llvm::Type*> componentTys,
                                     bool asReturnValue) {
  return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(componentTys,
                                                           asReturnValue);
}

CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) {
  // Currently always the size of an ordinary pointer.
  return CGM.getContext().toCharUnitsFromBits(
           CGM.getContext().getTargetInfo().getPointerWidth(0));
}

CharUnits swiftcall::getNaturalAlignment(CodeGenModule &CGM, llvm::Type *type) {
  // For Swift's purposes, this is always just the store size of the type
  // rounded up to a power of 2.
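  // For example (illustrative): a <3 x float> has a store size of 12 bytes,
  // so its natural alignment here is 16.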
  auto size = (unsigned long long) getTypeStoreSize(CGM, type).getQuantity();
  if (!isPowerOf2(size)) {
    size = 1ULL << (llvm::findLastSet(size, llvm::ZB_Undefined) + 1);
  }
  assert(size >= CGM.getDataLayout().getABITypeAlignment(type));
  return CharUnits::fromQuantity(size);
}

bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
                                   llvm::IntegerType *intTy) {
  auto size = intTy->getBitWidth();
  switch (size) {
  case 1:
  case 8:
  case 16:
  case 32:
  case 64:
    // Just assume that the above are always legal.
    return true;

  case 128:
    return CGM.getContext().getTargetInfo().hasInt128Type();

  default:
    return false;
  }
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::VectorType *vectorTy) {
  return isLegalVectorType(
      CGM, vectorSize, vectorTy->getElementType(),
      cast<llvm::FixedVectorType>(vectorTy)->getNumElements());
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::Type *eltTy, unsigned numElts) {
  assert(numElts > 1 && "illegal vector length");
  return getSwiftABIInfo(CGM)
           .isLegalVectorTypeForSwift(vectorSize, eltTy, numElts);
}

std::pair<llvm::Type*, unsigned>
swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                llvm::VectorType *vectorTy) {
  auto numElts = cast<llvm::FixedVectorType>(vectorTy)->getNumElements();
  auto eltTy = vectorTy->getElementType();

  // Try to split the vector type in half.
  if (numElts >= 4 && isPowerOf2(numElts)) {
    if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
      return {llvm::FixedVectorType::get(eltTy, numElts / 2), 2};
  }

  return {eltTy, numElts};
}

void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
                                   llvm::VectorType *origVectorTy,
                                   llvm::SmallVectorImpl<llvm::Type*> &components) {
  // If it's already a legal vector type, use it.
  if (isLegalVectorType(CGM, origVectorSize, origVectorTy)) {
    components.push_back(origVectorTy);
    return;
  }

  // Try to split the vector into legal subvectors.
  auto numElts = cast<llvm::FixedVectorType>(origVectorTy)->getNumElements();
  auto eltTy = origVectorTy->getElementType();
  assert(numElts != 1);

  // The largest size that we're still considering making subvectors of.
  // Always a power of 2.
  unsigned logCandidateNumElts = llvm::findLastSet(numElts, llvm::ZB_Undefined);
  unsigned candidateNumElts = 1U << logCandidateNumElts;
  assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);

  // Minor optimization: don't check the legality of this exact size twice.
  if (candidateNumElts == numElts) {
    logCandidateNumElts--;
    candidateNumElts >>= 1;
  }

  CharUnits eltSize = (origVectorSize / numElts);
  CharUnits candidateSize = eltSize * candidateNumElts;

  // The sensibility of this algorithm relies on the fact that we never
  // have a legal non-power-of-2 vector size without having the power of 2
  // also be legal.
  while (logCandidateNumElts > 0) {
    assert(candidateNumElts == 1U << logCandidateNumElts);
    assert(candidateNumElts <= numElts);
    assert(candidateSize == eltSize * candidateNumElts);

    // Skip illegal vector sizes.
    if (!isLegalVectorType(CGM, candidateSize, eltTy, candidateNumElts)) {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
      continue;
    }

    // Add the right number of vectors of this size.
    auto numVecs = numElts >> logCandidateNumElts;
    components.append(numVecs,
                      llvm::FixedVectorType::get(eltTy, candidateNumElts));
    numElts -= (numVecs << logCandidateNumElts);

    if (numElts == 0) return;

    // It's possible that the number of elements remaining will be legal.
    // This can happen with e.g. <7 x float> when <3 x float> is legal.
    // This only needs to be separately checked if it's not a power of 2.
    if (numElts > 2 && !isPowerOf2(numElts) &&
        isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
      components.push_back(llvm::FixedVectorType::get(eltTy, numElts));
      return;
    }

    // Bring candidateNumElts down to something no larger than numElts.
    do {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
    } while (candidateNumElts > numElts);
  }

  // Otherwise, just append a bunch of individual elements.
  components.append(numElts, eltTy);
}

bool swiftcall::mustPassRecordIndirectly(CodeGenModule &CGM,
                                         const RecordDecl *record) {
  // FIXME: should we not rely on the standard computation in Sema, just in
  // case we want to diverge from the platform ABI (e.g. on targets where
  // that uses the MSVC rule)?
  return !record->canPassInRegisters();
}

static ABIArgInfo classifyExpandedType(SwiftAggLowering &lowering,
                                       bool forReturn,
                                       CharUnits alignmentForIndirect) {
  if (lowering.empty()) {
    return ABIArgInfo::getIgnore();
  } else if (lowering.shouldPassIndirectly(forReturn)) {
    return ABIArgInfo::getIndirect(alignmentForIndirect, /*byval*/ false);
  } else {
    auto types = lowering.getCoerceAndExpandTypes();
    return ABIArgInfo::getCoerceAndExpand(types.first, types.second);
  }
}

static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
                               bool forReturn) {
  if (auto recordType = dyn_cast<RecordType>(type)) {
    auto record = recordType->getDecl();
    auto &layout = CGM.getContext().getASTRecordLayout(record);

    if (mustPassRecordIndirectly(CGM, record))
      return ABIArgInfo::getIndirect(layout.getAlignment(), /*byval*/ false);

    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
    lowering.finish();

    return classifyExpandedType(lowering, forReturn, layout.getAlignment());
  }

  // Just assume that all of our target ABIs can support returning at least
  // two integer or floating-point values.
  if (isa<ComplexType>(type)) {
    return (forReturn ? ABIArgInfo::getDirect() : ABIArgInfo::getExpand());
  }

  // Vector types may need to be legalized.
  if (isa<VectorType>(type)) {
    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(type, CharUnits::Zero());
    lowering.finish();

    CharUnits alignment = CGM.getContext().getTypeAlignInChars(type);
    return classifyExpandedType(lowering, forReturn, alignment);
  }

  // Member pointer types need to be expanded, but it's a simple form of
  // expansion that 'Direct' can handle.  Note that CanBeFlattened should be
  // true for this to work.

  // 'void' needs to be ignored.
  if (type->isVoidType()) {
    return ABIArgInfo::getIgnore();
  }

  // Everything else can be passed directly.
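  // (e.g. pointers and ordinary integer or floating-point scalars, as well
  //  as member pointers per the note above.)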
  return ABIArgInfo::getDirect();
}

ABIArgInfo swiftcall::classifyReturnType(CodeGenModule &CGM, CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ true);
}

ABIArgInfo swiftcall::classifyArgumentType(CodeGenModule &CGM,
                                           CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ false);
}

void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  auto &retInfo = FI.getReturnInfo();
  retInfo = classifyReturnType(CGM, FI.getReturnType());

  for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) {
    auto &argInfo = FI.arg_begin()[i];
    argInfo.info = classifyArgumentType(CGM, argInfo.type);
  }
}

// Return whether the swifterror value is lowered to a register by the
// target ABI.
bool swiftcall::isSwiftErrorLoweredInRegister(CodeGenModule &CGM) {
  return getSwiftABIInfo(CGM).isSwiftErrorInRegister();
}