//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGHLSLRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace clang;
using namespace CodeGen;

CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
    : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
      Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()),
      TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) {
  SkippedLayout = false;
  LongDoubleReferenced = false;
}

CodeGenTypes::~CodeGenTypes() {
  for (llvm::FoldingSet<CGFunctionInfo>::iterator I = FunctionInfos.begin(),
                                                  E = FunctionInfos.end();
       I != E;)
    delete &*I++;
}

const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const {
  return CGM.getCodeGenOpts();
}

void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
                                     llvm::StructType *Ty,
                                     StringRef suffix) {
  SmallString<256> TypeName;
  llvm::raw_svector_ostream OS(TypeName);
  OS << RD->getKindName() << '.';

  // FIXME: We probably want to make more tweaks to the printing policy. For
  // example, we should probably enable PrintCanonicalTypes and
  // FullyQualifiedNames.
  PrintingPolicy Policy = RD->getASTContext().getPrintingPolicy();
  Policy.SuppressInlineNamespace = false;

  // Name the codegen type after the typedef name if there is no tag type
  // name available.
  if (RD->getIdentifier()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (RD->getDeclContext())
      RD->printQualifiedName(OS, Policy);
    else
      RD->printName(OS, Policy);
  } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (TDD->getDeclContext())
      TDD->printQualifiedName(OS, Policy);
    else
      TDD->printName(OS);
  } else
    OS << "anon";

  if (!suffix.empty())
    OS << suffix;

  Ty->setName(OS.str());
}
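// For example, 'namespace N { struct S {}; }' yields an IR type named
// "struct.N::S"; an unnamed struct reachable only through a typedef is named
// after that typedef; anything else falls back to "struct.anon" or
// "union.anon". Callers may append a suffix to disambiguate related types.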
/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
///
/// We generally assume that the alloc size of this type under the LLVM
/// data layout is the same as the size of the AST type. The alignment
/// does not have to match: Clang should always use explicit alignments
/// and packed structs as necessary to produce the layout it needs.
/// But the size does need to be exactly right or else things like struct
/// layout will break.
llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
  if (T->isConstantMatrixType()) {
    const Type *Ty = Context.getCanonicalType(T).getTypePtr();
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    return llvm::ArrayType::get(ConvertType(MT->getElementType()),
                                MT->getNumRows() * MT->getNumColumns());
  }

  llvm::Type *R = ConvertType(T);

  // Check for the boolean vector case.
  if (T->isExtVectorBoolType()) {
    auto *FixedVT = cast<llvm::FixedVectorType>(R);
    // Pad to at least one byte.
    uint64_t BytePadded = std::max<uint64_t>(FixedVT->getNumElements(), 8);
    return llvm::IntegerType::get(FixedVT->getContext(), BytePadded);
  }

  // If T is _Bool or a _BitInt type, ConvertType will produce an IR type
  // with the exact semantic bit-width of the AST type; for example,
  // _BitInt(17) will turn into i17. In memory, however, we need to store
  // such values extended to their full storage size as decided by AST
  // layout; this is an ABI requirement. Ideally, we would always use an
  // integer type that's just the bit-size of the AST type; for example, if
  // sizeof(_BitInt(17)) == 4, _BitInt(17) would turn into i32. That is what's
  // returned by convertTypeForLoadStore. However, that type does not
  // always satisfy the size requirement on memory representation types
  // described above. For example, a 32-bit platform might reasonably set
  // sizeof(_BitInt(65)) == 12, but i96 is likely to have an alloc size of
  // 16 bytes in the LLVM data layout. In these cases, we simply return
  // a byte array of the appropriate size.
  if (T->isBitIntType()) {
    if (typeRequiresSplitIntoByteArray(T, R))
      return llvm::ArrayType::get(CGM.Int8Ty,
                                  Context.getTypeSizeInChars(T).getQuantity());
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));
  }

  if (R->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  // Else, don't map it.
  return R;
}

bool CodeGenTypes::typeRequiresSplitIntoByteArray(QualType ASTTy,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(ASTTy);

  CharUnits ASTSize = Context.getTypeSizeInChars(ASTTy);
  CharUnits LLVMSize =
      CharUnits::fromQuantity(getDataLayout().getTypeAllocSize(LLVMTy));
  return ASTSize != LLVMSize;
}

llvm::Type *CodeGenTypes::convertTypeForLoadStore(QualType T,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(T);

  if (T->isBitIntType())
    return llvm::Type::getIntNTy(
        getLLVMContext(), Context.getTypeSizeInChars(T).getQuantity() * 8);

  if (LLVMTy->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  if (T->isExtVectorBoolType())
    return ConvertTypeForMem(T);

  return LLVMTy;
}
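// To illustrate the three representations (storage sizes here follow the
// examples in the comments above and are target-dependent):
//   _BitInt(17), sizeof == 4:  ConvertType -> i17,
//                              convertTypeForLoadStore -> i32,
//                              ConvertTypeForMem -> i32
//   _BitInt(65), sizeof == 12: ConvertType -> i65,
//                              convertTypeForLoadStore -> i96,
//                              ConvertTypeForMem -> [12 x i8], because the
//                              alloc size of i96 would exceed 12 bytes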
/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
  llvm::DenseMap<const Type *, llvm::StructType *>::const_iterator I =
      RecordDeclTypes.find(Ty);
  return I != RecordDeclTypes.end() && !I->second->isOpaque();
}

/// isFuncParamTypeConvertible - Return true if the specified type in a
/// function parameter or result position can be converted to an IR type at
/// this point. This boils down to whether the type is complete.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
  // Some ABIs cannot have their member pointers represented in IR unless
  // certain circumstances have been reached.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return getCXXABI().isMemberPointerConvertible(MPT);

  // If this isn't a tagged type, we can convert it!
  const TagType *TT = Ty->getAs<TagType>();
  if (!TT)
    return true;

  // Incomplete types cannot be converted.
  return !TT->isIncompleteType();
}

/// Code to verify a given function type is complete, i.e. the return type
/// and all of the parameter types are complete. Also check to see if we are
/// in a RS_StructPointer context, and if so whether any struct types have
/// been pended. If so, we don't want to ask the ABI lowering code to handle
/// a type that cannot be converted to an IR type.
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
  if (!isFuncParamTypeConvertible(FT->getReturnType()))
    return false;

  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
        return false;

  return true;
}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  // If this is an enum being completed, then we flush all non-struct types
  // from the cache. This allows function types and other things that may be
  // derived from the enum to be recomputed.
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(ED->getTypeForDecl())) {
      // Okay, we formed some types based on this. We speculated that the enum
      // would be lowered to i32, so we only need to flush the cache if this
      // didn't happen.
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
        TypeCache.clear();
    }
    // If necessary, provide the full definition of a type only used with a
    // declaration so far.
    if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
      DI->completeType(ED);
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // anonymous type, then go ahead and complete it now.
  const RecordDecl *RD = cast<RecordDecl>(TD);
  if (RD->isDependentType())
    return;

  // Only complete it if we converted it already. If we haven't converted it
  // yet, we'll just do it lazily.
  if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
    ConvertRecordDeclType(RD);

  // If necessary, provide the full definition of a type only used with a
  // declaration so far.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeType(RD);
}
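// For example, if a forward-declared enum was speculatively lowered to i32
// but its definition turns out to use a 64-bit underlying type, the cache is
// flushed; if the definition does lower to i32, nothing was derived from a
// wrong guess and the cache stays intact.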
void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  QualType T = Context.getRecordType(RD);
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();
  if (RecordsWithOpaqueMemberPointers.count(Ty)) {
    TypeCache.clear();
    RecordsWithOpaqueMemberPointers.clear();
  }
}

static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
                                    const llvm::fltSemantics &format,
                                    bool UseNativeHalf = false) {
  if (&format == &llvm::APFloat::IEEEhalf()) {
    if (UseNativeHalf)
      return llvm::Type::getHalfTy(VMContext);
    else
      return llvm::Type::getInt16Ty(VMContext);
  }
  if (&format == &llvm::APFloat::BFloat())
    return llvm::Type::getBFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEsingle())
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble())
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad())
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble())
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended())
    return llvm::Type::getX86_FP80Ty(VMContext);
  llvm_unreachable("Unknown float format!");
}

llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
  assert(QFT.isCanonical());
  const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
  // First, check whether we can build the full function type. If the
  // function type depends on an incomplete type (e.g. a struct or enum), we
  // cannot lower the function type.
  if (!isFuncTypeConvertible(FT)) {
    // This function's type depends on an incomplete tag type.

    // Force conversion of all the relevant record types, to make sure
    // we re-convert the FunctionType when appropriate.
    if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
      ConvertRecordDeclType(RT->getDecl());
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
      for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
        if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
          ConvertRecordDeclType(RT->getDecl());

    SkippedLayout = true;

    // Return a placeholder type.
    return llvm::StructType::get(getLLVMContext());
  }

  // The function type can be built; call the appropriate routines to
  // build it.
  const CGFunctionInfo *FI;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
  } else {
    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
  }

  llvm::Type *ResultType = nullptr;
  // If there is something higher level prodding our CGFunctionInfo, then
  // don't recurse into it again.
  if (FunctionsBeingProcessed.count(FI)) {
    ResultType = llvm::StructType::get(getLLVMContext());
    SkippedLayout = true;
  } else {
    // Otherwise, we're good to go, go ahead and convert it.
    ResultType = GetFunctionType(*FI);
  }

  return ResultType;
}
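// For example, given 'struct S; void f(S);', the type of 'f' cannot be
// lowered while 'S' is incomplete: we return the '{}' placeholder and set
// SkippedLayout so the type cache is flushed once 'S' is defined (see
// ConvertRecordDeclType) and the function type gets re-converted.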
/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();

  // For device-side compilation, CUDA device builtin surface/texture types
  // may be represented by different types.
  if (Context.getLangOpts().CUDAIsDevice) {
    if (T->isCUDADeviceBuiltinSurfaceType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinSurfaceDeviceType())
        return Ty;
    } else if (T->isCUDADeviceBuiltinTextureType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinTextureDeviceType())
        return Ty;
    }
  }

  // RecordTypes are cached and processed specially.
  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl());

  llvm::Type *CachedType = nullptr;
  auto TCI = TypeCache.find(Ty);
  if (TCI != TypeCache.end())
    CachedType = TCI->second;
  // With expensive checks, check that the type we compute matches the
  // cached type.
#ifndef EXPENSIVE_CHECKS
  if (CachedType)
    return CachedType;
#endif
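  // (With EXPENSIVE_CHECKS enabled we deliberately fall through, recompute
  // the type, and let the assert at the bottom of this function verify that
  // the cached and freshly computed types agree.)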
  // If we don't have it in the cache, convert it now.
  llvm::Type *ResultType = nullptr;
  switch (Ty->getTypeClass()) {
  case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical or dependent types aren't possible.");

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty)->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call.
      // Just map to the same as char.
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
      break;

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
      break;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      ResultType = llvm::IntegerType::get(
          getLLVMContext(), static_cast<unsigned>(Context.getTypeSize(T)));
      break;

    case BuiltinType::Float16:
      ResultType =
          getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
                           /* UseNativeHalf = */ true);
      break;
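    // (_Float16 always maps to the IR 'half' type: targets that support
    // _Float16 are assumed to provide native half arithmetic, unlike __fp16
    // below, which may be storage-only.)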
    case BuiltinType::Half:
      // Half FP can either be storage-only (lowered to i16) or native.
      ResultType = getTypeForFormat(
          getLLVMContext(), Context.getFloatTypeSemantics(T),
          Context.getLangOpts().NativeHalfType ||
              !Context.getTargetInfo().useFP16ConversionIntrinsics());
      break;
    case BuiltinType::LongDouble:
      LongDoubleReferenced = true;
      [[fallthrough]];
    case BuiltinType::BFloat16:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    /* UseNativeHalf = */ false);
      break;

    case BuiltinType::NullPtr:
      // Model std::nullptr_t as i8*
      ResultType = llvm::PointerType::getUnqual(getLLVMContext());
      break;

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
      break;

#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
      break;
    case BuiltinType::SveInt8:
    case BuiltinType::SveUint8:
    case BuiltinType::SveInt8x2:
    case BuiltinType::SveUint8x2:
    case BuiltinType::SveInt8x3:
    case BuiltinType::SveUint8x3:
    case BuiltinType::SveInt8x4:
    case BuiltinType::SveUint8x4:
    case BuiltinType::SveInt16:
    case BuiltinType::SveUint16:
    case BuiltinType::SveInt16x2:
    case BuiltinType::SveUint16x2:
    case BuiltinType::SveInt16x3:
    case BuiltinType::SveUint16x3:
    case BuiltinType::SveInt16x4:
    case BuiltinType::SveUint16x4:
    case BuiltinType::SveInt32:
    case BuiltinType::SveUint32:
    case BuiltinType::SveInt32x2:
    case BuiltinType::SveUint32x2:
    case BuiltinType::SveInt32x3:
    case BuiltinType::SveUint32x3:
    case BuiltinType::SveInt32x4:
    case BuiltinType::SveUint32x4:
    case BuiltinType::SveInt64:
    case BuiltinType::SveUint64:
    case BuiltinType::SveInt64x2:
    case BuiltinType::SveUint64x2:
    case BuiltinType::SveInt64x3:
    case BuiltinType::SveUint64x3:
    case BuiltinType::SveInt64x4:
    case BuiltinType::SveUint64x4:
    case BuiltinType::SveBool:
    case BuiltinType::SveBoolx2:
    case BuiltinType::SveBoolx4:
    case BuiltinType::SveFloat16:
    case BuiltinType::SveFloat16x2:
    case BuiltinType::SveFloat16x3:
    case BuiltinType::SveFloat16x4:
    case BuiltinType::SveFloat32:
    case BuiltinType::SveFloat32x2:
    case BuiltinType::SveFloat32x3:
    case BuiltinType::SveFloat32x4:
    case BuiltinType::SveFloat64:
    case BuiltinType::SveFloat64x2:
    case BuiltinType::SveFloat64x3:
    case BuiltinType::SveFloat64x4:
    case BuiltinType::SveBFloat16:
    case BuiltinType::SveBFloat16x2:
    case BuiltinType::SveBFloat16x3:
    case BuiltinType::SveBFloat16x4: {
      ASTContext::BuiltinVectorTypeInfo Info =
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
      return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                           Info.EC.getKnownMinValue() *
                                               Info.NumVectors);
    }
    case BuiltinType::SveCount:
      return llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
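    // (For reference: with SVE's 128-bit vector granule, svint32_t (SveInt32)
    // above becomes <vscale x 4 x i32> and svbool_t (SveBool) becomes
    // <vscale x 16 x i1>. SVE tuple types such as svint32x2_t are flattened
    // into a single wider vector, <vscale x 8 x i32>, unlike the RVV tuples
    // below, which become structs.)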
#define PPC_VECTOR_TYPE(Name, Id, Size)                                        \
    case BuiltinType::Id:                                                      \
      ResultType =                                                             \
          llvm::FixedVectorType::get(ConvertType(Context.BoolTy), Size);       \
      break;
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
      {
        ASTContext::BuiltinVectorTypeInfo Info =
            Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
        // Tuple types are expressed as aggregate types of the same scalable
        // vector type (e.g. vint32m1x2_t is two vint32m1_t, which is
        // {<vscale x 2 x i32>, <vscale x 2 x i32>}).
        if (Info.NumVectors != 1) {
          llvm::Type *EltTy = llvm::ScalableVectorType::get(
              ConvertType(Info.ElementType), Info.EC.getKnownMinValue());
          llvm::SmallVector<llvm::Type *, 4> EltTys(Info.NumVectors, EltTy);
          return llvm::StructType::get(getLLVMContext(), EltTys);
        }
        return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                             Info.EC.getKnownMinValue());
      }
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS)                  \
    case BuiltinType::Id: {                                                    \
      if (BuiltinType::Id == BuiltinType::WasmExternRef)                       \
        ResultType =                                                           \
            CGM.getTargetCodeGenInfo().getWasmExternrefReferenceType();        \
      else                                                                     \
        llvm_unreachable("Unexpected wasm reference builtin type!");           \
    } break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_OPAQUE_PTR_TYPE(Name, MangledName, AS, Width, Align, Id,        \
                               SingletonId)                                    \
    case BuiltinType::Id:                                                      \
      return llvm::PointerType::get(getLLVMContext(), AS);
#include "clang/Basic/AMDGPUTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
      ResultType = CGM.getHLSLRuntime().convertHLSLSpecificType(Ty);
      break;
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("Unexpected placeholder builtin type!");
    }
    break;
  }
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Unexpected undeduced type!");
  case Type::Complex: {
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }
  case Type::Pointer: {
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::VariableArray: {
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
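    // For example, 'int A[n]' lowers to i32 and 'int A[n][20]' to
    // [20 x i32]; the dynamic element count is computed and emitted
    // separately when the VLA is actually allocated.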
    ResultType = ConvertTypeForMem(A->getElementType());
    break;
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized. If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
    break;
  }
  case Type::ArrayParameter:
  case Type::ConstantArray: {
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());

    // Lower arrays of undefined struct type to arrays of i8 just to have a
    // concrete type.
    if (!EltTy->isSized()) {
      SkippedLayout = true;
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
    }

    ResultType = llvm::ArrayType::get(EltTy, A->getZExtSize());
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(Ty);
    // An ext_vector_type of Bool is really a vector of bits.
    llvm::Type *IRElemTy = VT->isExtVectorBoolType()
                               ? llvm::Type::getInt1Ty(getLLVMContext())
                               : ConvertType(VT->getElementType());
    ResultType = llvm::FixedVectorType::get(IRElemTy, VT->getNumElements());
    break;
  }
  case Type::ConstantMatrix: {
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    ResultType =
        llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
                                   MT->getNumRows() * MT->getNumColumns());
    break;
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto:
    ResultType = ConvertFunctionTypeInternal(T);
    break;
  case Type::ObjCObject:
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
    break;

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::create(getLLVMContext());
    ResultType = T;
    break;
  }

  case Type::ObjCObjectPointer:
    ResultType = llvm::PointerType::getUnqual(getLLVMContext());
    break;

  case Type::Enum: {
    const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
    if (ED->isCompleteDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder 'i32' type. This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
    break;
  }

  case Type::BlockPointer: {
    // Block pointers lower to function type. For function type,
    // getTargetAddressSpace() returns the default address space for
    // function pointers, i.e. the program address space. Therefore, for
    // block pointers, it is important to pass the pointee AST address space
    // when calling getTargetAddressSpace(), to ensure that we get the LLVM
    // IR address space for data pointers and not function pointers.
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    unsigned AS = Context.getTargetAddressSpace(FTy.getAddressSpace());
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::MemberPointer: {
    auto *MPTy = cast<MemberPointerType>(Ty);
    if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
      auto *C = MPTy->getClass();
      auto Insertion = RecordsWithOpaqueMemberPointers.insert({C, nullptr});
      if (Insertion.second)
        Insertion.first->second = llvm::StructType::create(getLLVMContext());
      ResultType = Insertion.first->second;
    } else {
      ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
    }
    break;
  }
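  // (In ABIs where the IR representation of a member pointer depends on the
  // definition of its class, e.g. the Microsoft C++ ABI's inheritance
  // models, the opaque struct created above stands in until the class is
  // completed; RefreshTypeCacheForClass then flushes the cache so the real
  // representation can be computed.)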
  case Type::Atomic: {
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
    ResultType = ConvertTypeForMem(valueType);

    // Pad out to the inflated size if necessary.
    uint64_t valueSize = Context.getTypeSize(valueType);
    uint64_t atomicSize = Context.getTypeSize(Ty);
    if (valueSize != atomicSize) {
      assert(valueSize < atomicSize);
      llvm::Type *elts[] = {
          ResultType,
          llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)};
      ResultType =
          llvm::StructType::get(getLLVMContext(), llvm::ArrayRef(elts));
    }
    break;
  }
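  // (For example, if a target inflates an _Atomic 3-byte struct to 4 bytes
  // so it can be operated on atomically, the IR type becomes a struct of the
  // value representation followed by [1 x i8] of explicit padding.)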
  case Type::Pipe: {
    ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
    break;
  }
  case Type::BitInt: {
    const auto &EIT = cast<BitIntType>(Ty);
    ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
    break;
  }
  }

  assert(ResultType && "Didn't convert a type?");
  assert((!CachedType || CachedType == ResultType) &&
         "Cached type doesn't match computed type");

  TypeCache[Ty] = ResultType;
  return ResultType;
}

bool CodeGenModule::isPaddedAtomicType(QualType type) {
  return isPaddedAtomicType(type->castAs<AtomicType>());
}

bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
  return Context.getTypeSize(type) !=
         Context.getTypeSize(type->getValueType());
}

/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecls are not necessarily unique; instead, use the (clang) type
  // connected to the decl.
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
  if (!Entry) {
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  RD = RD->getDefinition();
  if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
    return Ty;

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases()) {
      if (I.isVirtual())
        continue;
      ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
    }
  }

  // Layout fields.
  std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = std::move(Layout);

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();

  return Ty;
}

/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  auto I = CGRecordLayouts.find(Key);
  if (I != CGRecordLayouts.end())
    return *I->second;

  // Compute the type information.
  ConvertRecordDeclType(RD);

  // Now try again.
  I = CGRecordLayouts.find(Key);

  assert(I != CGRecordLayouts.end() &&
         "Unable to find record layout information for type");
  return *I->second;
}

bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
  assert((T->isAnyPointerType() || T->isBlockPointerType()) && "Invalid type");
  return isZeroInitializable(T);
}

bool CodeGenTypes::isZeroInitializable(QualType T) {
  if (T->getAs<PointerType>())
    return Context.getTargetNullPointerValue(T) == 0;

  if (const auto *AT = Context.getAsArrayType(T)) {
    if (isa<IncompleteArrayType>(AT))
      return true;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
      if (Context.getConstantArrayElementCount(CAT) == 0)
        return true;
    T = Context.getBaseElementType(T);
  }

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    return isZeroInitializable(RD);
  }

  // We have to ask the ABI about member pointers.
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);

  // Everything else is okay.
  return true;
}

bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) {
  return getCGRecordLayout(RD).isZeroInitializable();
}

unsigned CodeGenTypes::getTargetAddressSpace(QualType T) const {
  // Return the address space for the type. If the type is a function type
  // without an address space qualifier, the program address space is used.
  // Otherwise, the target picks the best address space based on the type
  // information.
  return T->isFunctionType() && !T.hasAddressSpace()
             ? getDataLayout().getProgramAddressSpace()
             : getContext().getTargetAddressSpace(T.getAddressSpace());
}