//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGHLSLRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace clang;
using namespace CodeGen;

CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
    : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
      Target(cgm.getTarget()) {
  SkippedLayout = false;
  LongDoubleReferenced = false;
}

CodeGenTypes::~CodeGenTypes() {
  for (llvm::FoldingSet<CGFunctionInfo>::iterator
       I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
    delete &*I++;
}

CGCXXABI &CodeGenTypes::getCXXABI() const { return getCGM().getCXXABI(); }

const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const {
  return CGM.getCodeGenOpts();
}

void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
                                     llvm::StructType *Ty,
                                     StringRef suffix) {
  SmallString<256> TypeName;
  llvm::raw_svector_ostream OS(TypeName);
  OS << RD->getKindName() << '.';

  // FIXME: We probably want to make more tweaks to the printing policy. For
  // example, we should probably enable PrintCanonicalTypes and
  // FullyQualifiedNames.
  PrintingPolicy Policy = RD->getASTContext().getPrintingPolicy();
  Policy.SuppressInlineNamespace = false;

  // Name the codegen type after the typedef name
  // if there is no tag type name available.
  if (RD->getIdentifier()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (RD->getDeclContext())
      RD->printQualifiedName(OS, Policy);
    else
      RD->printName(OS, Policy);
  } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (TDD->getDeclContext())
      TDD->printQualifiedName(OS, Policy);
    else
      TDD->printName(OS);
  } else
    OS << "anon";

  if (!suffix.empty())
    OS << suffix;

  Ty->setName(OS.str());
}
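
// Illustrative examples of the names produced above (the exact spellings
// depend on the printing policy and enclosing context):
//   struct S { ... };             -> "struct.S"
//   typedef struct { ... } T;     -> "struct.T"    (typedef name reused)
//   struct { ... } x;             -> "struct.anon" (no name available)
// Callers may pass a non-empty suffix (e.g. ".base") to distinguish variants
// such as the base-subobject layout of a record.
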
/// ConvertTypeForMem - Convert type T into an llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation
/// for a type. For example, the scalar representation for _Bool is i1, but
/// the memory representation is usually i8 or i32, depending on the target.
///
/// We generally assume that the alloc size of this type under the LLVM
/// data layout is the same as the size of the AST type. The alignment
/// does not have to match: Clang should always use explicit alignments
/// and packed structs as necessary to produce the layout it needs.
/// But the size does need to be exactly right or else things like struct
/// layout will break.
llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
  if (T->isConstantMatrixType()) {
    const Type *Ty = Context.getCanonicalType(T).getTypePtr();
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    return llvm::ArrayType::get(ConvertType(MT->getElementType()),
                                MT->getNumRows() * MT->getNumColumns());
  }

  llvm::Type *R = ConvertType(T);

  // Check for the boolean vector case.
  if (T->isExtVectorBoolType()) {
    auto *FixedVT = cast<llvm::FixedVectorType>(R);
    // Pad to at least one byte.
    uint64_t BytePadded = std::max<uint64_t>(FixedVT->getNumElements(), 8);
    return llvm::IntegerType::get(FixedVT->getContext(), BytePadded);
  }

  // If T is _Bool or a _BitInt type, ConvertType will produce an IR type
  // with the exact semantic bit-width of the AST type; for example,
  // _BitInt(17) will turn into i17. In memory, however, we need to store
  // such values extended to their full storage size as decided by AST
  // layout; this is an ABI requirement. Ideally, we would always use an
  // integer type that's just the bit-size of the AST type; for example, if
  // sizeof(_BitInt(17)) == 4, _BitInt(17) would turn into i32. That is what's
  // returned by convertTypeForLoadStore. However, that type does not
  // always satisfy the size requirement on memory representation types
  // described above. For example, a 32-bit platform might reasonably set
  // sizeof(_BitInt(65)) == 12, but i96 is likely to have an alloc size
  // of 16 bytes in the LLVM data layout. In these cases, we simply return
  // a byte array of the appropriate size.
  if (T->isBitIntType()) {
    if (typeRequiresSplitIntoByteArray(T, R))
      return llvm::ArrayType::get(CGM.Int8Ty,
                                  Context.getTypeSizeInChars(T).getQuantity());
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));
  }

  if (R->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  // Else, don't map it.
  return R;
}

bool CodeGenTypes::typeRequiresSplitIntoByteArray(QualType ASTTy,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(ASTTy);

  CharUnits ASTSize = Context.getTypeSizeInChars(ASTTy);
  CharUnits LLVMSize =
      CharUnits::fromQuantity(getDataLayout().getTypeAllocSize(LLVMTy));
  return ASTSize != LLVMSize;
}

llvm::Type *CodeGenTypes::convertTypeForLoadStore(QualType T,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(T);

  if (T->isBitIntType())
    return llvm::Type::getIntNTy(
        getLLVMContext(), Context.getTypeSizeInChars(T).getQuantity() * 8);

  if (LLVMTy->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  if (T->isExtVectorBoolType())
    return ConvertTypeForMem(T);

  return LLVMTy;
}
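
// Worked example of the three conversions above, assuming a hypothetical
// 32-bit target where sizeof(_BitInt(65)) == 12 but i96 has a 16-byte alloc
// size in the data layout:
//   ConvertType(_BitInt(65))             -> i65        (semantic width)
//   convertTypeForLoadStore(_BitInt(65)) -> i96        (storage width)
//   ConvertTypeForMem(_BitInt(65))       -> [12 x i8]  (byte array, because
//                                           i96's alloc size differs from
//                                           the AST size)
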
/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
  llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
      RecordDeclTypes.find(Ty);
  return I != RecordDeclTypes.end() && !I->second->isOpaque();
}

/// isFuncParamTypeConvertible - Return true if the specified type in a
/// function parameter or result position can be converted to an IR type at
/// this point. This boils down to whether the type is complete.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
  // Some ABIs cannot have their member pointers represented in IR unless
  // certain circumstances have been reached.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return getCXXABI().isMemberPointerConvertible(MPT);

  // If this isn't a tagged type, we can convert it!
  const TagType *TT = Ty->getAs<TagType>();
  if (!TT) return true;

  // Incomplete types cannot be converted.
  return !TT->isIncompleteType();
}

/// Verify that a given function type is complete, i.e. that the return type
/// and all of the parameter types are complete. If not, we don't want to ask
/// the ABI lowering code to handle a type that cannot be converted to an IR
/// type.
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
  if (!isFuncParamTypeConvertible(FT->getReturnType()))
    return false;

  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
        return false;

  return true;
}
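
// For example (illustrative):
//   struct Incomplete;
//   struct Incomplete get(void);   // not convertible yet: the return type
//                                  // is an incomplete record
// ConvertFunctionTypeInternal below emits a placeholder for such function
// types and arranges for them to be recomputed once the record is defined.
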
/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  // If this is an enum being completed, then we flush all non-struct types
  // from the cache. This allows function types and other things that may be
  // derived from the enum to be recomputed.
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(ED->getTypeForDecl())) {
      // Okay, we formed some types based on this. We speculated that the enum
      // would be lowered to i32, so we only need to flush the cache if this
      // didn't happen.
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
        TypeCache.clear();
    }
    // If necessary, provide the full definition of a type only used with a
    // declaration so far.
    if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
      DI->completeType(ED);
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // anonymous type, then go ahead and complete it now.
  const RecordDecl *RD = cast<RecordDecl>(TD);
  if (RD->isDependentType()) return;

  // Only complete it if we converted it already. If we haven't converted it
  // yet, we'll just do it lazily.
  if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
    ConvertRecordDeclType(RD);

  // If necessary, provide the full definition of a type only used with a
  // declaration so far.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeType(RD);
}

void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  QualType T = Context.getRecordType(RD);
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();
  if (RecordsWithOpaqueMemberPointers.count(Ty)) {
    TypeCache.clear();
    RecordsWithOpaqueMemberPointers.clear();
  }
}

static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
                                    const llvm::fltSemantics &format,
                                    bool UseNativeHalf = false) {
  if (&format == &llvm::APFloat::IEEEhalf()) {
    if (UseNativeHalf)
      return llvm::Type::getHalfTy(VMContext);
    else
      return llvm::Type::getInt16Ty(VMContext);
  }
  if (&format == &llvm::APFloat::BFloat())
    return llvm::Type::getBFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEsingle())
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble())
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad())
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble())
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended())
    return llvm::Type::getX86_FP80Ty(VMContext);
  llvm_unreachable("Unknown float format!");
}

llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
  assert(QFT.isCanonical());
  const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
  // First, check whether we can build the full function type. If the
  // function type depends on an incomplete type (e.g. a struct or enum), we
  // cannot lower the function type.
  if (!isFuncTypeConvertible(FT)) {
    // This function's type depends on an incomplete tag type.

    // Force conversion of all the relevant record types, to make sure
    // we re-convert the FunctionType when appropriate.
    if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
      ConvertRecordDeclType(RT->getDecl());
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
      for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
        if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
          ConvertRecordDeclType(RT->getDecl());

    SkippedLayout = true;

    // Return a placeholder type.
    return llvm::StructType::get(getLLVMContext());
  }

  // The function type can be built; call the appropriate routines to
  // build it.
  const CGFunctionInfo *FI;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
  } else {
    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
  }

  llvm::Type *ResultType = nullptr;
  // If there is something higher level prodding our CGFunctionInfo, then
  // don't recurse into it again.
  if (FunctionsBeingProcessed.count(FI)) {
    ResultType = llvm::StructType::get(getLLVMContext());
    SkippedLayout = true;
  } else {
    // Otherwise, we're good to go; go ahead and convert it.
    ResultType = GetFunctionType(*FI);
  }

  return ResultType;
}
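
// Note on the placeholder path above (illustrative summary): once the
// blocking record type is completed, ConvertRecordDeclType clears the type
// cache (because SkippedLayout was set), so the next request for this
// function type rebuilds it with the real signature.
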
/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();

  // For device-side compilation, CUDA device builtin surface/texture types
  // may be represented as different types.
  if (Context.getLangOpts().CUDAIsDevice) {
    if (T->isCUDADeviceBuiltinSurfaceType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinSurfaceDeviceType())
        return Ty;
    } else if (T->isCUDADeviceBuiltinTextureType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinTextureDeviceType())
        return Ty;
    }
  }

  // RecordTypes are cached and processed specially.
  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl());

  llvm::Type *CachedType = nullptr;
  auto TCI = TypeCache.find(Ty);
  if (TCI != TypeCache.end())
    CachedType = TCI->second;
  // With expensive checks, check that the type we compute matches the
  // cached type.
#ifndef EXPENSIVE_CHECKS
  if (CachedType)
    return CachedType;
#endif

  // If we don't have it in the cache, convert it now.
  llvm::Type *ResultType = nullptr;
  switch (Ty->getTypeClass()) {
  case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical or dependent types aren't possible.");

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty)->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call.
      // Just map to the same as char.
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
      break;

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
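      // (ConvertTypeForMem widens this to the target's in-memory bool
      // representation, typically i8.)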
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
      break;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      ResultType = llvm::IntegerType::get(getLLVMContext(),
                                 static_cast<unsigned>(Context.getTypeSize(T)));
      break;

    case BuiltinType::Float16:
      ResultType =
          getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
                           /* UseNativeHalf = */ true);
      break;

    case BuiltinType::Half:
      // Half FP can either be storage-only (lowered to i16) or native.
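      // (Illustrative: a storage-only __fp16 is kept as i16 in IR and
      // promoted to float for arithmetic; a native half becomes the IR
      // 'half' type.)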
      ResultType = getTypeForFormat(
          getLLVMContext(), Context.getFloatTypeSemantics(T),
          Context.getLangOpts().NativeHalfType ||
              !Context.getTargetInfo().useFP16ConversionIntrinsics());
      break;
    case BuiltinType::LongDouble:
      LongDoubleReferenced = true;
      [[fallthrough]];
    case BuiltinType::BFloat16:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    /* UseNativeHalf = */ false);
      break;

    case BuiltinType::NullPtr:
      // Model std::nullptr_t as a pointer in the default address space.
      ResultType = llvm::PointerType::getUnqual(getLLVMContext());
      break;

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
      break;

#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
      break;
    case BuiltinType::SveInt8:
    case BuiltinType::SveUint8:
    case BuiltinType::SveInt8x2:
    case BuiltinType::SveUint8x2:
    case BuiltinType::SveInt8x3:
    case BuiltinType::SveUint8x3:
    case BuiltinType::SveInt8x4:
    case BuiltinType::SveUint8x4:
    case BuiltinType::SveInt16:
    case BuiltinType::SveUint16:
    case BuiltinType::SveInt16x2:
    case BuiltinType::SveUint16x2:
    case BuiltinType::SveInt16x3:
    case BuiltinType::SveUint16x3:
    case BuiltinType::SveInt16x4:
    case BuiltinType::SveUint16x4:
    case BuiltinType::SveInt32:
    case BuiltinType::SveUint32:
    case BuiltinType::SveInt32x2:
    case BuiltinType::SveUint32x2:
    case BuiltinType::SveInt32x3:
    case BuiltinType::SveUint32x3:
    case BuiltinType::SveInt32x4:
    case BuiltinType::SveUint32x4:
    case BuiltinType::SveInt64:
    case BuiltinType::SveUint64:
    case BuiltinType::SveInt64x2:
    case BuiltinType::SveUint64x2:
    case BuiltinType::SveInt64x3:
    case BuiltinType::SveUint64x3:
    case BuiltinType::SveInt64x4:
    case BuiltinType::SveUint64x4:
    case BuiltinType::SveBool:
    case BuiltinType::SveBoolx2:
    case BuiltinType::SveBoolx4:
    case BuiltinType::SveFloat16:
    case BuiltinType::SveFloat16x2:
    case BuiltinType::SveFloat16x3:
    case BuiltinType::SveFloat16x4:
    case BuiltinType::SveFloat32:
    case BuiltinType::SveFloat32x2:
    case BuiltinType::SveFloat32x3:
    case BuiltinType::SveFloat32x4:
    case BuiltinType::SveFloat64:
    case BuiltinType::SveFloat64x2:
    case BuiltinType::SveFloat64x3:
    case BuiltinType::SveFloat64x4:
    case BuiltinType::SveBFloat16:
    case BuiltinType::SveBFloat16x2:
    case BuiltinType::SveBFloat16x3:
    case BuiltinType::SveBFloat16x4: {
      ASTContext::BuiltinVectorTypeInfo Info =
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
      return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                           Info.EC.getKnownMinValue() *
                                               Info.NumVectors);
    }
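    // (Illustrative, per the computation above: svint32_t (SveInt32) lowers
    // to <vscale x 4 x i32>; a tuple such as svint32x2_t lowers to
    // <vscale x 8 x i32>, since the known-minimum element count is scaled
    // by the number of vectors.)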
    case BuiltinType::SveCount:
      return llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id: \
      ResultType = \
          llvm::FixedVectorType::get(ConvertType(Context.BoolTy), Size); \
      break;
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
      {
        ASTContext::BuiltinVectorTypeInfo Info =
            Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
        // Tuple types are expressed as aggregate types of the same scalable
        // vector type (e.g. vint32m1x2_t is two vint32m1_t, which is {<vscale
        // x 2 x i32>, <vscale x 2 x i32>}).
        if (Info.NumVectors != 1) {
          llvm::Type *EltTy = llvm::ScalableVectorType::get(
              ConvertType(Info.ElementType), Info.EC.getKnownMinValue());
          llvm::SmallVector<llvm::Type *, 4> EltTys(Info.NumVectors, EltTy);
          return llvm::StructType::get(getLLVMContext(), EltTys);
        }
        return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                             Info.EC.getKnownMinValue());
      }
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
    case BuiltinType::Id: { \
      if (BuiltinType::Id == BuiltinType::WasmExternRef) \
        ResultType = CGM.getTargetCodeGenInfo().getWasmExternrefReferenceType(); \
      else \
        llvm_unreachable("Unexpected wasm reference builtin type!"); \
    } break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_OPAQUE_PTR_TYPE(Name, MangledName, AS, Width, Align, Id, \
                               SingletonId) \
    case BuiltinType::Id: \
      return llvm::PointerType::get(getLLVMContext(), AS);
#include "clang/Basic/AMDGPUTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
      ResultType = CGM.getHLSLRuntime().convertHLSLSpecificType(Ty);
      break;
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("Unexpected placeholder builtin type!");
    }
    break;
  }
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Unexpected undeduced type!");
  case Type::Complex: {
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }
  case Type::Pointer: {
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }
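
  // (With opaque pointers, references and pointers both lower to a bare
  // "ptr" in the appropriate address space; the pointee type is not encoded
  // in the IR type.)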
  case Type::VariableArray: {
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    ResultType = ConvertTypeForMem(A->getElementType());
    break;
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized. If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
    break;
  }
  case Type::ArrayParameter:
  case Type::ConstantArray: {
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());

    // Lower arrays of undefined struct type to arrays of i8 just to have a
    // concrete type.
    if (!EltTy->isSized()) {
      SkippedLayout = true;
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
    }

    ResultType = llvm::ArrayType::get(EltTy, A->getZExtSize());
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(Ty);
    // An ext_vector_type of Bool is really a vector of bits.
    llvm::Type *IRElemTy = VT->isExtVectorBoolType()
                               ? llvm::Type::getInt1Ty(getLLVMContext())
                               : ConvertType(VT->getElementType());
    ResultType = llvm::FixedVectorType::get(IRElemTy, VT->getNumElements());
    break;
  }
  case Type::ConstantMatrix: {
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    ResultType =
        llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
                                   MT->getNumRows() * MT->getNumColumns());
    break;
  }
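  // (Illustrative: 'typedef float m2x3_t __attribute__((matrix_type(2, 3)));'
  // lowers to <6 x float> as a value; ConvertTypeForMem stores it as
  // [6 x float].)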
  case Type::FunctionNoProto:
  case Type::FunctionProto:
    ResultType = ConvertFunctionTypeInternal(T);
    break;
  case Type::ObjCObject:
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
    break;

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::create(getLLVMContext());
    ResultType = T;
    break;
  }

  case Type::ObjCObjectPointer:
    ResultType = llvm::PointerType::getUnqual(getLLVMContext());
    break;

  case Type::Enum: {
    const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
    if (ED->isCompleteDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder 'i32' type. This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
    break;
  }

  case Type::BlockPointer: {
    // The pointee of a block pointer is a function type. For a function
    // type, getTargetAddressSpace() would return the default address space
    // for function pointers, i.e. the program address space. But a block
    // pointer is a data pointer, so it is important to pass the pointee's
    // AST address space when calling getTargetAddressSpace(), to ensure
    // that we get the LLVM IR address space for data pointers and not
    // function pointers.
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    unsigned AS = Context.getTargetAddressSpace(FTy.getAddressSpace());
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::MemberPointer: {
    auto *MPTy = cast<MemberPointerType>(Ty);
    if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
      auto *C = MPTy->getClass();
      auto Insertion = RecordsWithOpaqueMemberPointers.insert({C, nullptr});
      if (Insertion.second)
        Insertion.first->second = llvm::StructType::create(getLLVMContext());
      ResultType = Insertion.first->second;
    } else {
      ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
    }
    break;
  }

  case Type::Atomic: {
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
    ResultType = ConvertTypeForMem(valueType);

    // Pad out to the inflated size if necessary.
    uint64_t valueSize = Context.getTypeSize(valueType);
    uint64_t atomicSize = Context.getTypeSize(Ty);
    if (valueSize != atomicSize) {
      assert(valueSize < atomicSize);
      llvm::Type *elts[] = {
        ResultType,
        llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
      };
      ResultType =
          llvm::StructType::get(getLLVMContext(), llvm::ArrayRef(elts));
    }
    break;
  }
  case Type::Pipe: {
    ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
    break;
  }
  case Type::BitInt: {
    const auto *EIT = cast<BitIntType>(Ty);
    ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
    break;
  }
  }

  assert(ResultType && "Didn't convert a type?");
  assert((!CachedType || CachedType == ResultType) &&
         "Cached type doesn't match computed type");

  TypeCache[Ty] = ResultType;
  return ResultType;
}

bool CodeGenModule::isPaddedAtomicType(QualType type) {
  return isPaddedAtomicType(type->castAs<AtomicType>());
}

bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
  return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType());
}
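
// Illustrative padded-atomic example: on a target that inflates
// _Atomic(char) to a 4-byte size, ConvertType yields { i8, [3 x i8] } and
// isPaddedAtomicType returns true.
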
/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecls are not necessarily unique; instead, use the (clang) type
  // connected to the decl.
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
  if (!Entry) {
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  RD = RD->getDefinition();
  if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
    return Ty;

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases()) {
      if (I.isVirtual()) continue;
      ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
    }
  }

  // Layout fields.
  std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = std::move(Layout);

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();

  return Ty;
}

/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  auto I = CGRecordLayouts.find(Key);
  if (I != CGRecordLayouts.end())
    return *I->second;

  // Compute the type information.
  ConvertRecordDeclType(RD);

  // Now try again.
  I = CGRecordLayouts.find(Key);

  assert(I != CGRecordLayouts.end() &&
         "Unable to find record layout information for type");
  return *I->second;
}

bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
  assert((T->isAnyPointerType() || T->isBlockPointerType()) && "Invalid type");
  return isZeroInitializable(T);
}

bool CodeGenTypes::isZeroInitializable(QualType T) {
  if (T->getAs<PointerType>())
    return Context.getTargetNullPointerValue(T) == 0;

  if (const auto *AT = Context.getAsArrayType(T)) {
    if (isa<IncompleteArrayType>(AT))
      return true;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
      if (Context.getConstantArrayElementCount(CAT) == 0)
        return true;
    T = Context.getBaseElementType(T);
  }

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    return isZeroInitializable(RD);
  }

  // We have to ask the ABI about member pointers.
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);

  // Everything else is okay.
  return true;
}

bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) {
  return getCGRecordLayout(RD).isZeroInitializable();
}

unsigned CodeGenTypes::getTargetAddressSpace(QualType T) const {
  // Return the address space for the type. If the type is a function type
  // without an address space qualifier, the program address space is used.
  // Otherwise, the target picks the best address space based on the type
  // information.
  return T->isFunctionType() && !T.hasAddressSpace()
             ? getDataLayout().getProgramAddressSpace()
             : getContext().getTargetAddressSpace(T.getAddressSpace());
}