//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGHLSLRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace clang;
using namespace CodeGen;

CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
    : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
      Target(cgm.getTarget()) {
  SkippedLayout = false;
  LongDoubleReferenced = false;
}

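// The CGFunctionInfo nodes in FunctionInfos are heap-allocated and are not
// owned by the FoldingSet itself, so they must be deleted explicitly here.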
CodeGenTypes::~CodeGenTypes() {
  for (llvm::FoldingSet<CGFunctionInfo>::iterator
       I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
    delete &*I++;
}

CGCXXABI &CodeGenTypes::getCXXABI() const { return getCGM().getCXXABI(); }

const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const {
  return CGM.getCodeGenOpts();
}

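/// Attach a readable name like "struct.foo" or "class.N::bar" to the IR
/// struct type created for this record. The name only makes the emitted IR
/// easier to read; it has no semantic effect.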
void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
                                     llvm::StructType *Ty,
                                     StringRef suffix) {
  SmallString<256> TypeName;
  llvm::raw_svector_ostream OS(TypeName);
  OS << RD->getKindName() << '.';

  // FIXME: We probably want to make more tweaks to the printing policy. For
  // example, we should probably enable PrintCanonicalTypes and
  // FullyQualifiedNames.
  PrintingPolicy Policy = RD->getASTContext().getPrintingPolicy();
  Policy.SuppressInlineNamespace =
      PrintingPolicy::SuppressInlineNamespaceMode::None;

  // Name the codegen type after the typedef name
  // if there is no tag type name available
  if (RD->getIdentifier()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (RD->getDeclContext())
      RD->printQualifiedName(OS, Policy);
    else
      RD->printName(OS, Policy);
  } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (TDD->getDeclContext())
      TDD->printQualifiedName(OS, Policy);
    else
      TDD->printName(OS);
  } else
    OS << "anon";

  if (!suffix.empty())
    OS << suffix;

  Ty->setName(OS.str());
}

/// ConvertTypeForMem - Convert type T into an llvm::Type.  This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type.  For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
///
/// We generally assume that the alloc size of this type under the LLVM
/// data layout is the same as the size of the AST type.  The alignment
/// does not have to match: Clang should always use explicit alignments
/// and packed structs as necessary to produce the layout it needs.
/// But the size does need to be exactly right or else things like struct
/// layout will break.
llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
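  // Constant matrices are lowered in memory to an array holding all
  // Rows * Columns elements; per the matrix type extension, the elements are
  // laid out in column-major order without padding.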
  if (T->isConstantMatrixType()) {
    const Type *Ty = Context.getCanonicalType(T).getTypePtr();
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    return llvm::ArrayType::get(ConvertType(MT->getElementType()),
                                MT->getNumRows() * MT->getNumColumns());
  }

  llvm::Type *R = ConvertType(T);

  // Check for the boolean vector case.
  if (T->isExtVectorBoolType()) {
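    // In memory, a bool vector is a packed bit-vector, so represent it as a
    // single integer with one bit per element.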
    auto *FixedVT = cast<llvm::FixedVectorType>(R);
    // Pad to at least one byte.
    uint64_t BytePadded = std::max<uint64_t>(FixedVT->getNumElements(), 8);
    return llvm::IntegerType::get(FixedVT->getContext(), BytePadded);
  }

  // If T is _Bool or a _BitInt type, ConvertType will produce an IR type
  // with the exact semantic bit-width of the AST type; for example,
  // _BitInt(17) will turn into i17. In memory, however, we need to store
  // such values extended to their full storage size as decided by AST
  // layout; this is an ABI requirement. Ideally, we would always use an
  // integer type that's just the bit-size of the AST type; for example, if
  // sizeof(_BitInt(17)) == 4, _BitInt(17) would turn into i32. That is what's
  // returned by convertTypeForLoadStore. However, that type does not
  // always satisfy the size requirement on memory representation types
  // described above. For example, a 32-bit platform might reasonably set
  // sizeof(_BitInt(65)) == 12, but i96 will likely have an alloc size
  // of 16 bytes in the LLVM data layout. In these cases, we simply return
  // a byte array of the appropriate size.
  if (T->isBitIntType()) {
    if (typeRequiresSplitIntoByteArray(T, R))
      return llvm::ArrayType::get(CGM.Int8Ty,
                                  Context.getTypeSizeInChars(T).getQuantity());
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));
  }

  if (R->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  // Else, don't map it.
  return R;
}

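/// Return true if the memory representation of the type must fall back to a
/// plain byte array because the natural IR type's alloc size under the LLVM
/// data layout disagrees with the AST size of the type.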
bool CodeGenTypes::typeRequiresSplitIntoByteArray(QualType ASTTy,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(ASTTy);

  CharUnits ASTSize = Context.getTypeSizeInChars(ASTTy);
  CharUnits LLVMSize =
      CharUnits::fromQuantity(getDataLayout().getTypeAllocSize(LLVMTy));
  return ASTSize != LLVMSize;
}

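/// Return the type used for loads and stores of a value of type T: _BitInt
/// values are widened to an integer covering their full storage size, and i1
/// values (plain _Bool and bool vectors) are widened to their memory width.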
llvm::Type *CodeGenTypes::convertTypeForLoadStore(QualType T,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(T);

  if (T->isBitIntType())
    return llvm::Type::getIntNTy(
        getLLVMContext(), Context.getTypeSizeInChars(T).getQuantity() * 8);

  if (LLVMTy->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  if (T->isExtVectorBoolType())
    return ConvertTypeForMem(T);

  return LLVMTy;
}

/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
  llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
  RecordDeclTypes.find(Ty);
  return I != RecordDeclTypes.end() && !I->second->isOpaque();
}

/// isFuncParamTypeConvertible - Return true if the specified type in a
/// function parameter or result position can be converted to an IR type at
/// this point. This boils down to whether the type is complete.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
  // Some ABIs cannot have their member pointers represented in IR unless
  // certain circumstances have been reached.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return getCXXABI().isMemberPointerConvertible(MPT);

  // If this isn't a tagged type, we can convert it!
  const TagType *TT = Ty->getAs<TagType>();
  if (!TT) return true;

  // Incomplete types cannot be converted.
  return !TT->isIncompleteType();
}

/// Verify that a given function type is complete, i.e. that the return type
/// and all of the parameter types are complete. We don't want to ask the ABI
/// lowering code to handle a type that cannot be converted to an IR type.
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
  if (!isFuncParamTypeConvertible(FT->getReturnType()))
    return false;

  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
        return false;

  return true;
}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  // If this is an enum being completed, then we flush all non-struct types from
  // the cache.  This allows function types and other things that may be derived
  // from the enum to be recomputed.
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(ED->getTypeForDecl())) {
      // Okay, we formed some types based on this.  We speculated that the enum
      // would be lowered to i32, so we only need to flush the cache if this
      // didn't happen.
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
        TypeCache.clear();
    }
    // If necessary, provide the full definition of a type only used with a
    // declaration so far.
    if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
      DI->completeType(ED);
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // anonymous type, then go ahead and complete it now.
  const RecordDecl *RD = cast<RecordDecl>(TD);
  if (RD->isDependentType()) return;

  // Only complete it if we converted it already.  If we haven't converted it
  // yet, we'll just do it lazily.
  if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
    ConvertRecordDeclType(RD);

  // If necessary, provide the full definition of a type only used with a
  // declaration so far.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeType(RD);
}

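/// Flush cached types that were converted while this class's member pointer
/// representation was still opaque (e.g. before its MS inheritance model was
/// known), so they get recomputed with the now-convertible representation.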
void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  QualType T = Context.getRecordType(RD);
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();
  if (RecordsWithOpaqueMemberPointers.count(Ty)) {
    TypeCache.clear();
    RecordsWithOpaqueMemberPointers.clear();
  }
}

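/// Map an APFloat semantics object to the corresponding LLVM IR
/// floating-point type. Half is lowered to i16 when the target treats it as
/// a storage-only format.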
static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
                                    const llvm::fltSemantics &format,
                                    bool UseNativeHalf = false) {
  if (&format == &llvm::APFloat::IEEEhalf()) {
    if (UseNativeHalf)
      return llvm::Type::getHalfTy(VMContext);
    else
      return llvm::Type::getInt16Ty(VMContext);
  }
  if (&format == &llvm::APFloat::BFloat())
    return llvm::Type::getBFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEsingle())
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble())
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad())
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble())
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended())
    return llvm::Type::getX86_FP80Ty(VMContext);
  llvm_unreachable("Unknown float format!");
}

llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
  assert(QFT.isCanonical());
  const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
  // First, check whether we can build the full function type.  If the
  // function type depends on an incomplete type (e.g. a struct or enum), we
  // cannot lower the function type.
  if (!isFuncTypeConvertible(FT)) {
    // This function's type depends on an incomplete tag type.

    // Force conversion of all the relevant record types, to make sure
    // we re-convert the FunctionType when appropriate.
    if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
      ConvertRecordDeclType(RT->getDecl());
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
      for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
        if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
          ConvertRecordDeclType(RT->getDecl());

    SkippedLayout = true;

    // Return a placeholder type.
    return llvm::StructType::get(getLLVMContext());
  }

  // The function type can be built; call the appropriate routines to
  // build it.
  const CGFunctionInfo *FI;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
  } else {
    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
  }

  llvm::Type *ResultType = nullptr;
  // If there is something higher level prodding our CGFunctionInfo, then
  // don't recurse into it again.
  if (FunctionsBeingProcessed.count(FI)) {
    ResultType = llvm::StructType::get(getLLVMContext());
    SkippedLayout = true;
  } else {
    // Otherwise, we're good to go, go ahead and convert it.
    ResultType = GetFunctionType(*FI);
  }

  return ResultType;
}

/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();

  // For device-side compilation, CUDA device builtin surface/texture types
  // may be represented by different types.
  if (Context.getLangOpts().CUDAIsDevice) {
    if (T->isCUDADeviceBuiltinSurfaceType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinSurfaceDeviceType())
        return Ty;
    } else if (T->isCUDADeviceBuiltinTextureType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinTextureDeviceType())
        return Ty;
    }
  }

  // RecordTypes are cached and processed specially.
  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl());

  llvm::Type *CachedType = nullptr;
  auto TCI = TypeCache.find(Ty);
  if (TCI != TypeCache.end())
    CachedType = TCI->second;
  // With expensive checks, check that the type we compute matches the
  // cached type.
#ifndef EXPENSIVE_CHECKS
  if (CachedType)
    return CachedType;
#endif
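  // (Under EXPENSIVE_CHECKS we deliberately fall through and reconvert the
  // type so the assert at the bottom can verify that the fresh result
  // matches the cached one.)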

  // If we don't have it in the cache, convert it now.
  llvm::Type *ResultType = nullptr;
  switch (Ty->getTypeClass()) {
  case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical or dependent types aren't possible.");

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty)->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // The LLVM void type can only be used as a function result, so lower
      // these to the same type as char (i8).
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
      break;

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
      break;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      ResultType = llvm::IntegerType::get(getLLVMContext(),
                                 static_cast<unsigned>(Context.getTypeSize(T)));
      break;

    case BuiltinType::Float16:
      ResultType =
          getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
                           /* UseNativeHalf = */ true);
      break;

    case BuiltinType::Half:
      // Half FP can either be storage-only (lowered to i16) or native.
      ResultType = getTypeForFormat(
          getLLVMContext(), Context.getFloatTypeSemantics(T),
          Context.getLangOpts().NativeHalfType ||
              !Context.getTargetInfo().useFP16ConversionIntrinsics());
      break;
    case BuiltinType::LongDouble:
      LongDoubleReferenced = true;
      [[fallthrough]];
    case BuiltinType::BFloat16:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    /* UseNativeHalf = */ false);
      break;

    case BuiltinType::NullPtr:
      // Model std::nullptr_t as a pointer in the default address space.
      ResultType = llvm::PointerType::getUnqual(getLLVMContext());
      break;

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
      break;

#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
      break;
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId)                    \
  case BuiltinType::Id:
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId)                 \
  case BuiltinType::Id:
#define AARCH64_VECTOR_TYPE(Name, MangledName, Id, SingletonId)                \
  case BuiltinType::Id:
#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId)
#include "clang/Basic/AArch64SVEACLETypes.def"
      {
        ASTContext::BuiltinVectorTypeInfo Info =
            Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
        auto VTy =
            llvm::VectorType::get(ConvertType(Info.ElementType), Info.EC);
        switch (Info.NumVectors) {
        default:
          llvm_unreachable("Expected 1, 2, 3 or 4 vectors!");
        case 1:
          return VTy;
        case 2:
          return llvm::StructType::get(VTy, VTy);
        case 3:
          return llvm::StructType::get(VTy, VTy, VTy);
        case 4:
          return llvm::StructType::get(VTy, VTy, VTy, VTy);
        }
      }
    case BuiltinType::SveCount:
      return llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id: \
      ResultType = \
        llvm::FixedVectorType::get(ConvertType(Context.BoolTy), Size); \
      break;
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
      {
        ASTContext::BuiltinVectorTypeInfo Info =
            Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
        if (Info.NumVectors != 1) {
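          // RVV tuple types are lowered to a "riscv.vector.tuple" target
          // extension type carrying a scalable i8 vector the size of one
          // member vector, plus the number of vectors in the tuple.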
          unsigned I8EltCount =
              Info.EC.getKnownMinValue() *
              ConvertType(Info.ElementType)->getScalarSizeInBits() / 8;
          return llvm::TargetExtType::get(
              getLLVMContext(), "riscv.vector.tuple",
              llvm::ScalableVectorType::get(
                  llvm::Type::getInt8Ty(getLLVMContext()), I8EltCount),
              Info.NumVectors);
        }
        return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                             Info.EC.getKnownMinValue());
      }
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS)                  \
  case BuiltinType::Id: {                                                      \
    if (BuiltinType::Id == BuiltinType::WasmExternRef)                         \
      ResultType = CGM.getTargetCodeGenInfo().getWasmExternrefReferenceType(); \
    else                                                                       \
      llvm_unreachable("Unexpected wasm reference builtin type!");             \
  } break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_OPAQUE_PTR_TYPE(Name, Id, SingletonId, Width, Align, AS)        \
  case BuiltinType::Id:                                                        \
    return llvm::PointerType::get(getLLVMContext(), AS);
#include "clang/Basic/AMDGPUTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
      ResultType = CGM.getHLSLRuntime().convertHLSLSpecificType(Ty);
      break;
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("Unexpected placeholder builtin type!");
    }
    break;
  }
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Unexpected undeduced type!");
  case Type::Complex: {
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }
  case Type::Pointer: {
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::VariableArray: {
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    ResultType = ConvertTypeForMem(A->getElementType());
    break;
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized.  If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
    break;
  }
  case Type::ArrayParameter:
  case Type::ConstantArray: {
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());

    // Lower arrays of undefined struct type to arrays of i8 just to have a
    // concrete type.
    if (!EltTy->isSized()) {
      SkippedLayout = true;
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
    }

    ResultType = llvm::ArrayType::get(EltTy, A->getZExtSize());
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(Ty);
    // An ext_vector_type of Bool is really a vector of bits.
    llvm::Type *IRElemTy = VT->isExtVectorBoolType()
                               ? llvm::Type::getInt1Ty(getLLVMContext())
                               : ConvertType(VT->getElementType());
    ResultType = llvm::FixedVectorType::get(IRElemTy, VT->getNumElements());
    break;
  }
  case Type::ConstantMatrix: {
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    ResultType =
        llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
                                   MT->getNumRows() * MT->getNumColumns());
    break;
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto:
    ResultType = ConvertFunctionTypeInternal(T);
    break;
  case Type::ObjCObject:
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
    break;

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::create(getLLVMContext());
    ResultType = T;
    break;
  }

  case Type::ObjCObjectPointer:
    ResultType = llvm::PointerType::getUnqual(getLLVMContext());
    break;

  case Type::Enum: {
    const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
    if (ED->isCompleteDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder 'i32' type.  This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
    break;
  }

  case Type::BlockPointer: {
    // The pointee of a block pointer is a function type, and for function
    // types getTargetAddressSpace() returns the program address space used
    // for function pointers. Block pointers are data pointers, however, so
    // pass the pointee's AST address space to getTargetAddressSpace()
    // directly to get the LLVM IR address space used for data pointers.
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    unsigned AS = Context.getTargetAddressSpace(FTy.getAddressSpace());
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::MemberPointer: {
    auto *MPTy = cast<MemberPointerType>(Ty);
    if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
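      // The member pointer representation is not convertible yet, e.g. under
      // the Microsoft ABI, where its size depends on an inheritance model
      // that may not be known for an incomplete class. Use an opaque struct
      // and remember it so the type cache can be flushed once the class is
      // complete (see RefreshTypeCacheForClass).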
      auto *C = MPTy->getClass();
      auto Insertion = RecordsWithOpaqueMemberPointers.insert({C, nullptr});
      if (Insertion.second)
        Insertion.first->second = llvm::StructType::create(getLLVMContext());
      ResultType = Insertion.first->second;
    } else {
      ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
    }
    break;
  }

  case Type::Atomic: {
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
    ResultType = ConvertTypeForMem(valueType);

    // Pad out to the inflated size if necessary.
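    // (The AST inflates atomic sizes to a power of two when possible, e.g. a
    // 3-byte payload typically becomes a 4-byte atomic; the slack is filled
    // with an i8 array tail below.)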
    uint64_t valueSize = Context.getTypeSize(valueType);
    uint64_t atomicSize = Context.getTypeSize(Ty);
    if (valueSize != atomicSize) {
      assert(valueSize < atomicSize);
      llvm::Type *elts[] = {
        ResultType,
        llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
      };
      ResultType =
          llvm::StructType::get(getLLVMContext(), llvm::ArrayRef(elts));
    }
    break;
  }
  case Type::Pipe: {
    ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
    break;
  }
  case Type::BitInt: {
    const auto *EIT = cast<BitIntType>(Ty);
    ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
    break;
  }
  case Type::HLSLAttributedResource:
    ResultType = CGM.getHLSLRuntime().convertHLSLSpecificType(Ty);
    break;
  }

  assert(ResultType && "Didn't convert a type?");
  assert((!CachedType || CachedType == ResultType) &&
         "Cached type doesn't match computed type");

  TypeCache[Ty] = ResultType;
  return ResultType;
}

bool CodeGenModule::isPaddedAtomicType(QualType type) {
  return isPaddedAtomicType(type->castAs<AtomicType>());
}

bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
  return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType());
}

/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecls are not necessarily unique; instead, use the (clang) type
  // connected to the decl.
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
  if (!Entry) {
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  RD = RD->getDefinition();
  if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
    return Ty;

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases()) {
      if (I.isVirtual()) continue;
      ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
    }
  }

  // Layout fields.
  std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = std::move(Layout);

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();

  return Ty;
}

/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  auto I = CGRecordLayouts.find(Key);
  if (I != CGRecordLayouts.end())
    return *I->second;
  // Compute the type information.
  ConvertRecordDeclType(RD);

  // Now try again.
  I = CGRecordLayouts.find(Key);

  assert(I != CGRecordLayouts.end() &&
         "Unable to find record layout information for type");
  return *I->second;
}

bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
  assert((T->isAnyPointerType() || T->isBlockPointerType()) && "Invalid type");
  return isZeroInitializable(T);
}

bool CodeGenTypes::isZeroInitializable(QualType T) {
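  // A pointer is zero-initializable only if the target's null pointer value
  // for this type is all-zero bits; on some targets (e.g. certain AMDGPU
  // address spaces) the null pointer is a nonzero bit pattern.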
  if (T->getAs<PointerType>())
    return Context.getTargetNullPointerValue(T) == 0;

  if (const auto *AT = Context.getAsArrayType(T)) {
    if (isa<IncompleteArrayType>(AT))
      return true;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
      if (Context.getConstantArrayElementCount(CAT) == 0)
        return true;
    T = Context.getBaseElementType(T);
  }

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    return isZeroInitializable(RD);
  }

  // We have to ask the ABI about member pointers.
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);

  // Everything else is okay.
  return true;
}

bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) {
  return getCGRecordLayout(RD).isZeroInitializable();
}

unsigned CodeGenTypes::getTargetAddressSpace(QualType T) const {
  // Return the address space for the type. If the type is a function type
  // without an address space qualifier, the program address space is used.
  // Otherwise, the target picks the best address space based on the type
  // information.
  return T->isFunctionType() && !T.hasAddressSpace()
             ? getDataLayout().getProgramAddressSpace()
             : getContext().getTargetAddressSpace(T.getAddressSpace());
}
876