//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGHLSLRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace clang;
using namespace CodeGen;

CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
    : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
      Target(cgm.getTarget()) {
  SkippedLayout = false;
  LongDoubleReferenced = false;
}

CodeGenTypes::~CodeGenTypes() {
  for (llvm::FoldingSet<CGFunctionInfo>::iterator
       I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
    delete &*I++;
}

CGCXXABI &CodeGenTypes::getCXXABI() const { return getCGM().getCXXABI(); }

const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const {
  return CGM.getCodeGenOpts();
}

void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
                                     llvm::StructType *Ty,
                                     StringRef suffix) {
  SmallString<256> TypeName;
  llvm::raw_svector_ostream OS(TypeName);
  OS << RD->getKindName() << '.';

  // FIXME: We probably want to make more tweaks to the printing policy. For
  // example, we should probably enable PrintCanonicalTypes and
  // FullyQualifiedNames.
  PrintingPolicy Policy = RD->getASTContext().getPrintingPolicy();
  Policy.SuppressInlineNamespace =
      PrintingPolicy::SuppressInlineNamespaceMode::None;

  // Name the codegen type after the typedef name
  // if there is no tag type name available
  if (RD->getIdentifier()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (RD->getDeclContext())
      RD->printQualifiedName(OS, Policy);
    else
      RD->printName(OS, Policy);
  } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (TDD->getDeclContext())
      TDD->printQualifiedName(OS, Policy);
    else
      TDD->printName(OS);
  } else
    OS << "anon";

  if (!suffix.empty())
    OS << suffix;

  Ty->setName(OS.str());
}
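
// For example, "struct Foo" declared in namespace N is named "struct.N::Foo"
// in IR; an unnamed struct declared as "typedef struct { ... } T" is named
// "struct.T"; and a truly anonymous record falls back to "struct.anon".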

/// ConvertTypeForMem - Convert type T into a llvm::Type.  This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type.  For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
///
/// We generally assume that the alloc size of this type under the LLVM
/// data layout is the same as the size of the AST type.  The alignment
/// does not have to match: Clang should always use explicit alignments
/// and packed structs as necessary to produce the layout it needs.
/// But the size does need to be exactly right or else things like struct
/// layout will break.
llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
  if (T->isConstantMatrixType()) {
    const Type *Ty = Context.getCanonicalType(T).getTypePtr();
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    return llvm::ArrayType::get(ConvertType(MT->getElementType()),
                                MT->getNumRows() * MT->getNumColumns());
  }

  llvm::Type *R = ConvertType(T);

  // Check for the boolean vector case.
  if (T->isExtVectorBoolType()) {
    auto *FixedVT = cast<llvm::FixedVectorType>(R);
    // Pad to at least one byte.
    uint64_t BytePadded = std::max<uint64_t>(FixedVT->getNumElements(), 8);
    return llvm::IntegerType::get(FixedVT->getContext(), BytePadded);
  }

  // If T is _Bool or a _BitInt type, ConvertType will produce an IR type
  // with the exact semantic bit-width of the AST type; for example,
  // _BitInt(17) will turn into i17. In memory, however, we need to store
  // such values extended to their full storage size as decided by AST
  // layout; this is an ABI requirement. Ideally, we would always use an
  // integer type that's just the bit-size of the AST type; for example, if
  // sizeof(_BitInt(17)) == 4, _BitInt(17) would turn into i32. That is what's
  // returned by convertTypeForLoadStore. However, that type does not
  // always satisfy the size requirement on memory representation types
  // described above. For example, a 32-bit platform might reasonably set
  // sizeof(_BitInt(65)) == 12, but i96 is likely to have an alloc size
  // of 16 bytes in the LLVM data layout. In these cases, we simply return
  // a byte array of the appropriate size.
  if (T->isBitIntType()) {
    if (typeRequiresSplitIntoByteArray(T, R))
      return llvm::ArrayType::get(CGM.Int8Ty,
                                  Context.getTypeSizeInChars(T).getQuantity());
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));
  }

  if (R->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  // Else, don't map it.
  return R;
}
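
// For example, with sizeof(_BitInt(17)) == 4, the i17 scalar type widens to
// i32 in memory, and _Bool widens from i1 to i8 on typical targets.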

bool CodeGenTypes::typeRequiresSplitIntoByteArray(QualType ASTTy,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(ASTTy);

  CharUnits ASTSize = Context.getTypeSizeInChars(ASTTy);
  CharUnits LLVMSize =
      CharUnits::fromQuantity(getDataLayout().getTypeAllocSize(LLVMTy));
  return ASTSize != LLVMSize;
}
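
// For example, a target might set sizeof(_BitInt(65)) == 12 while the LLVM
// integer type i65 has an alloc size of 16 bytes; the sizes disagree, so the
// value must live in memory as [12 x i8].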

llvm::Type *CodeGenTypes::convertTypeForLoadStore(QualType T,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(T);

  if (T->isBitIntType())
    return llvm::Type::getIntNTy(
        getLLVMContext(), Context.getTypeSizeInChars(T).getQuantity() * 8);

  if (LLVMTy->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  if (T->isExtVectorBoolType())
    return ConvertTypeForMem(T);

  return LLVMTy;
}
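
// For example, a _BitInt(65) held in memory as [12 x i8] is loaded and stored
// as i96 (12 bytes * 8 bits); callers then truncate or extend between i96 and
// the i65 scalar type.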

/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
  llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
      RecordDeclTypes.find(Ty);
  return I != RecordDeclTypes.end() && !I->second->isOpaque();
}

/// isFuncParamTypeConvertible - Return true if the specified type in a
/// function parameter or result position can be converted to an IR type at
/// this point. This boils down to whether the type is complete.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
  // Some ABIs cannot have their member pointers represented in IR unless
  // certain circumstances have been reached.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return getCXXABI().isMemberPointerConvertible(MPT);

  // If this isn't a tagged type, we can convert it!
  const TagType *TT = Ty->getAs<TagType>();
  if (!TT) return true;

  // Incomplete types cannot be converted.
  return !TT->isIncompleteType();
}

/// Code to verify a given function type is complete, i.e. the return type and
/// all of the parameter types are complete. We don't want to ask the ABI
/// lowering code to handle a type that cannot be converted to an IR type.
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
  if (!isFuncParamTypeConvertible(FT->getReturnType()))
    return false;

  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
        return false;

  return true;
}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  // If this is an enum being completed, then we flush all non-struct types
  // from the cache.  This allows function types and other things that may be
  // derived from the enum to be recomputed.
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(ED->getTypeForDecl())) {
      // Okay, we formed some types based on this.  We speculated that the enum
      // would be lowered to i32, so we only need to flush the cache if this
      // didn't happen.
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
        TypeCache.clear();
    }
    // If necessary, provide the full definition of a type only used with a
    // declaration so far.
    if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
      DI->completeType(ED);
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // anonymous type, then go ahead and complete it now.
  const RecordDecl *RD = cast<RecordDecl>(TD);
  if (RD->isDependentType()) return;

  // Only complete it if we converted it already.  If we haven't converted it
  // yet, we'll just do it lazily.
  if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
    ConvertRecordDeclType(RD);

  // If necessary, provide the full definition of a type only used with a
  // declaration so far.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeType(RD);
}
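
// For example, if a forward-declared enum was speculatively converted to the
// i32 placeholder and its definition later settles on a 64-bit underlying
// type, every cached type derived from the enum is stale and gets recomputed.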

void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  QualType T = Context.getRecordType(RD);
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();
  if (RecordsWithOpaqueMemberPointers.count(Ty)) {
    TypeCache.clear();
    RecordsWithOpaqueMemberPointers.clear();
  }
}

static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
                                    const llvm::fltSemantics &format,
                                    bool UseNativeHalf = false) {
  if (&format == &llvm::APFloat::IEEEhalf()) {
    if (UseNativeHalf)
      return llvm::Type::getHalfTy(VMContext);
    else
      return llvm::Type::getInt16Ty(VMContext);
  }
  if (&format == &llvm::APFloat::BFloat())
    return llvm::Type::getBFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEsingle())
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble())
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad())
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble())
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended())
    return llvm::Type::getX86_FP80Ty(VMContext);
  llvm_unreachable("Unknown float format!");
}

llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
  assert(QFT.isCanonical());
  const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
  // First, check whether we can build the full function type.  If the
  // function type depends on an incomplete type (e.g. a struct or enum), we
  // cannot lower the function type.
  if (!isFuncTypeConvertible(FT)) {
    // This function's type depends on an incomplete tag type.

    // Force conversion of all the relevant record types, to make sure
    // we re-convert the FunctionType when appropriate.
    if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
      ConvertRecordDeclType(RT->getDecl());
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
      for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
        if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
          ConvertRecordDeclType(RT->getDecl());

    SkippedLayout = true;

    // Return a placeholder type.
    return llvm::StructType::get(getLLVMContext());
  }

  // The function type can be built; call the appropriate routines to
  // build it.
  const CGFunctionInfo *FI;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
  } else {
    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
  }

  llvm::Type *ResultType = nullptr;
  // If there is something higher level prodding our CGFunctionInfo, then
  // don't recurse into it again.
  if (FunctionsBeingProcessed.count(FI)) {
    ResultType = llvm::StructType::get(getLLVMContext());
    SkippedLayout = true;
  } else {
    // Otherwise, we're good to go, go ahead and convert it.
    ResultType = GetFunctionType(*FI);
  }

  return ResultType;
}
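
// For example, given "struct S; S f();", the type of 'f' cannot be lowered
// until S is complete; a placeholder struct type is returned instead, and
// SkippedLayout later forces the type cache to be flushed once S is laid out
// (see ConvertRecordDeclType).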

/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();

  // For device-side compilation, CUDA device builtin surface/texture types
  // may be represented by different types.
  if (Context.getLangOpts().CUDAIsDevice) {
    if (T->isCUDADeviceBuiltinSurfaceType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinSurfaceDeviceType())
        return Ty;
    } else if (T->isCUDADeviceBuiltinTextureType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinTextureDeviceType())
        return Ty;
    }
  }

  // RecordTypes are cached and processed specially.
  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl());

  llvm::Type *CachedType = nullptr;
  auto TCI = TypeCache.find(Ty);
  if (TCI != TypeCache.end())
    CachedType = TCI->second;
  // With expensive checks, check that the type we compute matches the
  // cached type.
#ifndef EXPENSIVE_CHECKS
  if (CachedType)
    return CachedType;
#endif

  // If we don't have it in the cache, convert it now.
  llvm::Type *ResultType = nullptr;
  switch (Ty->getTypeClass()) {
  case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical or dependent types aren't possible.");

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty)->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call.  Just
      // map to the same as char.
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
      break;

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
      break;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      ResultType = llvm::IntegerType::get(getLLVMContext(),
                                 static_cast<unsigned>(Context.getTypeSize(T)));
      break;

    case BuiltinType::Float16:
      ResultType =
          getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
                           /* UseNativeHalf = */ true);
      break;

    case BuiltinType::Half:
      // Half FP can either be storage-only (lowered to i16) or native.
      ResultType = getTypeForFormat(
          getLLVMContext(), Context.getFloatTypeSemantics(T),
          Context.getLangOpts().NativeHalfType ||
              !Context.getTargetInfo().useFP16ConversionIntrinsics());
      break;
    case BuiltinType::LongDouble:
      LongDoubleReferenced = true;
      [[fallthrough]];
    case BuiltinType::BFloat16:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    /* UseNativeHalf = */ false);
      break;

    case BuiltinType::NullPtr:
      // Model std::nullptr_t as i8*
      ResultType = llvm::PointerType::getUnqual(getLLVMContext());
      break;

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
      break;

#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
      break;
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId)                    \
  case BuiltinType::Id:
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId)                 \
  case BuiltinType::Id:
#define SVE_TYPE(Name, Id, SingletonId)
#include "clang/Basic/AArch64SVEACLETypes.def"
      {
        ASTContext::BuiltinVectorTypeInfo Info =
            Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
        // The `__mfp8` type maps to `<1 x i8>` which can't be used to build
        // a <N x i8> vector type, hence bypass the call to `ConvertType` for
        // the element type and create the vector type directly.
        auto *EltTy = Info.ElementType->isMFloat8Type()
                          ? llvm::Type::getInt8Ty(getLLVMContext())
                          : ConvertType(Info.ElementType);
        auto *VTy = llvm::VectorType::get(EltTy, Info.EC);
        switch (Info.NumVectors) {
        default:
          llvm_unreachable("Expected 1, 2, 3 or 4 vectors!");
        case 1:
          return VTy;
        case 2:
          return llvm::StructType::get(VTy, VTy);
        case 3:
          return llvm::StructType::get(VTy, VTy, VTy);
        case 4:
          return llvm::StructType::get(VTy, VTy, VTy, VTy);
        }
      }
    case BuiltinType::SveCount:
      return llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
    case BuiltinType::MFloat8:
      return llvm::VectorType::get(llvm::Type::getInt8Ty(getLLVMContext()), 1,
                                   false);
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id: \
      ResultType = \
        llvm::FixedVectorType::get(ConvertType(Context.BoolTy), Size); \
      break;
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
      {
        ASTContext::BuiltinVectorTypeInfo Info =
            Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
        if (Info.NumVectors != 1) {
          unsigned I8EltCount =
              Info.EC.getKnownMinValue() *
              ConvertType(Info.ElementType)->getScalarSizeInBits() / 8;
          return llvm::TargetExtType::get(
              getLLVMContext(), "riscv.vector.tuple",
              llvm::ScalableVectorType::get(
                  llvm::Type::getInt8Ty(getLLVMContext()), I8EltCount),
              Info.NumVectors);
        }
        return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                             Info.EC.getKnownMinValue());
      }
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS)                  \
  case BuiltinType::Id: {                                                      \
    if (BuiltinType::Id == BuiltinType::WasmExternRef)                         \
      ResultType = CGM.getTargetCodeGenInfo().getWasmExternrefReferenceType(); \
    else                                                                       \
      llvm_unreachable("Unexpected wasm reference builtin type!");             \
  } break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_OPAQUE_PTR_TYPE(Name, Id, SingletonId, Width, Align, AS)        \
  case BuiltinType::Id:                                                        \
    return llvm::PointerType::get(getLLVMContext(), AS);
#define AMDGPU_NAMED_BARRIER_TYPE(Name, Id, SingletonId, Width, Align, Scope)  \
  case BuiltinType::Id:                                                        \
    return llvm::TargetExtType::get(getLLVMContext(), "amdgcn.named.barrier",  \
                                    {}, {Scope});
#include "clang/Basic/AMDGPUTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
      ResultType = CGM.getHLSLRuntime().convertHLSLSpecificType(Ty);
      break;
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("Unexpected placeholder builtin type!");
    }
    break;
  }
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Unexpected undeduced type!");
  case Type::Complex: {
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }
  case Type::Pointer: {
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::VariableArray: {
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    ResultType = ConvertTypeForMem(A->getElementType());
    break;
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized.  If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
    break;
  }
  case Type::ArrayParameter:
  case Type::ConstantArray: {
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());

    // Lower arrays of undefined struct type to arrays of i8 just to have a
    // concrete type.
    if (!EltTy->isSized()) {
      SkippedLayout = true;
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
    }

    ResultType = llvm::ArrayType::get(EltTy, A->getZExtSize());
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(Ty);
    // An ext_vector_type of Bool is really a vector of bits.
    llvm::Type *IRElemTy = VT->isExtVectorBoolType()
                               ? llvm::Type::getInt1Ty(getLLVMContext())
                           : VT->getElementType()->isMFloat8Type()
                               ? llvm::Type::getInt8Ty(getLLVMContext())
                               : ConvertType(VT->getElementType());
    ResultType = llvm::FixedVectorType::get(IRElemTy, VT->getNumElements());
    break;
  }
  case Type::ConstantMatrix: {
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    ResultType =
        llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
                                   MT->getNumRows() * MT->getNumColumns());
    break;
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto:
    ResultType = ConvertFunctionTypeInternal(T);
    break;
  case Type::ObjCObject:
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
    break;

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::create(getLLVMContext());
    ResultType = T;
    break;
  }

  case Type::ObjCObjectPointer:
    ResultType = llvm::PointerType::getUnqual(getLLVMContext());
    break;

  case Type::Enum: {
    const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
    if (ED->isCompleteDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder 'i32' type.  This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
    break;
  }

  case Type::BlockPointer: {
    // A block pointer's pointee is a function type, and for function types
    // getTargetAddressSpace() returns the default address space for function
    // pointers, i.e. the program address space. Block pointers, however, are
    // data pointers, so it is important to pass the pointee's AST address
    // space when calling getTargetAddressSpace(), to ensure that we get the
    // LLVM IR address space for data pointers and not function pointers.
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    unsigned AS = Context.getTargetAddressSpace(FTy.getAddressSpace());
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::MemberPointer: {
    auto *MPTy = cast<MemberPointerType>(Ty);
    if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
      auto *C = MPTy->getClass();
      auto Insertion = RecordsWithOpaqueMemberPointers.insert({C, nullptr});
      if (Insertion.second)
        Insertion.first->second = llvm::StructType::create(getLLVMContext());
      ResultType = Insertion.first->second;
    } else {
      ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
    }
    break;
  }

  case Type::Atomic: {
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
    ResultType = ConvertTypeForMem(valueType);

    // Pad out to the inflated size if necessary.
    uint64_t valueSize = Context.getTypeSize(valueType);
    uint64_t atomicSize = Context.getTypeSize(Ty);
    if (valueSize != atomicSize) {
      assert(valueSize < atomicSize);
      llvm::Type *elts[] = {
        ResultType,
        llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
      };
      ResultType =
          llvm::StructType::get(getLLVMContext(), llvm::ArrayRef(elts));
    }
    break;
  }
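
  // For example, an atomic whose value type occupies 3 bytes is typically
  // inflated to a 4-byte atomic, producing { <3-byte value type>, [1 x i8] }.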
  case Type::Pipe: {
    ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
    break;
  }
  case Type::BitInt: {
    const auto *EIT = cast<BitIntType>(Ty);
    ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
    break;
  }
  case Type::HLSLAttributedResource:
    ResultType = CGM.getHLSLRuntime().convertHLSLSpecificType(Ty);
    break;
  }

  assert(ResultType && "Didn't convert a type?");
  assert((!CachedType || CachedType == ResultType) &&
         "Cached type doesn't match computed type");

  TypeCache[Ty] = ResultType;
  return ResultType;
}

bool CodeGenModule::isPaddedAtomicType(QualType type) {
  return isPaddedAtomicType(type->castAs<AtomicType>());
}

bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
  return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType());
}
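
// This predicate matches the padding decision made in the Type::Atomic case
// of ConvertType above: it is true exactly when a padding array was added.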

/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecl's are not necessarily unique, instead use the (clang)
  // type connected to the decl.
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
  if (!Entry) {
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  RD = RD->getDefinition();
  if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
    return Ty;

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases()) {
      if (I.isVirtual()) continue;
      ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
    }
  }

  // Layout fields.
  std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = std::move(Layout);

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();

  return Ty;
}
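
// For example, for a self-referential "struct Node { struct Node *next; };",
// the opaque forward declaration created above lets the conversion of the
// 'next' field refer to the record while its body is still being laid out.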

/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  auto I = CGRecordLayouts.find(Key);
  if (I != CGRecordLayouts.end())
    return *I->second;
  // Compute the type information.
  ConvertRecordDeclType(RD);

  // Now try again.
  I = CGRecordLayouts.find(Key);

  assert(I != CGRecordLayouts.end() &&
         "Unable to find record layout information for type");
  return *I->second;
}

bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
  assert((T->isAnyPointerType() || T->isBlockPointerType()) && "Invalid type");
  return isZeroInitializable(T);
}

bool CodeGenTypes::isZeroInitializable(QualType T) {
  if (T->getAs<PointerType>())
    return Context.getTargetNullPointerValue(T) == 0;

  if (const auto *AT = Context.getAsArrayType(T)) {
    if (isa<IncompleteArrayType>(AT))
      return true;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
      if (Context.getConstantArrayElementCount(CAT) == 0)
        return true;
    T = Context.getBaseElementType(T);
  }

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    return isZeroInitializable(RD);
  }

  // We have to ask the ABI about member pointers.
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);
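  // For example, under the Itanium C++ ABI a null pointer to a data member is
  // represented as -1, so a record containing such a member pointer cannot be
  // zero-initialized.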

  // Everything else is okay.
  return true;
}

bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) {
  return getCGRecordLayout(RD).isZeroInitializable();
}

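// For example, on a target where code lives in a separate program address
// space (such as AVR), a function type with no explicit address space
// qualifier maps to the program address space rather than the data address
// space.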
unsigned CodeGenTypes::getTargetAddressSpace(QualType T) const {
  // Return the address space for the type. If the type is a
  // function type without an address space qualifier, the
  // program address space is used. Otherwise, the target picks
  // the best address space based on the type information.
  return T->isFunctionType() && !T.hasAddressSpace()
             ? getDataLayout().getProgramAddressSpace()
             : getContext().getTargetAddressSpace(T.getAddressSpace());
}