//===- NVPTX.cpp ----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"
#include "llvm/IR/IntrinsicsNVPTX.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class NVPTXTargetCodeGenInfo;

class NVPTXABIInfo : public ABIInfo {
  NVPTXTargetCodeGenInfo &CGInfo;

public:
  NVPTXABIInfo(CodeGenTypes &CGT, NVPTXTargetCodeGenInfo &Info)
      : ABIInfo(CGT), CGInfo(Info) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
  bool isUnsupportedType(QualType T) const;
  ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, unsigned MaxSize) const;
};

class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<NVPTXABIInfo>(CGT, *this)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  bool shouldEmitStaticExternCAliases() const override;

  llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override {
    // On the device side, a surface reference is represented as an object
    // handle in a 64-bit integer.
    return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
  }

  llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const override {
    // On the device side, a texture reference is represented as an object
    // handle in a 64-bit integer.
    return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
  }

  bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF, LValue Dst,
                                              LValue Src) const override {
    emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
    return true;
  }

  bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF, LValue Dst,
                                              LValue Src) const override {
    emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
    return true;
  }

private:
  // Adds an MDNode with GV, Name, and Operand as operands, and appends it to
  // the nvvm.annotations named metadata node.
  static void addNVVMMetadata(llvm::GlobalValue *GV, StringRef Name,
                              int Operand);

  static void emitBuiltinSurfTexDeviceCopy(CodeGenFunction &CGF, LValue Dst,
                                           LValue Src) {
    llvm::Value *Handle = nullptr;
    llvm::Constant *C =
        llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).getPointer());
    // Look through an `addrspacecast` on the constant pointer, if any.
    if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C))
      C = llvm::cast<llvm::Constant>(ASC->getPointerOperand());
    if (auto *GV = llvm::dyn_cast_or_null<llvm::GlobalVariable>(C)) {
      // Load the handle from the specific global variable using the
      // `nvvm.texsurf.handle.internal` intrinsic.
      Handle = CGF.EmitRuntimeCall(
          CGF.CGM.getIntrinsic(llvm::Intrinsic::nvvm_texsurf_handle_internal,
                               {GV->getType()}),
          {GV}, "texsurf_handle");
    } else
      Handle = CGF.EmitLoadOfScalar(Src, SourceLocation());
    CGF.EmitStoreOfScalar(Handle, Dst);
  }
};

/// Checks if the type is unsupported directly by the current target.
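/// For NVPTX this covers _Float16 and 128-bit floating-point types when the
/// target does not provide them, integers wider than the widest supported
/// integer type, and any array or record type that contains such a type.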
bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
  ASTContext &Context = getContext();
  if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
    return true;
  if (!Context.getTargetInfo().hasFloat128Type() &&
      (T->isFloat128Type() ||
       (T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
    return true;
  if (const auto *EIT = T->getAs<BitIntType>())
    return EIT->getNumBits() >
           (Context.getTargetInfo().hasInt128Type() ? 128U : 64U);
  if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
      Context.getTypeSize(T) > 64U)
    return true;
  if (const auto *AT = T->getAsArrayTypeUnsafe())
    return isUnsupportedType(AT->getElementType());
  const auto *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const CXXBaseSpecifier &I : CXXRD->bases())
      if (isUnsupportedType(I.getType()))
        return true;

  for (const FieldDecl *I : RD->fields())
    if (isUnsupportedType(I->getType()))
      return true;
  return false;
}

/// Coerce the given type into an integer array whose elements are at most
/// MaxSize bits wide.
ABIArgInfo NVPTXABIInfo::coerceToIntArrayWithLimit(QualType Ty,
                                                   unsigned MaxSize) const {
  // Alignment and Size are measured in bits.
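  // The element width is the type's alignment capped at MaxSize; the number of
  // elements is the type's size divided by that width, rounded up.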
  const uint64_t Size = getContext().getTypeSize(Ty);
  const uint64_t Alignment = getContext().getTypeAlign(Ty);
  const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
  llvm::Type *IntType = llvm::Type::getIntNTy(getVMContext(), Div);
  const uint64_t NumElements = (Size + Div - 1) / Div;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}

ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (getContext().getLangOpts().OpenMP &&
      getContext().getLangOpts().OpenMPIsTargetDevice &&
      isUnsupportedType(RetTy))
    return coerceToIntArrayWithLimit(RetTy, 64);

  // Note: this is different from the default ABI.
  if (!RetTy->isScalarType())
    return ABIArgInfo::getDirect();

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}

ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Pass aggregate types indirectly, by value.
  if (isAggregateTypeForABI(Ty)) {
    // Under CUDA device compilation, tex/surf builtin types are replaced with
    // object types and passed directly.
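    // The replacement object types are the 64-bit integer handles returned by
    // the getCUDADeviceBuiltin{Surface,Texture}DeviceType hooks above.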
    if (getContext().getLangOpts().CUDAIsDevice) {
      if (Ty->isCUDADeviceBuiltinSurfaceType())
        return ABIArgInfo::getDirect(
            CGInfo.getCUDADeviceBuiltinSurfaceDeviceType());
      if (Ty->isCUDADeviceBuiltinTextureType())
        return ABIArgInfo::getDirect(
            CGInfo.getCUDADeviceBuiltinTextureDeviceType());
    }
    return getNaturalAlignIndirect(Ty, /* byval */ true);
  }

  if (const auto *EIT = Ty->getAs<BitIntType>()) {
    if ((EIT->getNumBits() > 128) ||
        (!getContext().getTargetInfo().hasInt128Type() &&
         EIT->getNumBits() > 64))
      return getNaturalAlignIndirect(Ty, /* byval */ true);
  }

  return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                            : ABIArgInfo::getDirect());
}

void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  FI.setEffectiveCallingConvention(getRuntimeCC());
}

Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  llvm_unreachable("NVPTX does not support varargs");
}

void NVPTXTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (GV->isDeclaration())
    return;
  const VarDecl *VD = dyn_cast_or_null<VarDecl>(D);
  if (VD) {
    if (M.getLangOpts().CUDA) {
      if (VD->getType()->isCUDADeviceBuiltinSurfaceType())
        addNVVMMetadata(GV, "surface", 1);
      else if (VD->getType()->isCUDADeviceBuiltinTextureType())
        addNVVMMetadata(GV, "texture", 1);
      return;
    }
  }

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  // Perform special handling in OpenCL mode.
  if (M.getLangOpts().OpenCL) {
    // Use OpenCL function attributes to check for kernel functions.
    // By default, all functions are device functions.
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL __kernel functions get kernel metadata.
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
      // And kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);
    }
  }

  // Perform special handling in CUDA mode.
  if (M.getLangOpts().CUDA) {
    // CUDA __global__ functions get a kernel metadata entry. Since
    // __global__ functions cannot be called from the device, we do not
    // need to set the noinline attribute.
    if (FD->hasAttr<CUDAGlobalAttr>()) {
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
    }
    if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
      // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node.
      llvm::APSInt MaxThreads(32);
      MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
      if (MaxThreads > 0)
        addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());

      // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
      // not specified in __launch_bounds__ or if the user specified a 0 value,
      // we don't have to add a PTX directive.
      if (Attr->getMinBlocks()) {
        llvm::APSInt MinBlocks(32);
        MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
        if (MinBlocks > 0)
          // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node.
          addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
      }
    }
  }

  // Attach kernel metadata directly if compiling for NVPTX.
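  // This handles functions carrying NVPTXKernelAttr explicitly, independent of
  // the CUDA and OpenCL paths above.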
  if (FD->hasAttr<NVPTXKernelAttr>()) {
    addNVVMMetadata(F, "kernel", 1);
  }
}

void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV,
                                             StringRef Name, int Operand) {
  llvm::Module *M = GV->getParent();
  llvm::LLVMContext &Ctx = M->getContext();

  // Get the "nvvm.annotations" metadata node.
  llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(GV), llvm::MDString::get(Ctx, Name),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
  // Append metadata to nvvm.annotations.
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}

bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createNVPTXTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<NVPTXTargetCodeGenInfo>(CGM.getTypes());
}