//===----- CGCUDANV.cpp - Interface to NVIDIA CUDA Runtime ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for CUDA code generation targeting the NVIDIA CUDA
// runtime library.
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/Cuda.h"
#include "clang/CodeGen/CodeGenABITypes.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/ReplaceConstant.h"
#include "llvm/Support/Format.h"

using namespace clang;
using namespace CodeGen;

namespace {
constexpr unsigned CudaFatMagic = 0x466243b1;
constexpr unsigned HIPFatMagic = 0x48495046; // "HIPF"

class CGNVCUDARuntime : public CGCUDARuntime {

private:
  llvm::IntegerType *IntTy, *SizeTy;
  llvm::Type *VoidTy;
  llvm::PointerType *CharPtrTy, *VoidPtrTy, *VoidPtrPtrTy;

  /// Convenience reference to LLVM Context
  llvm::LLVMContext &Context;
  /// Convenience reference to the current module
  llvm::Module &TheModule;
  /// Keeps track of kernel launch stubs and handles emitted in this module
  struct KernelInfo {
    llvm::Function *Kernel; // stub function to help launch kernel
    const Decl *D;
  };
  llvm::SmallVector<KernelInfo, 16> EmittedKernels;
  // Map a kernel mangled name to a symbol for identifying the kernel in host
  // code. For CUDA, the symbol for identifying the kernel is the same as the
  // device stub function. For HIP, they are different.
  llvm::DenseMap<StringRef, llvm::GlobalValue *> KernelHandles;
  // Map a kernel handle to the kernel stub.
  llvm::DenseMap<llvm::GlobalValue *, llvm::Function *> KernelStubs;
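  // Note (illustrative, assuming HIP's separate kernel-handle scheme): a
  // kernel mangled as _Z3foov typically gets a global variable named _Z3foov
  // as its handle, while the stub function is mangled with a __device_stub__
  // infix (e.g. _Z21__device_stub__foov); for CUDA the handle entry simply
  // refers to the stub function itself.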
  struct VarInfo {
    llvm::GlobalVariable *Var;
    const VarDecl *D;
    DeviceVarFlags Flags;
  };
  llvm::SmallVector<VarInfo, 16> DeviceVars;
  /// Keeps track of variable containing handle of GPU binary. Populated by
  /// ModuleCtorFunction() and used to create corresponding cleanup calls in
  /// ModuleDtorFunction()
  llvm::GlobalVariable *GpuBinaryHandle = nullptr;
  /// Whether we generate relocatable device code.
  bool RelocatableDeviceCode;
  /// Mangle context for device.
  std::unique_ptr<MangleContext> DeviceMC;
  /// Some zeros used for GEPs.
  llvm::Constant *Zeros[2];

  llvm::FunctionCallee getSetupArgumentFn() const;
  llvm::FunctionCallee getLaunchFn() const;

  llvm::FunctionType *getRegisterGlobalsFnTy() const;
  llvm::FunctionType *getCallbackFnTy() const;
  llvm::FunctionType *getRegisterLinkedBinaryFnTy() const;
  std::string addPrefixToName(StringRef FuncName) const;
  std::string addUnderscoredPrefixToName(StringRef FuncName) const;

  /// Creates a function to register all kernel stubs generated in this module.
  llvm::Function *makeRegisterGlobalsFn();

  /// Helper function that generates a constant string and returns a pointer to
  /// the start of the string. The result of this function can be used anywhere
  /// where the C code specifies const char*.
  llvm::Constant *makeConstantString(const std::string &Str,
                                     const std::string &Name = "") {
    auto ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
    return llvm::ConstantExpr::getGetElementPtr(ConstStr.getElementType(),
                                                ConstStr.getPointer(), Zeros);
  }

  /// Helper function which generates an initialized constant array from Str,
  /// and optionally sets section name and alignment. AddNull specifies whether
  /// the array should have NUL termination.
  llvm::Constant *makeConstantArray(StringRef Str,
                                    StringRef Name = "",
                                    StringRef SectionName = "",
                                    unsigned Alignment = 0,
                                    bool AddNull = false) {
    llvm::Constant *Value =
        llvm::ConstantDataArray::getString(Context, Str, AddNull);
    auto *GV = new llvm::GlobalVariable(
        TheModule, Value->getType(), /*isConstant=*/true,
        llvm::GlobalValue::PrivateLinkage, Value, Name);
    if (!SectionName.empty()) {
      GV->setSection(SectionName);
      // Mark the address as used, which makes sure that this section isn't
      // merged and we will really have it in the object file.
      GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::None);
    }
    if (Alignment)
      GV->setAlignment(llvm::Align(Alignment));
    return llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
  }

  /// Helper function that generates an empty dummy function returning void.
  llvm::Function *makeDummyFunction(llvm::FunctionType *FnTy) {
    assert(FnTy->getReturnType()->isVoidTy() &&
           "Can only generate dummy functions returning void!");
    llvm::Function *DummyFunc = llvm::Function::Create(
        FnTy, llvm::GlobalValue::InternalLinkage, "dummy", &TheModule);

    llvm::BasicBlock *DummyBlock =
        llvm::BasicBlock::Create(Context, "", DummyFunc);
    CGBuilderTy FuncBuilder(CGM, Context);
    FuncBuilder.SetInsertPoint(DummyBlock);
    FuncBuilder.CreateRetVoid();

    return DummyFunc;
  }

  void emitDeviceStubBodyLegacy(CodeGenFunction &CGF, FunctionArgList &Args);
  void emitDeviceStubBodyNew(CodeGenFunction &CGF, FunctionArgList &Args);
  std::string getDeviceSideName(const NamedDecl *ND) override;

  void registerDeviceVar(const VarDecl *VD, llvm::GlobalVariable &Var,
                         bool Extern, bool Constant) {
    DeviceVars.push_back({&Var,
                          VD,
                          {DeviceVarFlags::Variable, Extern, Constant,
                           VD->hasAttr<HIPManagedAttr>(),
                           /*Normalized*/ false, 0}});
  }
  void registerDeviceSurf(const VarDecl *VD, llvm::GlobalVariable &Var,
                          bool Extern, int Type) {
    DeviceVars.push_back({&Var,
                          VD,
                          {DeviceVarFlags::Surface, Extern, /*Constant*/ false,
                           /*Managed*/ false,
                           /*Normalized*/ false, Type}});
  }
  void registerDeviceTex(const VarDecl *VD, llvm::GlobalVariable &Var,
                         bool Extern, int Type, bool Normalized) {
    DeviceVars.push_back({&Var,
                          VD,
                          {DeviceVarFlags::Texture, Extern, /*Constant*/ false,
                           /*Managed*/ false, Normalized, Type}});
  }

  /// Creates module constructor function
  llvm::Function *makeModuleCtorFunction();
  /// Creates module destructor function
  llvm::Function *makeModuleDtorFunction();
  /// Transform managed variables for device compilation.
  void transformManagedVars();
  /// Create offloading entries to register globals in RDC mode.
  void createOffloadingEntries();

public:
  CGNVCUDARuntime(CodeGenModule &CGM);

  llvm::GlobalValue *getKernelHandle(llvm::Function *F, GlobalDecl GD) override;
  llvm::Function *getKernelStub(llvm::GlobalValue *Handle) override {
    auto Loc = KernelStubs.find(Handle);
    assert(Loc != KernelStubs.end());
    return Loc->second;
  }
  void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) override;
  void handleVarRegistration(const VarDecl *VD,
                             llvm::GlobalVariable &Var) override;
  void
  internalizeDeviceSideVar(const VarDecl *D,
                           llvm::GlobalValue::LinkageTypes &Linkage) override;

  llvm::Function *finalizeModule() override;
};

} // end anonymous namespace

std::string CGNVCUDARuntime::addPrefixToName(StringRef FuncName) const {
  if (CGM.getLangOpts().HIP)
    return ((Twine("hip") + Twine(FuncName)).str());
  return ((Twine("cuda") + Twine(FuncName)).str());
}
std::string
CGNVCUDARuntime::addUnderscoredPrefixToName(StringRef FuncName) const {
  if (CGM.getLangOpts().HIP)
    return ((Twine("__hip") + Twine(FuncName)).str());
  return ((Twine("__cuda") + Twine(FuncName)).str());
}

static std::unique_ptr<MangleContext> InitDeviceMC(CodeGenModule &CGM) {
  // If the host and device have different C++ ABIs, mark it as the device
  // mangle context so that the mangler retrieves the additional device lambda
  // mangling number instead of the regular host one.
  if (CGM.getContext().getAuxTargetInfo() &&
      CGM.getContext().getTargetInfo().getCXXABI().isMicrosoft() &&
      CGM.getContext().getAuxTargetInfo()->getCXXABI().isItaniumFamily()) {
    return std::unique_ptr<MangleContext>(
        CGM.getContext().createDeviceMangleContext(
            *CGM.getContext().getAuxTargetInfo()));
  }

  return std::unique_ptr<MangleContext>(CGM.getContext().createMangleContext(
      CGM.getContext().getAuxTargetInfo()));
}

CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
    : CGCUDARuntime(CGM), Context(CGM.getLLVMContext()),
      TheModule(CGM.getModule()),
      RelocatableDeviceCode(CGM.getLangOpts().GPURelocatableDeviceCode),
      DeviceMC(InitDeviceMC(CGM)) {
  CodeGen::CodeGenTypes &Types = CGM.getTypes();
  ASTContext &Ctx = CGM.getContext();

  IntTy = CGM.IntTy;
  SizeTy = CGM.SizeTy;
  VoidTy = CGM.VoidTy;
  Zeros[0] = llvm::ConstantInt::get(SizeTy, 0);
  Zeros[1] = Zeros[0];

  CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
  VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
  VoidPtrPtrTy = VoidPtrTy->getPointerTo();
}

llvm::FunctionCallee CGNVCUDARuntime::getSetupArgumentFn() const {
  // cudaError_t cudaSetupArgument(void *, size_t, size_t)
  llvm::Type *Params[] = {VoidPtrTy, SizeTy, SizeTy};
  return CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy, Params, false),
      addPrefixToName("SetupArgument"));
}

llvm::FunctionCallee CGNVCUDARuntime::getLaunchFn() const {
  if (CGM.getLangOpts().HIP) {
    // hipError_t hipLaunchByPtr(char *);
    return CGM.CreateRuntimeFunction(
        llvm::FunctionType::get(IntTy, CharPtrTy, false), "hipLaunchByPtr");
  }
  // cudaError_t cudaLaunch(char *);
  return CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy, CharPtrTy, false), "cudaLaunch");
}

llvm::FunctionType *CGNVCUDARuntime::getRegisterGlobalsFnTy() const {
  return llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false);
}

llvm::FunctionType *CGNVCUDARuntime::getCallbackFnTy() const {
  return llvm::FunctionType::get(VoidTy, VoidPtrTy, false);
}

llvm::FunctionType *CGNVCUDARuntime::getRegisterLinkedBinaryFnTy() const {
  auto *CallbackFnTy = getCallbackFnTy();
  auto *RegisterGlobalsFnTy = getRegisterGlobalsFnTy();
  llvm::Type *Params[] = {RegisterGlobalsFnTy->getPointerTo(), VoidPtrTy,
                          VoidPtrTy, CallbackFnTy->getPointerTo()};
  return llvm::FunctionType::get(VoidTy, Params, false);
}

std::string CGNVCUDARuntime::getDeviceSideName(const NamedDecl *ND) {
  GlobalDecl GD;
  // D could be either a kernel or a variable.
  if (auto *FD = dyn_cast<FunctionDecl>(ND))
    GD = GlobalDecl(FD, KernelReferenceKind::Kernel);
  else
    GD = GlobalDecl(ND);
  std::string DeviceSideName;
  MangleContext *MC;
  if (CGM.getLangOpts().CUDAIsDevice)
    MC = &CGM.getCXXABI().getMangleContext();
  else
    MC = DeviceMC.get();
  if (MC->shouldMangleDeclName(ND)) {
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    MC->mangleName(GD, Out);
    DeviceSideName = std::string(Out.str());
  } else
    DeviceSideName = std::string(ND->getIdentifier()->getName());

  // Make a unique name for the device-side static file-scope variable for HIP.
  if (CGM.getContext().shouldExternalize(ND) &&
      CGM.getLangOpts().GPURelocatableDeviceCode) {
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    Out << DeviceSideName;
    CGM.printPostfixForExternalizedDecl(Out, ND);
    DeviceSideName = std::string(Out.str());
  }
  return DeviceSideName;
}

void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
                                     FunctionArgList &Args) {
  EmittedKernels.push_back({CGF.CurFn, CGF.CurFuncDecl});
  if (auto *GV =
          dyn_cast<llvm::GlobalVariable>(KernelHandles[CGF.CurFn->getName()])) {
    GV->setLinkage(CGF.CurFn->getLinkage());
    GV->setInitializer(CGF.CurFn);
  }
  if (CudaFeatureEnabled(CGM.getTarget().getSDKVersion(),
                         CudaFeature::CUDA_USES_NEW_LAUNCH) ||
      (CGF.getLangOpts().HIP && CGF.getLangOpts().HIPUseNewLaunchAPI))
    emitDeviceStubBodyNew(CGF, Args);
  else
    emitDeviceStubBodyLegacy(CGF, Args);
}

// CUDA 9.0+ uses a new way to launch kernels. Parameters are packed in a local
// array and kernels are launched using cudaLaunchKernel().
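// Roughly, for a kernel `__global__ void k(int a, float *b)` the emitted stub
// body corresponds to the following pseudo-source (an illustrative sketch, not
// the exact IR):
// \code
// void __device_stub__k(int a, float *b) {
//   void *args[] = {&a, &b};
//   dim3 grid, block; size_t shmem; cudaStream_t stream;
//   __cudaPopCallConfiguration(&grid, &block, &shmem, &stream);
//   cudaLaunchKernel((void *)handle_of_k, grid, block, args, shmem, stream);
// }
// \endcode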
void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
                                            FunctionArgList &Args) {
  // Build the shadow stack entry at the very start of the function.

  // Calculate amount of space we will need for all arguments. If we have no
  // args, allocate a single pointer so we still have a valid pointer to the
  // argument array that we can pass to the runtime, even if it will be unused.
  Address KernelArgs = CGF.CreateTempAlloca(
      VoidPtrTy, CharUnits::fromQuantity(16), "kernel_args",
      llvm::ConstantInt::get(SizeTy, std::max<size_t>(1, Args.size())));
  // Store pointers to the arguments in a locally allocated launch_args.
  for (unsigned i = 0; i < Args.size(); ++i) {
    llvm::Value *VarPtr = CGF.GetAddrOfLocalVar(Args[i]).getPointer();
    llvm::Value *VoidVarPtr = CGF.Builder.CreatePointerCast(VarPtr, VoidPtrTy);
    CGF.Builder.CreateDefaultAlignedStore(
        VoidVarPtr,
        CGF.Builder.CreateConstGEP1_32(VoidPtrTy, KernelArgs.getPointer(), i));
  }

  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");

  // Lookup cudaLaunchKernel/hipLaunchKernel function.
  // The HIP kernel launching API name depends on the -fgpu-default-stream
  // option. For the default value 'legacy', it is hipLaunchKernel. For
  // 'per-thread', it is hipLaunchKernel_spt.
  // cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
  //                              void **args, size_t sharedMem,
  //                              cudaStream_t stream);
  // hipError_t hipLaunchKernel[_spt](const void *func, dim3 gridDim,
  //                                  dim3 blockDim, void **args,
  //                                  size_t sharedMem, hipStream_t stream);
  TranslationUnitDecl *TUDecl = CGM.getContext().getTranslationUnitDecl();
  DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
  std::string KernelLaunchAPI = "LaunchKernel";
  if (CGF.getLangOpts().HIP && CGF.getLangOpts().GPUDefaultStream ==
                                   LangOptions::GPUDefaultStreamKind::PerThread)
    KernelLaunchAPI = KernelLaunchAPI + "_spt";
  auto LaunchKernelName = addPrefixToName(KernelLaunchAPI);
  IdentifierInfo &cudaLaunchKernelII =
      CGM.getContext().Idents.get(LaunchKernelName);
  FunctionDecl *cudaLaunchKernelFD = nullptr;
  for (auto *Result : DC->lookup(&cudaLaunchKernelII)) {
    if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Result))
      cudaLaunchKernelFD = FD;
  }

  if (cudaLaunchKernelFD == nullptr) {
    CGM.Error(CGF.CurFuncDecl->getLocation(),
              "Can't find declaration for " + LaunchKernelName);
    return;
  }
  // Create temporary dim3 grid_dim, block_dim.
  ParmVarDecl *GridDimParam = cudaLaunchKernelFD->getParamDecl(1);
  QualType Dim3Ty = GridDimParam->getType();
  Address GridDim =
      CGF.CreateMemTemp(Dim3Ty, CharUnits::fromQuantity(8), "grid_dim");
  Address BlockDim =
      CGF.CreateMemTemp(Dim3Ty, CharUnits::fromQuantity(8), "block_dim");
  Address ShmemSize =
      CGF.CreateTempAlloca(SizeTy, CGM.getSizeAlign(), "shmem_size");
  Address Stream =
      CGF.CreateTempAlloca(VoidPtrTy, CGM.getPointerAlign(), "stream");
  llvm::FunctionCallee cudaPopConfigFn = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy,
                              {/*gridDim=*/GridDim.getType(),
                               /*blockDim=*/BlockDim.getType(),
                               /*ShmemSize=*/ShmemSize.getType(),
                               /*Stream=*/Stream.getType()},
                              /*isVarArg=*/false),
      addUnderscoredPrefixToName("PopCallConfiguration"));

  CGF.EmitRuntimeCallOrInvoke(cudaPopConfigFn,
                              {GridDim.getPointer(), BlockDim.getPointer(),
                               ShmemSize.getPointer(), Stream.getPointer()});

  // Emit the call to cudaLaunch
  llvm::Value *Kernel = CGF.Builder.CreatePointerCast(
      KernelHandles[CGF.CurFn->getName()], VoidPtrTy);
  CallArgList LaunchKernelArgs;
  LaunchKernelArgs.add(RValue::get(Kernel),
                       cudaLaunchKernelFD->getParamDecl(0)->getType());
  LaunchKernelArgs.add(RValue::getAggregate(GridDim), Dim3Ty);
  LaunchKernelArgs.add(RValue::getAggregate(BlockDim), Dim3Ty);
  LaunchKernelArgs.add(RValue::get(KernelArgs.getPointer()),
                       cudaLaunchKernelFD->getParamDecl(3)->getType());
  LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(ShmemSize)),
                       cudaLaunchKernelFD->getParamDecl(4)->getType());
  LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(Stream)),
                       cudaLaunchKernelFD->getParamDecl(5)->getType());

  QualType QT = cudaLaunchKernelFD->getType();
  QualType CQT = QT.getCanonicalType();
  llvm::Type *Ty = CGM.getTypes().ConvertType(CQT);
  llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeFunctionDeclaration(cudaLaunchKernelFD);
  llvm::FunctionCallee cudaLaunchKernelFn =
      CGM.CreateRuntimeFunction(FTy, LaunchKernelName);
  CGF.EmitCall(FI, CGCallee::forDirect(cudaLaunchKernelFn), ReturnValueSlot(),
               LaunchKernelArgs);
  CGF.EmitBranch(EndBlock);

  CGF.EmitBlock(EndBlock);
}

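// Roughly, the legacy stub body corresponds to the following pseudo-source (an
// illustrative sketch, not the exact IR): each argument is pushed with
// cudaSetupArgument at its naturally aligned offset, and the kernel is then
// launched through its handle.
// \code
// void __device_stub__k(int a, float *b) {
//   if (cudaSetupArgument(&a, sizeof(a), /*offset=*/0) == 0)
//     if (cudaSetupArgument(&b, sizeof(b), /*offset=*/8) == 0)
//       cudaLaunch((char *)handle_of_k);
// }
// \endcode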
void CGNVCUDARuntime::emitDeviceStubBodyLegacy(CodeGenFunction &CGF,
                                               FunctionArgList &Args) {
  // Emit a call to cudaSetupArgument for each arg in Args.
  llvm::FunctionCallee cudaSetupArgFn = getSetupArgumentFn();
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
  CharUnits Offset = CharUnits::Zero();
  for (const VarDecl *A : Args) {
    auto TInfo = CGM.getContext().getTypeInfoInChars(A->getType());
    Offset = Offset.alignTo(TInfo.Align);
    llvm::Value *Args[] = {
        CGF.Builder.CreatePointerCast(CGF.GetAddrOfLocalVar(A).getPointer(),
                                      VoidPtrTy),
        llvm::ConstantInt::get(SizeTy, TInfo.Width.getQuantity()),
        llvm::ConstantInt::get(SizeTy, Offset.getQuantity()),
    };
    llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(cudaSetupArgFn, Args);
    llvm::Constant *Zero = llvm::ConstantInt::get(IntTy, 0);
    llvm::Value *CBZero = CGF.Builder.CreateICmpEQ(CB, Zero);
    llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
    CGF.Builder.CreateCondBr(CBZero, NextBlock, EndBlock);
    CGF.EmitBlock(NextBlock);
    Offset += TInfo.Width;
  }

  // Emit the call to cudaLaunch
  llvm::FunctionCallee cudaLaunchFn = getLaunchFn();
  llvm::Value *Arg = CGF.Builder.CreatePointerCast(
      KernelHandles[CGF.CurFn->getName()], CharPtrTy);
  CGF.EmitRuntimeCallOrInvoke(cudaLaunchFn, Arg);
  CGF.EmitBranch(EndBlock);

  CGF.EmitBlock(EndBlock);
}

// Replace the original variable Var with the address loaded from variable
// ManagedVar populated by the HIP runtime.
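// For example (an illustrative IR sketch, not exact output): a direct use of
// Var such as
//   %v = load i32, i32* @Var
// becomes an access through the pointer the runtime stores into ManagedVar:
//   %ld.managed = load i32*, i32** @ManagedVar
//   %v = load i32, i32* %ld.managed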
static void replaceManagedVar(llvm::GlobalVariable *Var,
                              llvm::GlobalVariable *ManagedVar) {
  SmallVector<SmallVector<llvm::User *, 8>, 8> WorkList;
  for (auto &&VarUse : Var->uses()) {
    WorkList.push_back({VarUse.getUser()});
  }
  while (!WorkList.empty()) {
    auto &&WorkItem = WorkList.pop_back_val();
    auto *U = WorkItem.back();
    if (isa<llvm::ConstantExpr>(U)) {
      for (auto &&UU : U->uses()) {
        WorkItem.push_back(UU.getUser());
        WorkList.push_back(WorkItem);
        WorkItem.pop_back();
      }
      continue;
    }
    if (auto *I = dyn_cast<llvm::Instruction>(U)) {
      llvm::Value *OldV = Var;
      llvm::Instruction *NewV =
          new llvm::LoadInst(Var->getType(), ManagedVar, "ld.managed", false,
                             llvm::Align(Var->getAlignment()), I);
      WorkItem.pop_back();
      // Replace constant expressions directly or indirectly using the managed
      // variable with instructions.
      for (auto &&Op : WorkItem) {
        auto *CE = cast<llvm::ConstantExpr>(Op);
        auto *NewInst = CE->getAsInstruction(I);
        NewInst->replaceUsesOfWith(OldV, NewV);
        OldV = CE;
        NewV = NewInst;
      }
      I->replaceUsesOfWith(OldV, NewV);
    } else {
      llvm_unreachable("Invalid use of managed variable");
    }
  }
}

/// Creates a function that sets up state on the host side for CUDA objects that
/// have a presence on both the host and device sides. Specifically, registers
/// the host side of kernel functions and device global variables with the CUDA
/// runtime.
/// \code
/// void __cuda_register_globals(void** GpuBinaryHandle) {
///    __cudaRegisterFunction(GpuBinaryHandle,Kernel0,...);
///    ...
///    __cudaRegisterFunction(GpuBinaryHandle,KernelM,...);
///    __cudaRegisterVar(GpuBinaryHandle, GlobalVar0, ...);
///    ...
///    __cudaRegisterVar(GpuBinaryHandle, GlobalVarN, ...);
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
  // No need to register anything
  if (EmittedKernels.empty() && DeviceVars.empty())
    return nullptr;

  llvm::Function *RegisterKernelsFunc = llvm::Function::Create(
      getRegisterGlobalsFnTy(), llvm::GlobalValue::InternalLinkage,
      addUnderscoredPrefixToName("_register_globals"), &TheModule);
  llvm::BasicBlock *EntryBB =
      llvm::BasicBlock::Create(Context, "entry", RegisterKernelsFunc);
  CGBuilderTy Builder(CGM, Context);
  Builder.SetInsertPoint(EntryBB);

  // void __cudaRegisterFunction(void **, const char *, char *, const char *,
  //                             int, uint3*, uint3*, dim3*, dim3*, int*)
  llvm::Type *RegisterFuncParams[] = {
      VoidPtrPtrTy, CharPtrTy, CharPtrTy, CharPtrTy, IntTy,
      VoidPtrTy,    VoidPtrTy, VoidPtrTy, VoidPtrTy, IntTy->getPointerTo()};
  llvm::FunctionCallee RegisterFunc = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(IntTy, RegisterFuncParams, false),
      addUnderscoredPrefixToName("RegisterFunction"));

  // Extract GpuBinaryHandle passed as the first argument to
  // __cuda_register_globals() and generate a __cudaRegisterFunction() call for
  // each emitted kernel.
  llvm::Argument &GpuBinaryHandlePtr = *RegisterKernelsFunc->arg_begin();
  for (auto &&I : EmittedKernels) {
    llvm::Constant *KernelName =
        makeConstantString(getDeviceSideName(cast<NamedDecl>(I.D)));
    llvm::Constant *NullPtr = llvm::ConstantPointerNull::get(VoidPtrTy);
    llvm::Value *Args[] = {
        &GpuBinaryHandlePtr,
        Builder.CreateBitCast(KernelHandles[I.Kernel->getName()], VoidPtrTy),
        KernelName,
        KernelName,
        llvm::ConstantInt::get(IntTy, -1),
        NullPtr,
        NullPtr,
        NullPtr,
        NullPtr,
        llvm::ConstantPointerNull::get(IntTy->getPointerTo())};
    Builder.CreateCall(RegisterFunc, Args);
  }

  llvm::Type *VarSizeTy = IntTy;
  // For HIP or CUDA 9.0+, the device variable size has type `size_t`.
  if (CGM.getLangOpts().HIP ||
      ToCudaVersion(CGM.getTarget().getSDKVersion()) >= CudaVersion::CUDA_90)
    VarSizeTy = SizeTy;

  // void __cudaRegisterVar(void **, char *, char *, const char *,
  //                        int, int, int, int)
  llvm::Type *RegisterVarParams[] = {VoidPtrPtrTy, CharPtrTy, CharPtrTy,
                                     CharPtrTy,    IntTy,     VarSizeTy,
                                     IntTy,        IntTy};
  llvm::FunctionCallee RegisterVar = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(VoidTy, RegisterVarParams, false),
      addUnderscoredPrefixToName("RegisterVar"));
  // void __hipRegisterManagedVar(void **, char *, char *, const char *,
  //                              size_t, unsigned)
  llvm::Type *RegisterManagedVarParams[] = {VoidPtrPtrTy, CharPtrTy, CharPtrTy,
                                            CharPtrTy,    VarSizeTy, IntTy};
  llvm::FunctionCallee RegisterManagedVar = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(VoidTy, RegisterManagedVarParams, false),
      addUnderscoredPrefixToName("RegisterManagedVar"));
  // void __cudaRegisterSurface(void **, const struct surfaceReference *,
  //                            const void **, const char *, int, int);
  llvm::FunctionCallee RegisterSurf = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(
          VoidTy, {VoidPtrPtrTy, VoidPtrTy, CharPtrTy, CharPtrTy, IntTy, IntTy},
          false),
      addUnderscoredPrefixToName("RegisterSurface"));
  // void __cudaRegisterTexture(void **, const struct textureReference *,
  //                            const void **, const char *, int, int, int)
  llvm::FunctionCallee RegisterTex = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(
          VoidTy,
          {VoidPtrPtrTy, VoidPtrTy, CharPtrTy, CharPtrTy, IntTy, IntTy, IntTy},
          false),
      addUnderscoredPrefixToName("RegisterTexture"));
  for (auto &&Info : DeviceVars) {
    llvm::GlobalVariable *Var = Info.Var;
    assert((!Var->isDeclaration() || Info.Flags.isManaged()) &&
           "External variables should not show up here, except HIP managed "
           "variables");
    llvm::Constant *VarName = makeConstantString(getDeviceSideName(Info.D));
    switch (Info.Flags.getKind()) {
    case DeviceVarFlags::Variable: {
      uint64_t VarSize =
          CGM.getDataLayout().getTypeAllocSize(Var->getValueType());
      if (Info.Flags.isManaged()) {
        auto *ManagedVar = new llvm::GlobalVariable(
            CGM.getModule(), Var->getType(),
            /*isConstant=*/false, Var->getLinkage(),
            /*Init=*/Var->isDeclaration()
                ? nullptr
                : llvm::ConstantPointerNull::get(Var->getType()),
            /*Name=*/"", /*InsertBefore=*/nullptr,
            llvm::GlobalVariable::NotThreadLocal);
        ManagedVar->setDSOLocal(Var->isDSOLocal());
        ManagedVar->setVisibility(Var->getVisibility());
        ManagedVar->setExternallyInitialized(true);
        ManagedVar->takeName(Var);
        Var->setName(Twine(ManagedVar->getName() + ".managed"));
        replaceManagedVar(Var, ManagedVar);
        llvm::Value *Args[] = {
            &GpuBinaryHandlePtr,
            Builder.CreateBitCast(ManagedVar, VoidPtrTy),
            Builder.CreateBitCast(Var, VoidPtrTy),
            VarName,
            llvm::ConstantInt::get(VarSizeTy, VarSize),
            llvm::ConstantInt::get(IntTy, Var->getAlignment())};
        if (!Var->isDeclaration())
          Builder.CreateCall(RegisterManagedVar, Args);
      } else {
        llvm::Value *Args[] = {
            &GpuBinaryHandlePtr,
            Builder.CreateBitCast(Var, VoidPtrTy),
            VarName,
            VarName,
            llvm::ConstantInt::get(IntTy, Info.Flags.isExtern()),
            llvm::ConstantInt::get(VarSizeTy, VarSize),
            llvm::ConstantInt::get(IntTy, Info.Flags.isConstant()),
            llvm::ConstantInt::get(IntTy, 0)};
        Builder.CreateCall(RegisterVar, Args);
      }
      break;
    }
    case DeviceVarFlags::Surface:
      Builder.CreateCall(
          RegisterSurf,
          {&GpuBinaryHandlePtr, Builder.CreateBitCast(Var, VoidPtrTy), VarName,
           VarName, llvm::ConstantInt::get(IntTy, Info.Flags.getSurfTexType()),
           llvm::ConstantInt::get(IntTy, Info.Flags.isExtern())});
      break;
    case DeviceVarFlags::Texture:
      Builder.CreateCall(
          RegisterTex,
          {&GpuBinaryHandlePtr, Builder.CreateBitCast(Var, VoidPtrTy), VarName,
           VarName, llvm::ConstantInt::get(IntTy, Info.Flags.getSurfTexType()),
           llvm::ConstantInt::get(IntTy, Info.Flags.isNormalized()),
           llvm::ConstantInt::get(IntTy, Info.Flags.isExtern())});
      break;
    }
  }

  Builder.CreateRetVoid();
  return RegisterKernelsFunc;
}

/// Creates a global constructor function for the module:
///
/// For CUDA:
/// \code
/// void __cuda_module_ctor() {
///     Handle = __cudaRegisterFatBinary(GpuBinaryBlob);
///     __cuda_register_globals(Handle);
/// }
/// \endcode
///
/// For HIP:
/// \code
/// void __hip_module_ctor() {
///     if (__hip_gpubin_handle == 0) {
///         __hip_gpubin_handle = __hipRegisterFatBinary(GpuBinaryBlob);
///         __hip_register_globals(__hip_gpubin_handle);
///     }
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
  bool IsHIP = CGM.getLangOpts().HIP;
  bool IsCUDA = CGM.getLangOpts().CUDA;
  // No need to generate ctors/dtors if there is no GPU binary.
  StringRef CudaGpuBinaryFileName = CGM.getCodeGenOpts().CudaGpuBinaryFileName;
  if (CudaGpuBinaryFileName.empty() && !IsHIP)
    return nullptr;
  if ((IsHIP || (IsCUDA && !RelocatableDeviceCode)) && EmittedKernels.empty() &&
      DeviceVars.empty())
    return nullptr;

  // void __{cuda|hip}_register_globals(void* handle);
  llvm::Function *RegisterGlobalsFunc = makeRegisterGlobalsFn();
  // We always need a function to pass in as callback. Create a dummy
  // implementation if we don't need to register anything.
  if (RelocatableDeviceCode && !RegisterGlobalsFunc)
    RegisterGlobalsFunc = makeDummyFunction(getRegisterGlobalsFnTy());

  // void ** __{cuda|hip}RegisterFatBinary(void *);
  llvm::FunctionCallee RegisterFatbinFunc = CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(VoidPtrPtrTy, VoidPtrTy, false),
      addUnderscoredPrefixToName("RegisterFatBinary"));
  // struct { int magic, int version, void * gpu_binary, void * dont_care };
  llvm::StructType *FatbinWrapperTy =
      llvm::StructType::get(IntTy, IntTy, VoidPtrTy, VoidPtrTy);

  // Register the GPU binary with the CUDA runtime, store the returned handle
  // in a global variable and save a reference in GpuBinaryHandle to be cleaned
  // up in the destructor on exit. Then associate all known kernels with the
  // GPU binary handle so the CUDA runtime can figure out what to call on the
  // GPU side.
  std::unique_ptr<llvm::MemoryBuffer> CudaGpuBinary = nullptr;
  if (!CudaGpuBinaryFileName.empty()) {
    llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> CudaGpuBinaryOrErr =
        llvm::MemoryBuffer::getFileOrSTDIN(CudaGpuBinaryFileName);
    if (std::error_code EC = CudaGpuBinaryOrErr.getError()) {
      CGM.getDiags().Report(diag::err_cannot_open_file)
          << CudaGpuBinaryFileName << EC.message();
      return nullptr;
    }
    CudaGpuBinary = std::move(CudaGpuBinaryOrErr.get());
  }

  llvm::Function *ModuleCtorFunc = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, false),
      llvm::GlobalValue::InternalLinkage,
      addUnderscoredPrefixToName("_module_ctor"), &TheModule);
  llvm::BasicBlock *CtorEntryBB =
      llvm::BasicBlock::Create(Context, "entry", ModuleCtorFunc);
  CGBuilderTy CtorBuilder(CGM, Context);

  CtorBuilder.SetInsertPoint(CtorEntryBB);

  const char *FatbinConstantName;
  const char *FatbinSectionName;
  const char *ModuleIDSectionName;
  StringRef ModuleIDPrefix;
  llvm::Constant *FatBinStr;
  unsigned FatMagic;
  if (IsHIP) {
    FatbinConstantName = ".hip_fatbin";
    FatbinSectionName = ".hipFatBinSegment";

    ModuleIDSectionName = "__hip_module_id";
    ModuleIDPrefix = "__hip_";

    if (CudaGpuBinary) {
      // If the fatbin is available from early finalization, create a string
      // literal containing the fat binary loaded from the given file.
      const unsigned HIPCodeObjectAlign = 4096;
      FatBinStr = makeConstantArray(std::string(CudaGpuBinary->getBuffer()), "",
                                    FatbinConstantName, HIPCodeObjectAlign);
    } else {
      // If the fatbin is not available, create an external symbol
      // __hip_fatbin in section .hip_fatbin. The external symbol is supposed
      // to contain the fat binary but will be populated somewhere else,
      // e.g. by lld through a linker script.
      FatBinStr = new llvm::GlobalVariable(
          CGM.getModule(), CGM.Int8Ty,
          /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr,
          "__hip_fatbin", nullptr,
          llvm::GlobalVariable::NotThreadLocal);
      cast<llvm::GlobalVariable>(FatBinStr)->setSection(FatbinConstantName);
    }

    FatMagic = HIPFatMagic;
  } else {
    if (RelocatableDeviceCode)
      FatbinConstantName = CGM.getTriple().isMacOSX()
                               ? "__NV_CUDA,__nv_relfatbin"
                               : "__nv_relfatbin";
    else
      FatbinConstantName =
          CGM.getTriple().isMacOSX() ? "__NV_CUDA,__nv_fatbin" : ".nv_fatbin";
    // NVIDIA's cuobjdump looks for fatbins in this section.
    FatbinSectionName =
        CGM.getTriple().isMacOSX() ? "__NV_CUDA,__fatbin" : ".nvFatBinSegment";

    ModuleIDSectionName = CGM.getTriple().isMacOSX()
                              ? "__NV_CUDA,__nv_module_id"
                              : "__nv_module_id";
    ModuleIDPrefix = "__nv_";

    // For CUDA, create a string literal containing the fat binary loaded from
    // the given file.
    FatBinStr = makeConstantArray(std::string(CudaGpuBinary->getBuffer()), "",
                                  FatbinConstantName, 8);
    FatMagic = CudaFatMagic;
  }

  // Create an initialized wrapper structure that points to the loaded GPU
  // binary.
  ConstantInitBuilder Builder(CGM);
  auto Values = Builder.beginStruct(FatbinWrapperTy);
  // Fatbin wrapper magic.
  Values.addInt(IntTy, FatMagic);
  // Fatbin version.
  Values.addInt(IntTy, 1);
  // Data.
  Values.add(FatBinStr);
  // Unused in fatbin v1.
  Values.add(llvm::ConstantPointerNull::get(VoidPtrTy));
  llvm::GlobalVariable *FatbinWrapper = Values.finishAndCreateGlobal(
      addUnderscoredPrefixToName("_fatbin_wrapper"), CGM.getPointerAlign(),
      /*constant*/ true);
  FatbinWrapper->setSection(FatbinSectionName);

  // There is only one HIP fat binary per linked module, however there are
  // multiple constructor functions. Make sure the fat binary is registered
  // only once. The constructor functions are executed by the dynamic loader
  // before the program gains control. The dynamic loader cannot execute the
  // constructor functions concurrently since doing that would not guarantee
  // thread safety of the loaded program. Therefore we can assume sequential
  // execution of constructor functions here.
  if (IsHIP) {
    auto Linkage = CudaGpuBinary ? llvm::GlobalValue::InternalLinkage
                                 : llvm::GlobalValue::LinkOnceAnyLinkage;
    llvm::BasicBlock *IfBlock =
        llvm::BasicBlock::Create(Context, "if", ModuleCtorFunc);
    llvm::BasicBlock *ExitBlock =
        llvm::BasicBlock::Create(Context, "exit", ModuleCtorFunc);
    // The name, size, and initialization pattern of this variable are part
    // of the HIP ABI.
    GpuBinaryHandle = new llvm::GlobalVariable(
        TheModule, VoidPtrPtrTy, /*isConstant=*/false,
        Linkage,
        /*Initializer=*/llvm::ConstantPointerNull::get(VoidPtrPtrTy),
        "__hip_gpubin_handle");
    if (Linkage == llvm::GlobalValue::LinkOnceAnyLinkage)
      GpuBinaryHandle->setComdat(
          CGM.getModule().getOrInsertComdat(GpuBinaryHandle->getName()));
    GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getAsAlign());
    // Prevent the weak symbol in different shared libraries from being merged.
    if (Linkage != llvm::GlobalValue::InternalLinkage)
      GpuBinaryHandle->setVisibility(llvm::GlobalValue::HiddenVisibility);
    Address GpuBinaryAddr(
        GpuBinaryHandle, VoidPtrPtrTy,
        CharUnits::fromQuantity(GpuBinaryHandle->getAlignment()));
    {
      auto *HandleValue = CtorBuilder.CreateLoad(GpuBinaryAddr);
      llvm::Constant *Zero =
          llvm::Constant::getNullValue(HandleValue->getType());
      llvm::Value *EQZero = CtorBuilder.CreateICmpEQ(HandleValue, Zero);
      CtorBuilder.CreateCondBr(EQZero, IfBlock, ExitBlock);
    }
    {
      CtorBuilder.SetInsertPoint(IfBlock);
      // GpuBinaryHandle = __hipRegisterFatBinary(&FatbinWrapper);
      llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
          RegisterFatbinFunc,
          CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
      CtorBuilder.CreateStore(RegisterFatbinCall, GpuBinaryAddr);
      CtorBuilder.CreateBr(ExitBlock);
    }
    {
      CtorBuilder.SetInsertPoint(ExitBlock);
      // Call __hip_register_globals(GpuBinaryHandle);
      if (RegisterGlobalsFunc) {
        auto *HandleValue = CtorBuilder.CreateLoad(GpuBinaryAddr);
        CtorBuilder.CreateCall(RegisterGlobalsFunc, HandleValue);
      }
    }
  } else if (!RelocatableDeviceCode) {
    // Register the binary with the CUDA runtime. This is substantially
    // different in default mode vs. separate compilation!
    // GpuBinaryHandle = __cudaRegisterFatBinary(&FatbinWrapper);
    llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
        RegisterFatbinFunc,
        CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
    GpuBinaryHandle = new llvm::GlobalVariable(
        TheModule, VoidPtrPtrTy, false, llvm::GlobalValue::InternalLinkage,
        llvm::ConstantPointerNull::get(VoidPtrPtrTy), "__cuda_gpubin_handle");
    GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getAsAlign());
    CtorBuilder.CreateAlignedStore(RegisterFatbinCall, GpuBinaryHandle,
                                   CGM.getPointerAlign());

    // Call __cuda_register_globals(GpuBinaryHandle);
    if (RegisterGlobalsFunc)
      CtorBuilder.CreateCall(RegisterGlobalsFunc, RegisterFatbinCall);

    // Call __cudaRegisterFatBinaryEnd(Handle) if this CUDA version needs it.
    if (CudaFeatureEnabled(CGM.getTarget().getSDKVersion(),
                           CudaFeature::CUDA_USES_FATBIN_REGISTER_END)) {
      // void __cudaRegisterFatBinaryEnd(void **);
      llvm::FunctionCallee RegisterFatbinEndFunc = CGM.CreateRuntimeFunction(
          llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
          "__cudaRegisterFatBinaryEnd");
      CtorBuilder.CreateCall(RegisterFatbinEndFunc, RegisterFatbinCall);
    }
  } else {
    // Generate a unique module ID.
    SmallString<64> ModuleID;
    llvm::raw_svector_ostream OS(ModuleID);
    OS << ModuleIDPrefix << llvm::format("%" PRIx64, FatbinWrapper->getGUID());
    llvm::Constant *ModuleIDConstant = makeConstantArray(
        std::string(ModuleID.str()), "", ModuleIDSectionName, 32,
        /*AddNull=*/true);

    // Create an alias for the FatbinWrapper that nvcc will look for.
    llvm::GlobalAlias::create(llvm::GlobalValue::ExternalLinkage,
                              Twine("__fatbinwrap") + ModuleID, FatbinWrapper);

    // void __cudaRegisterLinkedBinary%ModuleID%(void (*)(void *), void *,
    //                                           void *, void (*)(void **))
    SmallString<128> RegisterLinkedBinaryName("__cudaRegisterLinkedBinary");
    RegisterLinkedBinaryName += ModuleID;
    llvm::FunctionCallee RegisterLinkedBinaryFunc = CGM.CreateRuntimeFunction(
        getRegisterLinkedBinaryFnTy(), RegisterLinkedBinaryName);

    assert(RegisterGlobalsFunc && "Expecting at least dummy function!");
    llvm::Value *Args[] = {RegisterGlobalsFunc,
                           CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy),
                           ModuleIDConstant,
                           makeDummyFunction(getCallbackFnTy())};
    CtorBuilder.CreateCall(RegisterLinkedBinaryFunc, Args);
  }

  // Create a destructor and register it with atexit() the way NVCC does it.
  // Doing it during the regular destructor phase worked in CUDA before 9.2 but
  // results in a double-free in 9.2.
  if (llvm::Function *CleanupFn = makeModuleDtorFunction()) {
    // extern "C" int atexit(void (*f)(void));
    llvm::FunctionType *AtExitTy =
        llvm::FunctionType::get(IntTy, CleanupFn->getType(), false);
    llvm::FunctionCallee AtExitFunc =
        CGM.CreateRuntimeFunction(AtExitTy, "atexit", llvm::AttributeList(),
                                  /*Local=*/true);
    CtorBuilder.CreateCall(AtExitFunc, CleanupFn);
  }

  CtorBuilder.CreateRetVoid();
  return ModuleCtorFunc;
}
942e5dd7070Spatrick
943e5dd7070Spatrick /// Creates a global destructor function that unregisters the GPU code blob
944e5dd7070Spatrick /// registered by constructor.
945e5dd7070Spatrick ///
946e5dd7070Spatrick /// For CUDA:
947e5dd7070Spatrick /// \code
948*12c85518Srobert /// void __cuda_module_dtor() {
949e5dd7070Spatrick /// __cudaUnregisterFatBinary(Handle);
950e5dd7070Spatrick /// }
951e5dd7070Spatrick /// \endcode
952e5dd7070Spatrick ///
953e5dd7070Spatrick /// For HIP:
954e5dd7070Spatrick /// \code
955*12c85518Srobert /// void __hip_module_dtor() {
956e5dd7070Spatrick /// if (__hip_gpubin_handle) {
957e5dd7070Spatrick /// __hipUnregisterFatBinary(__hip_gpubin_handle);
958e5dd7070Spatrick /// __hip_gpubin_handle = 0;
959e5dd7070Spatrick /// }
960e5dd7070Spatrick /// }
961e5dd7070Spatrick /// \endcode
makeModuleDtorFunction()962e5dd7070Spatrick llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
963e5dd7070Spatrick // No need for destructor if we don't have a handle to unregister.
964e5dd7070Spatrick if (!GpuBinaryHandle)
965e5dd7070Spatrick return nullptr;
966e5dd7070Spatrick
967e5dd7070Spatrick // void __cudaUnregisterFatBinary(void ** handle);
968e5dd7070Spatrick llvm::FunctionCallee UnregisterFatbinFunc = CGM.CreateRuntimeFunction(
969e5dd7070Spatrick llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
970e5dd7070Spatrick addUnderscoredPrefixToName("UnregisterFatBinary"));
971e5dd7070Spatrick
972e5dd7070Spatrick llvm::Function *ModuleDtorFunc = llvm::Function::Create(
973*12c85518Srobert llvm::FunctionType::get(VoidTy, false),
974e5dd7070Spatrick llvm::GlobalValue::InternalLinkage,
975e5dd7070Spatrick addUnderscoredPrefixToName("_module_dtor"), &TheModule);
976e5dd7070Spatrick
977e5dd7070Spatrick llvm::BasicBlock *DtorEntryBB =
978e5dd7070Spatrick llvm::BasicBlock::Create(Context, "entry", ModuleDtorFunc);
979e5dd7070Spatrick CGBuilderTy DtorBuilder(CGM, Context);
980e5dd7070Spatrick DtorBuilder.SetInsertPoint(DtorEntryBB);
981e5dd7070Spatrick
982*12c85518Srobert Address GpuBinaryAddr(
983*12c85518Srobert GpuBinaryHandle, GpuBinaryHandle->getValueType(),
984*12c85518Srobert CharUnits::fromQuantity(GpuBinaryHandle->getAlignment()));
985*12c85518Srobert auto *HandleValue = DtorBuilder.CreateLoad(GpuBinaryAddr);
986e5dd7070Spatrick // There is only one HIP fat binary per linked module, but there may be
987e5dd7070Spatrick // multiple destructor functions. Make sure the fat binary is unregistered
988e5dd7070Spatrick // only once.
989e5dd7070Spatrick if (CGM.getLangOpts().HIP) {
990e5dd7070Spatrick llvm::BasicBlock *IfBlock =
991e5dd7070Spatrick llvm::BasicBlock::Create(Context, "if", ModuleDtorFunc);
992e5dd7070Spatrick llvm::BasicBlock *ExitBlock =
993e5dd7070Spatrick llvm::BasicBlock::Create(Context, "exit", ModuleDtorFunc);
994e5dd7070Spatrick llvm::Constant *Zero = llvm::Constant::getNullValue(HandleValue->getType());
995e5dd7070Spatrick llvm::Value *NEZero = DtorBuilder.CreateICmpNE(HandleValue, Zero);
996e5dd7070Spatrick DtorBuilder.CreateCondBr(NEZero, IfBlock, ExitBlock);
997e5dd7070Spatrick
998e5dd7070Spatrick DtorBuilder.SetInsertPoint(IfBlock);
999e5dd7070Spatrick DtorBuilder.CreateCall(UnregisterFatbinFunc, HandleValue);
1000e5dd7070Spatrick DtorBuilder.CreateStore(Zero, GpuBinaryAddr);
1001e5dd7070Spatrick DtorBuilder.CreateBr(ExitBlock);
1002e5dd7070Spatrick
1003e5dd7070Spatrick DtorBuilder.SetInsertPoint(ExitBlock);
1004e5dd7070Spatrick } else {
1005e5dd7070Spatrick DtorBuilder.CreateCall(UnregisterFatbinFunc, HandleValue);
1006e5dd7070Spatrick }
1007e5dd7070Spatrick DtorBuilder.CreateRetVoid();
1008e5dd7070Spatrick return ModuleDtorFunc;
1009e5dd7070Spatrick }
1010e5dd7070Spatrick
1011e5dd7070Spatrick CGCUDARuntime *CodeGen::CreateNVCUDARuntime(CodeGenModule &CGM) {
1012e5dd7070Spatrick return new CGNVCUDARuntime(CGM);
1013e5dd7070Spatrick }
1014a9ac8606Spatrick
1015a9ac8606Spatrick void CGNVCUDARuntime::internalizeDeviceSideVar(
1016a9ac8606Spatrick const VarDecl *D, llvm::GlobalValue::LinkageTypes &Linkage) {
1017a9ac8606Spatrick // For -fno-gpu-rdc, host-side shadows of external declarations of device-side
1018a9ac8606Spatrick // global variables become internal definitions. These have to be internal in
1019a9ac8606Spatrick // order to prevent name conflicts with global host variables with the same
1020a9ac8606Spatrick // name in different TUs.
1021a9ac8606Spatrick //
1022a9ac8606Spatrick // For -fgpu-rdc, the shadow variables should not be internalized because
1023a9ac8606Spatrick // they may be accessed by a different TU.
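// As an illustrative (hypothetical) example: for
//   __device__ int counter;
// the host-side shadow of `counter` gets internal linkage under -fno-gpu-rdc,
// so a host-only `int counter` defined in another TU cannot collide with it;
// under -fgpu-rdc the shadow stays external so other TUs can reference it.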
1024a9ac8606Spatrick if (CGM.getLangOpts().GPURelocatableDeviceCode)
1025a9ac8606Spatrick return;
1026a9ac8606Spatrick
1027a9ac8606Spatrick // __shared__ variables are odd. Shadows do get created, but
1028a9ac8606Spatrick // they are not registered with the CUDA runtime, so they
1029a9ac8606Spatrick // can't really be used to access their device-side
1030a9ac8606Spatrick // counterparts. It is not clear yet whether this is a bug or a feature
1031a9ac8606Spatrick // of nvcc, but we have to do the same for compatibility.
1032a9ac8606Spatrick if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
1033a9ac8606Spatrick D->hasAttr<CUDASharedAttr>() ||
1034a9ac8606Spatrick D->getType()->isCUDADeviceBuiltinSurfaceType() ||
1035a9ac8606Spatrick D->getType()->isCUDADeviceBuiltinTextureType()) {
1036a9ac8606Spatrick Linkage = llvm::GlobalValue::InternalLinkage;
1037a9ac8606Spatrick }
1038a9ac8606Spatrick }
1039a9ac8606Spatrick
1040a9ac8606Spatrick void CGNVCUDARuntime::handleVarRegistration(const VarDecl *D,
1041a9ac8606Spatrick llvm::GlobalVariable &GV) {
1042a9ac8606Spatrick if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>()) {
1043a9ac8606Spatrick // Shadow variables and their properties must be registered with CUDA
1044a9ac8606Spatrick // runtime. Skip extern global variables, which will be registered in
1045a9ac8606Spatrick // the TU where they are defined.
1046a9ac8606Spatrick //
1047a9ac8606Spatrick // Don't register a C++17 inline variable. The local symbol can be
1048a9ac8606Spatrick // discarded and referencing a discarded local symbol from outside the
1049a9ac8606Spatrick // comdat (__cuda_register_globals) is disallowed by the ELF spec.
1050a9ac8606Spatrick //
1051a9ac8606Spatrick // HIP managed variables need to be always recorded in device and host
1052a9ac8606Spatrick // compilations for transformation.
1053a9ac8606Spatrick //
1054a9ac8606Spatrick // HIP managed variables and variables in CUDADeviceVarODRUsedByHost are
1055a9ac8606Spatrick // added to llvm.compiler.used, so they are safe to register.
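// A few illustrative (hypothetical) declarations and how they are handled:
//   __device__ int def;           // defined here: registered below
//   extern __device__ int ext;    // extern: registered in the defining TU
//   inline __device__ int iv = 0; // C++17 inline: skipped
//   __managed__ int m;            // managed: always recorded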
1056a9ac8606Spatrick if ((!D->hasExternalStorage() && !D->isInline()) ||
1057a9ac8606Spatrick CGM.getContext().CUDADeviceVarODRUsedByHost.contains(D) ||
1058a9ac8606Spatrick D->hasAttr<HIPManagedAttr>()) {
1059a9ac8606Spatrick registerDeviceVar(D, GV, !D->hasDefinition(),
1060a9ac8606Spatrick D->hasAttr<CUDAConstantAttr>());
1061a9ac8606Spatrick }
1062a9ac8606Spatrick } else if (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
1063a9ac8606Spatrick D->getType()->isCUDADeviceBuiltinTextureType()) {
1064a9ac8606Spatrick // Builtin surfaces and textures and their template arguments are
1065a9ac8606Spatrick // also registered with the CUDA runtime.
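// For instance (a sketch; the real templates live in the CUDA headers):
//   surface<void, cudaSurfaceType2D> surf;        // Args[1] = surface type
//   texture<float, cudaTextureType2D,
//           cudaReadModeNormalizedFloat> tex;     // Args[1] = texture type,
//                                                 // Args[2] = read mode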
1066a9ac8606Spatrick const auto *TD = cast<ClassTemplateSpecializationDecl>(
1067a9ac8606Spatrick D->getType()->castAs<RecordType>()->getDecl());
1068a9ac8606Spatrick const TemplateArgumentList &Args = TD->getTemplateArgs();
1069a9ac8606Spatrick if (TD->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>()) {
1070a9ac8606Spatrick assert(Args.size() == 2 &&
1071a9ac8606Spatrick "Unexpected number of template arguments of CUDA device "
1072a9ac8606Spatrick "builtin surface type.");
1073a9ac8606Spatrick auto SurfType = Args[1].getAsIntegral();
1074a9ac8606Spatrick if (!D->hasExternalStorage())
1075a9ac8606Spatrick registerDeviceSurf(D, GV, !D->hasDefinition(), SurfType.getSExtValue());
1076a9ac8606Spatrick } else {
1077a9ac8606Spatrick assert(Args.size() == 3 &&
1078a9ac8606Spatrick "Unexpected number of template arguments of CUDA device "
1079a9ac8606Spatrick "builtin texture type.");
1080a9ac8606Spatrick auto TexType = Args[1].getAsIntegral();
1081a9ac8606Spatrick auto Normalized = Args[2].getAsIntegral();
1082a9ac8606Spatrick if (!D->hasExternalStorage())
1083a9ac8606Spatrick registerDeviceTex(D, GV, !D->hasDefinition(), TexType.getSExtValue(),
1084a9ac8606Spatrick Normalized.getZExtValue());
1085a9ac8606Spatrick }
1086a9ac8606Spatrick }
1087a9ac8606Spatrick }
1088a9ac8606Spatrick
1089a9ac8606Spatrick // Transform managed variables to pointers to managed variables in device code.
1090a9ac8606Spatrick // Each use of the original managed variable is replaced by a load from the
1091a9ac8606Spatrick // transformed managed variable. The transformed managed variable contains
1092a9ac8606Spatrick // the address of managed memory which will be allocated by the runtime.
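// As a rough sketch, for a hypothetical `__managed__ int x;` the device-side
// IR ends up with:
//   @x         - a pointer in the device address space, null-initialized and
//                externally initialized; the runtime stores the address of
//                the managed allocation here.
//   @x.managed - the original variable, kept so the runtime knows what to
//                allocate and initialize.
// Every former use of `x` is rewritten to first load the pointer from @x.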
1093a9ac8606Spatrick void CGNVCUDARuntime::transformManagedVars() {
1094a9ac8606Spatrick for (auto &&Info : DeviceVars) {
1095a9ac8606Spatrick llvm::GlobalVariable *Var = Info.Var;
1096a9ac8606Spatrick if (Info.Flags.getKind() == DeviceVarFlags::Variable &&
1097a9ac8606Spatrick Info.Flags.isManaged()) {
1098*12c85518Srobert auto *ManagedVar = new llvm::GlobalVariable(
1099a9ac8606Spatrick CGM.getModule(), Var->getType(),
1100a9ac8606Spatrick /*isConstant=*/false, Var->getLinkage(),
1101a9ac8606Spatrick /*Init=*/Var->isDeclaration()
1102a9ac8606Spatrick ? nullptr
1103a9ac8606Spatrick : llvm::ConstantPointerNull::get(Var->getType()),
1104a9ac8606Spatrick /*Name=*/"", /*InsertBefore=*/nullptr,
1105a9ac8606Spatrick llvm::GlobalVariable::NotThreadLocal,
1106a9ac8606Spatrick CGM.getContext().getTargetAddressSpace(LangAS::cuda_device));
1107a9ac8606Spatrick ManagedVar->setDSOLocal(Var->isDSOLocal());
1108a9ac8606Spatrick ManagedVar->setVisibility(Var->getVisibility());
1109a9ac8606Spatrick ManagedVar->setExternallyInitialized(true);
1110a9ac8606Spatrick replaceManagedVar(Var, ManagedVar);
1111a9ac8606Spatrick ManagedVar->takeName(Var);
1112a9ac8606Spatrick Var->setName(Twine(ManagedVar->getName()) + ".managed");
1113a9ac8606Spatrick // Keep managed variables even if they are not used in device code since
1114a9ac8606Spatrick // they need to be allocated by the runtime.
1115a9ac8606Spatrick if (!Var->isDeclaration()) {
1116a9ac8606Spatrick assert(!ManagedVar->isDeclaration());
1117a9ac8606Spatrick CGM.addCompilerUsedGlobal(Var);
1118a9ac8606Spatrick CGM.addCompilerUsedGlobal(ManagedVar);
1119a9ac8606Spatrick }
1120a9ac8606Spatrick }
1121a9ac8606Spatrick }
1122a9ac8606Spatrick }
1123a9ac8606Spatrick
1124*12c85518Srobert // Creates offloading entries for all the kernels and globals that must be
1125*12c85518Srobert // registered. The linker will provide a pointer to this section so we can
1126*12c85518Srobert // register the symbols with the linked device image.
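// Each entry is, roughly, a record in the form the offloading runtime expects
// (a sketch; field names here are illustrative):
//   { void *addr; char *name; size_t size; int32_t flags; int32_t reserved; }
// The entries are laid out back to back in the section chosen below, and the
// linker-defined begin/end symbols for that section delimit the table.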
1127*12c85518Srobert void CGNVCUDARuntime::createOffloadingEntries() {
1128*12c85518Srobert llvm::OpenMPIRBuilder OMPBuilder(CGM.getModule());
1129*12c85518Srobert OMPBuilder.initialize();
1130*12c85518Srobert
1131*12c85518Srobert StringRef Section = CGM.getLangOpts().HIP ? "hip_offloading_entries"
1132*12c85518Srobert : "cuda_offloading_entries";
1133*12c85518Srobert for (KernelInfo &I : EmittedKernels)
1134*12c85518Srobert OMPBuilder.emitOffloadingEntry(KernelHandles[I.Kernel->getName()],
1135*12c85518Srobert getDeviceSideName(cast<NamedDecl>(I.D)), 0,
1136*12c85518Srobert DeviceVarFlags::OffloadGlobalEntry, Section);
1137*12c85518Srobert
1138*12c85518Srobert for (VarInfo &I : DeviceVars) {
1139*12c85518Srobert uint64_t VarSize =
1140*12c85518Srobert CGM.getDataLayout().getTypeAllocSize(I.Var->getValueType());
1141*12c85518Srobert if (I.Flags.getKind() == DeviceVarFlags::Variable) {
1142*12c85518Srobert OMPBuilder.emitOffloadingEntry(
1143*12c85518Srobert I.Var, getDeviceSideName(I.D), VarSize,
1144*12c85518Srobert I.Flags.isManaged() ? DeviceVarFlags::OffloadGlobalManagedEntry
1145*12c85518Srobert : DeviceVarFlags::OffloadGlobalEntry,
1146*12c85518Srobert Section);
1147*12c85518Srobert } else if (I.Flags.getKind() == DeviceVarFlags::Surface) {
1148*12c85518Srobert OMPBuilder.emitOffloadingEntry(I.Var, getDeviceSideName(I.D), VarSize,
1149*12c85518Srobert DeviceVarFlags::OffloadGlobalSurfaceEntry,
1150*12c85518Srobert Section);
1151*12c85518Srobert } else if (I.Flags.getKind() == DeviceVarFlags::Texture) {
1152*12c85518Srobert OMPBuilder.emitOffloadingEntry(I.Var, getDeviceSideName(I.D), VarSize,
1153*12c85518Srobert DeviceVarFlags::OffloadGlobalTextureEntry,
1154*12c85518Srobert Section);
1155*12c85518Srobert }
1156*12c85518Srobert }
1157*12c85518Srobert }
1158*12c85518Srobert
1159a9ac8606Spatrick // Returns module constructor to be added.
1160a9ac8606Spatrick llvm::Function *CGNVCUDARuntime::finalizeModule() {
1161a9ac8606Spatrick if (CGM.getLangOpts().CUDAIsDevice) {
1162a9ac8606Spatrick transformManagedVars();
1163a9ac8606Spatrick
1164a9ac8606Spatrick // Mark ODR-used device variables as compiler-used to prevent them from
1165a9ac8606Spatrick // being eliminated by optimization. This is necessary for device variables
1166a9ac8606Spatrick // ODR-used by host functions. Sema correctly marks them as ODR-used no
1167a9ac8606Spatrick // matter whether they are ODR-used by device or host functions.
1168a9ac8606Spatrick //
1169a9ac8606Spatrick // We do not need to do this if the variable has the 'used' attribute, since
1170a9ac8606Spatrick // it has already been added.
1171a9ac8606Spatrick //
1172a9ac8606Spatrick // Static device variables have been externalized at this point, therefore
1173a9ac8606Spatrick // variables with LLVM private or internal linkage need not be added.
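// An illustrative (hypothetical) case: a host function calling
//   cudaMemcpyFromSymbol(&h, devVar, sizeof(devVar));
// ODR-uses `devVar`, so its device-side definition must be kept alive even if
// no kernel in this TU references it.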
1174a9ac8606Spatrick for (auto &&Info : DeviceVars) {
1175a9ac8606Spatrick auto Kind = Info.Flags.getKind();
1176a9ac8606Spatrick if (!Info.Var->isDeclaration() &&
1177a9ac8606Spatrick !llvm::GlobalValue::isLocalLinkage(Info.Var->getLinkage()) &&
1178a9ac8606Spatrick (Kind == DeviceVarFlags::Variable ||
1179a9ac8606Spatrick Kind == DeviceVarFlags::Surface ||
1180a9ac8606Spatrick Kind == DeviceVarFlags::Texture) &&
1181a9ac8606Spatrick Info.D->isUsed() && !Info.D->hasAttr<UsedAttr>()) {
1182a9ac8606Spatrick CGM.addCompilerUsedGlobal(Info.Var);
1183a9ac8606Spatrick }
1184a9ac8606Spatrick }
1185a9ac8606Spatrick return nullptr;
1186a9ac8606Spatrick }
1187*12c85518Srobert if (CGM.getLangOpts().OffloadingNewDriver && RelocatableDeviceCode)
1188*12c85518Srobert createOffloadingEntries();
1189*12c85518Srobert else
1190a9ac8606Spatrick return makeModuleCtorFunction();
1191*12c85518Srobert
1192*12c85518Srobert return nullptr;
1193a9ac8606Spatrick }
1194a9ac8606Spatrick
1195a9ac8606Spatrick llvm::GlobalValue *CGNVCUDARuntime::getKernelHandle(llvm::Function *F,
1196a9ac8606Spatrick GlobalDecl GD) {
1197*12c85518Srobert auto Loc = KernelHandles.find(F->getName());
1198a9ac8606Spatrick if (Loc != KernelHandles.end())
1199a9ac8606Spatrick return Loc->second;
1200a9ac8606Spatrick
1201a9ac8606Spatrick if (!CGM.getLangOpts().HIP) {
1202*12c85518Srobert KernelHandles[F->getName()] = F;
1203a9ac8606Spatrick KernelStubs[F] = F;
1204a9ac8606Spatrick return F;
1205a9ac8606Spatrick }
1206a9ac8606Spatrick
1207a9ac8606Spatrick auto *Var = new llvm::GlobalVariable(
1208a9ac8606Spatrick TheModule, F->getType(), /*isConstant=*/true, F->getLinkage(),
1209a9ac8606Spatrick /*Initializer=*/nullptr,
1210a9ac8606Spatrick CGM.getMangledName(
1211a9ac8606Spatrick GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel)));
1212a9ac8606Spatrick Var->setAlignment(CGM.getPointerAlign().getAsAlign());
1213a9ac8606Spatrick Var->setDSOLocal(F->isDSOLocal());
1214a9ac8606Spatrick Var->setVisibility(F->getVisibility());
1215*12c85518Srobert CGM.maybeSetTrivialComdat(*GD.getDecl(), *Var);
1216*12c85518Srobert KernelHandles[F->getName()] = Var;
1217a9ac8606Spatrick KernelStubs[Var] = F;
1218a9ac8606Spatrick return Var;
1219a9ac8606Spatrick }
1220