//===--- SemaCUDA.cpp - Semantic Analysis for CUDA constructs -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file implements semantic analysis for CUDA constructs.
///
//===----------------------------------------------------------------------===//

#include "clang/Sema/Sema.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
using namespace clang;

ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                         MultiExprArg ExecConfig,
                                         SourceLocation GGGLoc) {
  FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
  if (!ConfigDecl)
    return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
                     << "cudaConfigureCall");
  QualType ConfigQTy = ConfigDecl->getType();

  DeclRefExpr *ConfigDR = new (Context)
      DeclRefExpr(ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
  MarkFunctionReferenced(LLLLoc, ConfigDecl);

  return ActOnCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
                       /*IsExecConfig=*/true);
}
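
// Illustrative sketch only (not part of the semantic logic above): in CUDA
// source, a kernel launch such as
//
//   __global__ void kernel(int *p);
//   ...
//   kernel<<<numBlocks, threadsPerBlock, /*sharedMem=*/0, stream>>>(devPtr);
//
// supplies the arguments between "<<<" and ">>>" as ExecConfig, with LLLLoc
// and GGGLoc naming the locations of those delimiters. The identifiers used
// here (kernel, numBlocks, threadsPerBlock, stream, devPtr) are invented for
// the example.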

/// IdentifyCUDATarget - Determine the CUDA compilation target for this function
Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D) {
  if (D->hasAttr<CUDAInvalidTargetAttr>())
    return CFT_InvalidTarget;

  if (D->hasAttr<CUDAGlobalAttr>())
    return CFT_Global;

  if (D->hasAttr<CUDADeviceAttr>()) {
    if (D->hasAttr<CUDAHostAttr>())
      return CFT_HostDevice;
    return CFT_Device;
  } else if (D->hasAttr<CUDAHostAttr>()) {
    return CFT_Host;
  } else if (D->isImplicit()) {
    // Some implicit declarations (like intrinsic functions) are not marked.
    // Set the most lenient target on them for maximal flexibility.
    return CFT_HostDevice;
  }

  return CFT_Host;
}
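
// For illustration, the mapping the attribute checks above produce (example
// declarations, not code from this file):
//   __global__ void g();           -> CFT_Global
//   __device__ void d();           -> CFT_Device
//   __host__ __device__ void hd(); -> CFT_HostDevice
//   void plainHost();              -> CFT_Host (no target attribute)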

bool Sema::CheckCUDATarget(const FunctionDecl *Caller,
                           const FunctionDecl *Callee) {
  CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller),
                     CalleeTarget = IdentifyCUDATarget(Callee);

  // If one of the targets is invalid, the check always fails, no matter what
  // the other target is.
  if (CallerTarget == CFT_InvalidTarget || CalleeTarget == CFT_InvalidTarget)
    return true;

  // CUDA B.1.1 "The __device__ qualifier declares a function that is [...]
  // Callable from the device only."
  if (CallerTarget == CFT_Host && CalleeTarget == CFT_Device)
    return true;

  // CUDA B.1.2 "The __global__ qualifier declares a function that is [...]
  // Callable from the host only."
  // CUDA B.1.3 "The __host__ qualifier declares a function that is [...]
  // Callable from the host only."
  if ((CallerTarget == CFT_Device || CallerTarget == CFT_Global) &&
      (CalleeTarget == CFT_Host || CalleeTarget == CFT_Global))
    return true;

  // CUDA B.1.3 "The __device__ and __host__ qualifiers can be used together
  // however, in which case the function is compiled for both the host and the
  // device. The __CUDA_ARCH__ macro [...] can be used to differentiate code
  // paths between host and device."
  if (CallerTarget == CFT_HostDevice && CalleeTarget != CFT_HostDevice) {
    // If the caller is implicit then the check always passes.
    if (Caller->isImplicit()) return false;

    bool InDeviceMode = getLangOpts().CUDAIsDevice;
    if ((InDeviceMode && CalleeTarget != CFT_Device) ||
        (!InDeviceMode && CalleeTarget != CFT_Host))
      return true;
  }

  return false;
}
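
// A sketch of how the rules above play out on CUDA source (names invented for
// the example; whether the __host__ __device__ case is rejected depends on
// LangOpts.CUDAIsDevice, i.e. which side is currently being compiled):
//
//   __device__ void dev();
//   __host__ void host();
//   void hostCaller()             { dev(); }   // rejected: host -> device
//   __global__ void kernel()      { host(); }  // rejected: device -> host
//   __host__ __device__ void hd() { dev(); }   // allowed in the device pass,
//                                               // rejected in the host pass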

/// When an implicitly-declared special member has to invoke more than one
/// base/field special member, conflicts may occur in the targets of these
/// members. For example, if one base's member is __host__ and another's is
/// __device__, it's a conflict.
/// This function figures out if the given targets \param Target1 and
/// \param Target2 conflict, and if they do not it fills in
/// \param ResolvedTarget with a target that resolves for both calls.
/// \return true if there's a conflict, false otherwise.
static bool
resolveCalleeCUDATargetConflict(Sema::CUDAFunctionTarget Target1,
                                Sema::CUDAFunctionTarget Target2,
                                Sema::CUDAFunctionTarget *ResolvedTarget) {
  if (Target1 == Sema::CFT_Global && Target2 == Sema::CFT_Global) {
    // TODO: this shouldn't happen, really. Methods cannot be marked __global__.
    // Clang should detect this earlier and produce an error. Then this
    // condition can be changed to an assertion.
    return true;
  }

  if (Target1 == Sema::CFT_HostDevice) {
    *ResolvedTarget = Target2;
  } else if (Target2 == Sema::CFT_HostDevice) {
    *ResolvedTarget = Target1;
  } else if (Target1 != Target2) {
    return true;
  } else {
    *ResolvedTarget = Target1;
  }

  return false;
}
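
// Resolution behaviour above, summarised for illustration:
//   (HostDevice, X)  -> X           HostDevice defers to the other target
//   (X, X)           -> X           identical targets resolve to themselves
//   (Host, Device)   -> conflict    returns true, no resolved target written
//   (Global, Global) -> conflict    methods should never be __global__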

bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                                   CXXSpecialMember CSM,
                                                   CXXMethodDecl *MemberDecl,
                                                   bool ConstRHS,
                                                   bool Diagnose) {
  llvm::Optional<CUDAFunctionTarget> InferredTarget;

  // We're going to invoke special member lookup; mark that these special
  // members are called from this one, and not from its caller.
  ContextRAII MethodContext(*this, MemberDecl);

  // Look for special members in base classes that should be invoked from here.
  // Infer the target of this member based on the ones it should call.
  // Skip direct and indirect virtual bases for abstract classes.
  llvm::SmallVector<const CXXBaseSpecifier *, 16> Bases;
  for (const auto &B : ClassDecl->bases()) {
    if (!B.isVirtual()) {
      Bases.push_back(&B);
    }
  }

  if (!ClassDecl->isAbstract()) {
    for (const auto &VB : ClassDecl->vbases()) {
      Bases.push_back(&VB);
    }
  }

  for (const auto *B : Bases) {
    const RecordType *BaseType = B->getType()->getAs<RecordType>();
    if (!BaseType) {
      continue;
    }

    CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
    Sema::SpecialMemberOverloadResult *SMOR =
        LookupSpecialMember(BaseClassDecl, CSM,
                            /* ConstArg */ ConstRHS,
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    if (!SMOR || !SMOR->getMethod()) {
      continue;
    }

    CUDAFunctionTarget BaseMethodTarget = IdentifyCUDATarget(SMOR->getMethod());
    if (!InferredTarget.hasValue()) {
      InferredTarget = BaseMethodTarget;
    } else {
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          InferredTarget.getValue(), BaseMethodTarget,
          InferredTarget.getPointer());
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << InferredTarget.getValue() << BaseMethodTarget;
        }
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  // Same as for bases, but now for special members of fields.
  for (const auto *F : ClassDecl->fields()) {
    if (F->isInvalidDecl()) {
      continue;
    }

    const RecordType *FieldType =
        Context.getBaseElementType(F->getType())->getAs<RecordType>();
    if (!FieldType) {
      continue;
    }

    CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(FieldType->getDecl());
    Sema::SpecialMemberOverloadResult *SMOR =
        LookupSpecialMember(FieldRecDecl, CSM,
                            /* ConstArg */ ConstRHS && !F->isMutable(),
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    if (!SMOR || !SMOR->getMethod()) {
      continue;
    }

    CUDAFunctionTarget FieldMethodTarget =
        IdentifyCUDATarget(SMOR->getMethod());
    if (!InferredTarget.hasValue()) {
      InferredTarget = FieldMethodTarget;
    } else {
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          InferredTarget.getValue(), FieldMethodTarget,
          InferredTarget.getPointer());
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << InferredTarget.getValue()
              << FieldMethodTarget;
        }
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  if (InferredTarget.hasValue()) {
    if (InferredTarget.getValue() == CFT_Device) {
      MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    } else if (InferredTarget.getValue() == CFT_Host) {
      MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
    } else {
      MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
      MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
    }
  } else {
    // If no target was inferred, mark this member as __host__ __device__;
    // it's the least restrictive option that can be invoked from any target.
    MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
  }

  return false;
}
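
// Illustrative example of the inference above (CUDA source with invented
// names, not code from this file):
//
//   struct DeviceOnly { __device__ DeviceOnly(); };
//   struct HostOnly   { __host__ HostOnly(); };
//   struct Mixed : DeviceOnly { HostOnly Field; };
//
// The implicit Mixed::Mixed() would have to call a __device__ base
// constructor and a __host__ field constructor; those targets conflict, so
// the member is marked with CUDAInvalidTargetAttr and a note is emitted when
// Diagnose is true.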