//===--- SemaCUDA.cpp - Semantic Analysis for CUDA constructs -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file implements semantic analysis for CUDA constructs.
///
//===----------------------------------------------------------------------===//

#include "clang/Sema/Sema.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
using namespace clang;

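/// ActOnCUDAExecConfigExpr - Build the implicit configuration call for a
/// kernel launch. As the body below shows, the execution configuration of a
/// launch is turned into a call to cudaConfigureCall, whose declaration must
/// already be registered with the ASTContext. Illustrative sketch of the user
/// code involved (hypothetical, not part of this file):
///   __global__ void kernel(int *p);
///   kernel<<<dim3(16), dim3(256)>>>(ptr);  // ExecConfig = {dim3(16), dim3(256)}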
ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                         MultiExprArg ExecConfig,
                                         SourceLocation GGGLoc) {
  FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
  if (!ConfigDecl)
    return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
                     << "cudaConfigureCall");
  QualType ConfigQTy = ConfigDecl->getType();

  DeclRefExpr *ConfigDR = new (Context)
      DeclRefExpr(ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
  MarkFunctionReferenced(LLLLoc, ConfigDecl);

  return ActOnCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
                       /*IsExecConfig=*/true);
}

/// IdentifyCUDATarget - Determine the CUDA compilation target for this function.
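/// The mapping below summarizes the attribute checks in the function body
/// (a sketch of the logic, not an exhaustive specification):
///   __global__                    -> CFT_Global
///   __device__                    -> CFT_Device
///   __host__                      -> CFT_Host
///   __device__ __host__           -> CFT_HostDevice
///   unattributed implicit decl    -> CFT_HostDevice
///   anything else                 -> CFT_Host (the default)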
Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D) {
  if (D->hasAttr<CUDAInvalidTargetAttr>())
    return CFT_InvalidTarget;

  if (D->hasAttr<CUDAGlobalAttr>())
    return CFT_Global;

  if (D->hasAttr<CUDADeviceAttr>()) {
    if (D->hasAttr<CUDAHostAttr>())
      return CFT_HostDevice;
    return CFT_Device;
  } else if (D->hasAttr<CUDAHostAttr>()) {
    return CFT_Host;
  } else if (D->isImplicit()) {
    // Some implicit declarations (like intrinsic functions) are not marked.
    // Set the most lenient target on them for maximal flexibility.
    return CFT_HostDevice;
  }

  return CFT_Host;
}

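/// CheckCUDATarget - Check whether a call from \p Caller to \p Callee is
/// permitted given their CUDA targets; returns true if the call is an error.
/// For illustration, under the rules below this (hypothetical) call is
/// rejected, because a __host__ function may not call a __device__ one:
///   __device__ void d();
///   __host__ void h() { d(); }  // error: host -> device call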
bool Sema::CheckCUDATarget(const FunctionDecl *Caller,
                           const FunctionDecl *Callee) {
  // The CUDADisableTargetCallChecks option short-circuits this check: we
  // assume all cross-target calls are valid.
  if (getLangOpts().CUDADisableTargetCallChecks)
    return false;

  CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller),
                     CalleeTarget = IdentifyCUDATarget(Callee);

  // If one of the targets is invalid, the check always fails, no matter what
  // the other target is.
  if (CallerTarget == CFT_InvalidTarget || CalleeTarget == CFT_InvalidTarget)
    return true;

  // CUDA B.1.1 "The __device__ qualifier declares a function that is [...]
  // Callable from the device only."
  if (CallerTarget == CFT_Host && CalleeTarget == CFT_Device)
    return true;

  // CUDA B.1.2 "The __global__ qualifier declares a function that is [...]
  // Callable from the host only."
  // CUDA B.1.3 "The __host__ qualifier declares a function that is [...]
  // Callable from the host only."
  if ((CallerTarget == CFT_Device || CallerTarget == CFT_Global) &&
      (CalleeTarget == CFT_Host || CalleeTarget == CFT_Global))
    return true;

  // CUDA B.1.3 "The __device__ and __host__ qualifiers can be used together
  // however, in which case the function is compiled for both the host and the
  // device. The __CUDA_ARCH__ macro [...] can be used to differentiate code
  // paths between host and device."
  if (CallerTarget == CFT_HostDevice && CalleeTarget != CFT_HostDevice) {
    // If the caller is implicit then the check always passes.
    if (Caller->isImplicit()) return false;

    bool InDeviceMode = getLangOpts().CUDAIsDevice;
    if (!InDeviceMode && CalleeTarget != CFT_Host)
      return true;
    if (InDeviceMode && CalleeTarget != CFT_Device) {
      // Allow host device functions to call host functions if explicitly
      // requested.
      if (CalleeTarget == CFT_Host &&
          getLangOpts().CUDAAllowHostCallsFromHostDevice) {
        Diag(Caller->getLocation(), diag::warn_host_calls_from_host_device)
            << Callee->getNameAsString() << Caller->getNameAsString();
        return false;
      }

      return true;
    }
  }

  return false;
}

/// When an implicitly-declared special member has to invoke more than one
/// base/field special member, conflicts may occur in the targets of these
/// members. For example, if one base's member is __host__ and another's is
/// __device__, it's a conflict.
/// This function figures out if the given targets \param Target1 and
/// \param Target2 conflict, and if they do not it fills in
/// \param ResolvedTarget with a target that resolves for both calls.
/// \return true if there's a conflict, false otherwise.
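/// For illustration, a summary of the resolution logic below (a sketch of
/// what the code does, not new behavior):
///   HostDevice + Device -> resolves to Device
///   HostDevice + Host   -> resolves to Host
///   Host + Device       -> conflict
///   Global + Global     -> treated as a conflict (see the TODO below)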
static bool
resolveCalleeCUDATargetConflict(Sema::CUDAFunctionTarget Target1,
                                Sema::CUDAFunctionTarget Target2,
                                Sema::CUDAFunctionTarget *ResolvedTarget) {
  if (Target1 == Sema::CFT_Global && Target2 == Sema::CFT_Global) {
    // TODO: this shouldn't happen, really. Methods cannot be marked __global__.
    // Clang should detect this earlier and produce an error. Then this
    // condition can be changed to an assertion.
    return true;
  }

  if (Target1 == Sema::CFT_HostDevice) {
    *ResolvedTarget = Target2;
  } else if (Target2 == Sema::CFT_HostDevice) {
    *ResolvedTarget = Target1;
  } else if (Target1 != Target2) {
    return true;
  } else {
    *ResolvedTarget = Target1;
  }

  return false;
}

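/// inferCUDATargetForImplicitSpecialMember - Infer the CUDA target of the
/// implicitly-declared special member \p MemberDecl of \p ClassDecl from the
/// targets of the base- and field-subobject special members it must invoke,
/// returning true (and attaching CUDAInvalidTargetAttr) on a conflict.
/// A hypothetical case that collides under these rules:
///   struct A { __device__ A(const A&); };
///   struct B { __host__ B(const B&); };
///   struct C : A { B b; };  // implicit copy ctor: device vs. host collision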
bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                                   CXXSpecialMember CSM,
                                                   CXXMethodDecl *MemberDecl,
                                                   bool ConstRHS,
                                                   bool Diagnose) {
  llvm::Optional<CUDAFunctionTarget> InferredTarget;

  // We're going to invoke special member lookup; mark that these special
  // members are called from this one, and not from its caller.
  ContextRAII MethodContext(*this, MemberDecl);

  // Look for special members in base classes that should be invoked from here.
  // Infer the target of this member based on the ones it should call.
  // Skip direct and indirect virtual bases for abstract classes.
  llvm::SmallVector<const CXXBaseSpecifier *, 16> Bases;
  for (const auto &B : ClassDecl->bases()) {
    if (!B.isVirtual()) {
      Bases.push_back(&B);
    }
  }

  if (!ClassDecl->isAbstract()) {
    for (const auto &VB : ClassDecl->vbases()) {
      Bases.push_back(&VB);
    }
  }

  for (const auto *B : Bases) {
    const RecordType *BaseType = B->getType()->getAs<RecordType>();
    if (!BaseType) {
      continue;
    }

    CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
    Sema::SpecialMemberOverloadResult *SMOR =
        LookupSpecialMember(BaseClassDecl, CSM,
                            /* ConstArg */ ConstRHS,
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    if (!SMOR || !SMOR->getMethod()) {
      continue;
    }

    CUDAFunctionTarget BaseMethodTarget = IdentifyCUDATarget(SMOR->getMethod());
    if (!InferredTarget.hasValue()) {
      InferredTarget = BaseMethodTarget;
    } else {
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          InferredTarget.getValue(), BaseMethodTarget,
          InferredTarget.getPointer());
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << InferredTarget.getValue() << BaseMethodTarget;
        }
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  // Same as for bases, but now for special members of fields.
  for (const auto *F : ClassDecl->fields()) {
    if (F->isInvalidDecl()) {
      continue;
    }

    const RecordType *FieldType =
        Context.getBaseElementType(F->getType())->getAs<RecordType>();
    if (!FieldType) {
      continue;
    }

    CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(FieldType->getDecl());
    Sema::SpecialMemberOverloadResult *SMOR =
        LookupSpecialMember(FieldRecDecl, CSM,
                            /* ConstArg */ ConstRHS && !F->isMutable(),
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    if (!SMOR || !SMOR->getMethod()) {
      continue;
    }

    CUDAFunctionTarget FieldMethodTarget =
        IdentifyCUDATarget(SMOR->getMethod());
    if (!InferredTarget.hasValue()) {
      InferredTarget = FieldMethodTarget;
    } else {
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          InferredTarget.getValue(), FieldMethodTarget,
          InferredTarget.getPointer());
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << InferredTarget.getValue()
              << FieldMethodTarget;
        }
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  if (InferredTarget.hasValue()) {
    if (InferredTarget.getValue() == CFT_Device) {
      MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    } else if (InferredTarget.getValue() == CFT_Host) {
      MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
    } else {
      MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
      MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
    }
  } else {
    // If no target was inferred, mark this member as __host__ __device__;
    // it's the least restrictive option that can be invoked from any target.
    MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
  }

  return false;
}