xref: /llvm-project/clang/lib/Basic/Cuda.cpp (revision de0fd64bedd23660f557833cc0108c3fb2be3918)
#include "clang/Basic/Cuda.h"

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/VersionTuple.h"

#include <algorithm>
#include <limits>
7 
8 namespace clang {
9 
// Associates one CUDA release's textual name (e.g. "11.8") with both its
// CudaVersion enumerator and the equivalent llvm::VersionTuple form.
struct CudaVersionMapEntry {
  const char *Name;            // Human-readable "major.minor" spelling.
  CudaVersion Version;         // Enumerator used throughout clang.
  llvm::VersionTuple TVersion; // Numeric form for tuple-based lookups.
};
// Expands to a CudaVersionMapEntry for CUDA <major>.<minor>: stringizes the
// name and token-pastes the CudaVersion::CUDA_<major><minor> enumerator.
#define CUDA_ENTRY(major, minor)                                               \
  {                                                                            \
    #major "." #minor, CudaVersion::CUDA_##major##minor,                       \
        llvm::VersionTuple(major, minor)                                       \
  }

// Every CUDA release clang knows about, in ascending order, followed by a
// NEW sentinel (for versions newer than any listed) and an UNKNOWN tombstone
// that terminates the linear scans below.
static const CudaVersionMapEntry CudaNameVersionMap[] = {
    CUDA_ENTRY(7, 0),
    CUDA_ENTRY(7, 5),
    CUDA_ENTRY(8, 0),
    CUDA_ENTRY(9, 0),
    CUDA_ENTRY(9, 1),
    CUDA_ENTRY(9, 2),
    CUDA_ENTRY(10, 0),
    CUDA_ENTRY(10, 1),
    CUDA_ENTRY(10, 2),
    CUDA_ENTRY(11, 0),
    CUDA_ENTRY(11, 1),
    CUDA_ENTRY(11, 2),
    CUDA_ENTRY(11, 3),
    CUDA_ENTRY(11, 4),
    CUDA_ENTRY(11, 5),
    CUDA_ENTRY(11, 6),
    CUDA_ENTRY(11, 7),
    CUDA_ENTRY(11, 8),
    CUDA_ENTRY(12, 0),
    CUDA_ENTRY(12, 1),
    CUDA_ENTRY(12, 2),
    CUDA_ENTRY(12, 3),
    CUDA_ENTRY(12, 4),
    CUDA_ENTRY(12, 5),
    CUDA_ENTRY(12, 6),
    // Max tuple so ordered comparisons place NEW after every known release.
    {"", CudaVersion::NEW, llvm::VersionTuple(std::numeric_limits<int>::max())},
    {"unknown", CudaVersion::UNKNOWN, {}} // End of list tombstone.
};
#undef CUDA_ENTRY
51 
52 const char *CudaVersionToString(CudaVersion V) {
53   for (auto *I = CudaNameVersionMap; I->Version != CudaVersion::UNKNOWN; ++I)
54     if (I->Version == V)
55       return I->Name;
56 
57   return CudaVersionToString(CudaVersion::UNKNOWN);
58 }
59 
60 CudaVersion CudaStringToVersion(const llvm::Twine &S) {
61   std::string VS = S.str();
62   for (auto *I = CudaNameVersionMap; I->Version != CudaVersion::UNKNOWN; ++I)
63     if (I->Name == VS)
64       return I->Version;
65   return CudaVersion::UNKNOWN;
66 }
67 
68 CudaVersion ToCudaVersion(llvm::VersionTuple Version) {
69   for (auto *I = CudaNameVersionMap; I->Version != CudaVersion::UNKNOWN; ++I)
70     if (I->TVersion == Version)
71       return I->Version;
72   return CudaVersion::UNKNOWN;
73 }
74 
namespace {
// Ties an OffloadArch enumerator to its canonical architecture name (e.g.
// "sm_70", "gfx906") and the associated virtual architecture string.
struct OffloadArchToStringMap {
  OffloadArch arch;
  const char *arch_name;         // e.g. "sm_90a" or "gfx1100".
  const char *virtual_arch_name; // e.g. "compute_90"; "compute_amdgcn" for AMD.
};
} // namespace
82 
// SM2: NVIDIA real arch with an explicitly named virtual arch; SM: real arch
// whose virtual arch shares the same suffix; GFX: AMD arch (all map to the
// "compute_amdgcn" pseudo virtual arch).
#define SM2(sm, ca) {OffloadArch::SM_##sm, "sm_" #sm, ca}
#define SM(sm) SM2(sm, "compute_" #sm)
#define GFX(gpu) {OffloadArch::GFX##gpu, "gfx" #gpu, "compute_amdgcn"}
static const OffloadArchToStringMap arch_names[] = {
    // clang-format off
    {OffloadArch::UNUSED, "", ""},
    SM2(20, "compute_20"), SM2(21, "compute_20"), // Fermi
    SM(30), {OffloadArch::SM_32_, "sm_32", "compute_32"}, SM(35), SM(37),  // Kepler
    SM(50), SM(52), SM(53),          // Maxwell
    SM(60), SM(61), SM(62),          // Pascal
    SM(70), SM(72),                  // Volta
    SM(75),                          // Turing
    SM(80), SM(86),                  // Ampere
    SM(87),                          // Jetson/Drive AGX Orin
    SM(89),                          // Ada Lovelace
    SM(90),                          // Hopper
    SM(90a),                         // Hopper
    SM(100),                         // Blackwell
    GFX(600),  // gfx600
    GFX(601),  // gfx601
    GFX(602),  // gfx602
    GFX(700),  // gfx700
    GFX(701),  // gfx701
    GFX(702),  // gfx702
    GFX(703),  // gfx703
    GFX(704),  // gfx704
    GFX(705),  // gfx705
    GFX(801),  // gfx801
    GFX(802),  // gfx802
    GFX(803),  // gfx803
    GFX(805),  // gfx805
    GFX(810),  // gfx810
    {OffloadArch::GFX9_GENERIC, "gfx9-generic", "compute_amdgcn"},
    GFX(900),  // gfx900
    GFX(902),  // gfx902
    GFX(904),  // gfx904
    GFX(906),  // gfx906
    GFX(908),  // gfx908
    GFX(909),  // gfx909
    GFX(90a),  // gfx90a
    GFX(90c),  // gfx90c
    {OffloadArch::GFX9_4_GENERIC, "gfx9-4-generic", "compute_amdgcn"},
    GFX(940),  // gfx940
    GFX(941),  // gfx941
    GFX(942),  // gfx942
    {OffloadArch::GFX10_1_GENERIC, "gfx10-1-generic", "compute_amdgcn"},
    GFX(1010), // gfx1010
    GFX(1011), // gfx1011
    GFX(1012), // gfx1012
    GFX(1013), // gfx1013
    {OffloadArch::GFX10_3_GENERIC, "gfx10-3-generic", "compute_amdgcn"},
    GFX(1030), // gfx1030
    GFX(1031), // gfx1031
    GFX(1032), // gfx1032
    GFX(1033), // gfx1033
    GFX(1034), // gfx1034
    GFX(1035), // gfx1035
    GFX(1036), // gfx1036
    {OffloadArch::GFX11_GENERIC, "gfx11-generic", "compute_amdgcn"},
    GFX(1100), // gfx1100
    GFX(1101), // gfx1101
    GFX(1102), // gfx1102
    GFX(1103), // gfx1103
    GFX(1150), // gfx1150
    GFX(1151), // gfx1151
    GFX(1152), // gfx1152
    GFX(1153), // gfx1153
    {OffloadArch::GFX12_GENERIC, "gfx12-generic", "compute_amdgcn"},
    GFX(1200), // gfx1200
    GFX(1201), // gfx1201
    {OffloadArch::AMDGCNSPIRV, "amdgcnspirv", "compute_amdgcn"},
    {OffloadArch::Generic, "generic", ""},
    // clang-format on
};
#undef SM
#undef SM2
#undef GFX
160 
161 const char *OffloadArchToString(OffloadArch A) {
162   auto result = std::find_if(
163       std::begin(arch_names), std::end(arch_names),
164       [A](const OffloadArchToStringMap &map) { return A == map.arch; });
165   if (result == std::end(arch_names))
166     return "unknown";
167   return result->arch_name;
168 }
169 
170 const char *OffloadArchToVirtualArchString(OffloadArch A) {
171   auto result = std::find_if(
172       std::begin(arch_names), std::end(arch_names),
173       [A](const OffloadArchToStringMap &map) { return A == map.arch; });
174   if (result == std::end(arch_names))
175     return "unknown";
176   return result->virtual_arch_name;
177 }
178 
179 OffloadArch StringToOffloadArch(llvm::StringRef S) {
180   auto result = std::find_if(
181       std::begin(arch_names), std::end(arch_names),
182       [S](const OffloadArchToStringMap &map) { return S == map.arch_name; });
183   if (result == std::end(arch_names))
184     return OffloadArch::UNKNOWN;
185   return result->arch;
186 }
187 
// Returns the oldest CUDA toolkit release that supports compiling for \p A,
// or CudaVersion::UNKNOWN when \p A itself is unknown.
CudaVersion MinVersionForOffloadArch(OffloadArch A) {
  if (A == OffloadArch::UNKNOWN)
    return CudaVersion::UNKNOWN;

  // AMD GPUs do not depend on CUDA versions.
  if (IsAMDOffloadArch(A))
    return CudaVersion::CUDA_70;

  switch (A) {
  case OffloadArch::SM_20:
  case OffloadArch::SM_21:
  case OffloadArch::SM_30:
  case OffloadArch::SM_32_:
  case OffloadArch::SM_35:
  case OffloadArch::SM_37:
  case OffloadArch::SM_50:
  case OffloadArch::SM_52:
  case OffloadArch::SM_53:
    // Everything up through Maxwell predates our oldest supported toolkit.
    return CudaVersion::CUDA_70;
  case OffloadArch::SM_60:
  case OffloadArch::SM_61:
  case OffloadArch::SM_62:
    return CudaVersion::CUDA_80;
  case OffloadArch::SM_70:
    return CudaVersion::CUDA_90;
  case OffloadArch::SM_72:
    return CudaVersion::CUDA_91;
  case OffloadArch::SM_75:
    return CudaVersion::CUDA_100;
  case OffloadArch::SM_80:
    return CudaVersion::CUDA_110;
  case OffloadArch::SM_86:
    return CudaVersion::CUDA_111;
  case OffloadArch::SM_87:
    return CudaVersion::CUDA_114;
  case OffloadArch::SM_89:
  case OffloadArch::SM_90:
    return CudaVersion::CUDA_118;
  case OffloadArch::SM_90a:
    return CudaVersion::CUDA_120;
  case OffloadArch::SM_100:
    return CudaVersion::NEW; // TODO: use specific CUDA version once it's
                             // public.
  default:
    llvm_unreachable("invalid enum");
  }
}
235 
// Returns the newest CUDA toolkit release that still supports \p A.
// Architectures that have not been dropped map to CudaVersion::NEW.
CudaVersion MaxVersionForOffloadArch(OffloadArch A) {
  // AMD GPUs do not depend on CUDA versions.
  if (IsAMDOffloadArch(A))
    return CudaVersion::NEW;

  switch (A) {
  case OffloadArch::UNKNOWN:
    return CudaVersion::UNKNOWN;
  case OffloadArch::SM_20:
  case OffloadArch::SM_21:
    // Fermi: last supported by CUDA 8.
    return CudaVersion::CUDA_80;
  case OffloadArch::SM_30:
  case OffloadArch::SM_32_:
    // Early Kepler: last supported by CUDA 10.2.
    return CudaVersion::CUDA_102;
  case OffloadArch::SM_35:
  case OffloadArch::SM_37:
    // Late Kepler: last supported by CUDA 11.8.
    return CudaVersion::CUDA_118;
  default:
    return CudaVersion::NEW;
  }
}
257 
258 bool CudaFeatureEnabled(llvm::VersionTuple Version, CudaFeature Feature) {
259   return CudaFeatureEnabled(ToCudaVersion(Version), Feature);
260 }
261 
// Returns true when CUDA release \p Version provides \p Feature. The switch
// has no default so that adding a CudaFeature without updating this mapping
// produces a compiler warning; llvm_unreachable guards corrupt enum values.
bool CudaFeatureEnabled(CudaVersion Version, CudaFeature Feature) {
  switch (Feature) {
  case CudaFeature::CUDA_USES_NEW_LAUNCH:
    return Version >= CudaVersion::CUDA_92;
  case CudaFeature::CUDA_USES_FATBIN_REGISTER_END:
    return Version >= CudaVersion::CUDA_101;
  }
  llvm_unreachable("Unknown CUDA feature.");
}
271 } // namespace clang
272