#include "clang/Basic/Cuda.h"

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/VersionTuple.h"

#include <algorithm>
#include <limits>
#include <string>

namespace clang {

struct CudaVersionMapEntry {
  const char *Name;
  CudaVersion Version;
  llvm::VersionTuple TVersion;
};
#define CUDA_ENTRY(major, minor)                                               \
  {                                                                            \
    #major "." #minor, CudaVersion::CUDA_##major##minor,                       \
        llvm::VersionTuple(major, minor)                                       \
  }

static const CudaVersionMapEntry CudaNameVersionMap[] = {
    CUDA_ENTRY(7, 0),
    CUDA_ENTRY(7, 5),
    CUDA_ENTRY(8, 0),
    CUDA_ENTRY(9, 0),
    CUDA_ENTRY(9, 1),
    CUDA_ENTRY(9, 2),
    CUDA_ENTRY(10, 0),
    CUDA_ENTRY(10, 1),
    CUDA_ENTRY(10, 2),
    CUDA_ENTRY(11, 0),
    CUDA_ENTRY(11, 1),
    CUDA_ENTRY(11, 2),
    CUDA_ENTRY(11, 3),
    CUDA_ENTRY(11, 4),
    CUDA_ENTRY(11, 5),
    CUDA_ENTRY(11, 6),
    CUDA_ENTRY(11, 7),
    CUDA_ENTRY(11, 8),
    CUDA_ENTRY(12, 0),
    CUDA_ENTRY(12, 1),
    {"", CudaVersion::NEW, llvm::VersionTuple(std::numeric_limits<int>::max())},
    {"unknown", CudaVersion::UNKNOWN, {}} // End of list tombstone.
};
#undef CUDA_ENTRY

const char *CudaVersionToString(CudaVersion V) {
  for (auto *I = CudaNameVersionMap; I->Version != CudaVersion::UNKNOWN; ++I)
    if (I->Version == V)
      return I->Name;

  // No match (including V == UNKNOWN, which the loop never reaches): return
  // the tombstone name directly rather than recursing indefinitely.
  return "unknown";
}

CudaVersion CudaStringToVersion(const llvm::Twine &S) {
  std::string VS = S.str();
  for (auto *I = CudaNameVersionMap; I->Version != CudaVersion::UNKNOWN; ++I)
    if (I->Name == VS)
      return I->Version;
  return CudaVersion::UNKNOWN;
}

CudaVersion ToCudaVersion(llvm::VersionTuple Version) {
  for (auto *I = CudaNameVersionMap; I->Version != CudaVersion::UNKNOWN; ++I)
    if (I->TVersion == Version)
      return I->Version;
  return CudaVersion::UNKNOWN;
}

namespace {
struct CudaArchToStringMap {
  CudaArch arch;
  const char *arch_name;
  const char *virtual_arch_name;
};
} // namespace

#define SM2(sm, ca)                                                            \
  { CudaArch::SM_##sm, "sm_" #sm, ca }
#define SM(sm) SM2(sm, "compute_" #sm)
#define GFX(gpu)                                                               \
  { CudaArch::GFX##gpu, "gfx" #gpu, "compute_amdgcn" }
static const CudaArchToStringMap arch_names[] = {
    // clang-format off
    {CudaArch::UNUSED, "", ""},
    SM2(20, "compute_20"), SM2(21, "compute_20"), // Fermi
    SM(30), SM(32), SM(35), SM(37),               // Kepler
    SM(50), SM(52), SM(53),                       // Maxwell
    SM(60), SM(61), SM(62),                       // Pascal
    SM(70), SM(72),                               // Volta
    SM(75),                                       // Turing
    SM(80), SM(86),                               // Ampere
    SM(87),                                       // Jetson/Drive AGX Orin
    SM(89),                                       // Ada Lovelace
    SM(90),                                       // Hopper
    GFX(600),  // gfx600
    GFX(601),  // gfx601
    GFX(602),  // gfx602
    GFX(700),  // gfx700
    GFX(701),  // gfx701
    GFX(702),  // gfx702
    GFX(703),  // gfx703
    GFX(704),  // gfx704
    GFX(705),  // gfx705
    GFX(801),  // gfx801
    GFX(802),  // gfx802
    GFX(803),  // gfx803
    GFX(805),  // gfx805
    GFX(810),  // gfx810
    GFX(900),  // gfx900
    GFX(902),  // gfx902
    GFX(904),  // gfx904
    GFX(906),  // gfx906
    GFX(908),  // gfx908
    GFX(909),  // gfx909
    GFX(90a),  // gfx90a
    GFX(90c),  // gfx90c
    GFX(940),  // gfx940
    GFX(941),  // gfx941
    GFX(942),  // gfx942
    GFX(1010), // gfx1010
    GFX(1011), // gfx1011
    GFX(1012), // gfx1012
    GFX(1013), // gfx1013
    GFX(1030), // gfx1030
    GFX(1031), // gfx1031
    GFX(1032), // gfx1032
    GFX(1033), // gfx1033
    GFX(1034), // gfx1034
    GFX(1035), // gfx1035
    GFX(1036), // gfx1036
    GFX(1100), // gfx1100
    GFX(1101), // gfx1101
    GFX(1102), // gfx1102
    GFX(1103), // gfx1103
    GFX(1150), // gfx1150
    GFX(1151), // gfx1151
    GFX(1200), // gfx1200
    GFX(1201), // gfx1201
    {CudaArch::Generic, "generic", ""},
    // clang-format on
};
#undef SM
#undef SM2
#undef GFX

const char *CudaArchToString(CudaArch A) {
  auto result = std::find_if(
      std::begin(arch_names), std::end(arch_names),
      [A](const CudaArchToStringMap &map) { return A == map.arch; });
  if (result == std::end(arch_names))
    return "unknown";
  return result->arch_name;
}

const char *CudaArchToVirtualArchString(CudaArch A) {
  auto result = std::find_if(
      std::begin(arch_names), std::end(arch_names),
      [A](const CudaArchToStringMap &map) { return A == map.arch; });
  if (result == std::end(arch_names))
    return "unknown";
  return result->virtual_arch_name;
}

CudaArch StringToCudaArch(llvm::StringRef S) {
  auto result = std::find_if(
      std::begin(arch_names), std::end(arch_names),
      [S](const CudaArchToStringMap &map) { return S == map.arch_name; });
  if (result == std::end(arch_names))
    return CudaArch::UNKNOWN;
  return result->arch;
}

CudaVersion MinVersionForCudaArch(CudaArch A) {
  if (A == CudaArch::UNKNOWN)
    return CudaVersion::UNKNOWN;

  // AMD GPUs do not depend on CUDA versions.
  if (IsAMDGpuArch(A))
    return CudaVersion::CUDA_70;

  switch (A) {
  case CudaArch::SM_20:
  case CudaArch::SM_21:
  case CudaArch::SM_30:
  case CudaArch::SM_32:
  case CudaArch::SM_35:
  case CudaArch::SM_37:
  case CudaArch::SM_50:
  case CudaArch::SM_52:
  case CudaArch::SM_53:
    return CudaVersion::CUDA_70;
  case CudaArch::SM_60:
  case CudaArch::SM_61:
  case CudaArch::SM_62:
    return CudaVersion::CUDA_80;
  case CudaArch::SM_70:
    return CudaVersion::CUDA_90;
  case CudaArch::SM_72:
    return CudaVersion::CUDA_91;
  case CudaArch::SM_75:
    return CudaVersion::CUDA_100;
  case CudaArch::SM_80:
    return CudaVersion::CUDA_110;
  case CudaArch::SM_86:
    return CudaVersion::CUDA_111;
  case CudaArch::SM_87:
    return CudaVersion::CUDA_114;
  case CudaArch::SM_89:
  case CudaArch::SM_90:
    return CudaVersion::CUDA_118;
  default:
    llvm_unreachable("invalid enum");
  }
}

CudaVersion MaxVersionForCudaArch(CudaArch A) {
  // AMD GPUs do not depend on CUDA versions.
  if (IsAMDGpuArch(A))
    return CudaVersion::NEW;

  switch (A) {
  case CudaArch::UNKNOWN:
    return CudaVersion::UNKNOWN;
  case CudaArch::SM_20:
  case CudaArch::SM_21:
    return CudaVersion::CUDA_80;
  case CudaArch::SM_30:
  case CudaArch::SM_32:
    return CudaVersion::CUDA_102;
  case CudaArch::SM_35:
  case CudaArch::SM_37:
    return CudaVersion::CUDA_118;
  default:
    return CudaVersion::NEW;
  }
}

bool CudaFeatureEnabled(llvm::VersionTuple Version, CudaFeature Feature) {
  return CudaFeatureEnabled(ToCudaVersion(Version), Feature);
}

bool CudaFeatureEnabled(CudaVersion Version, CudaFeature Feature) {
  switch (Feature) {
  case CudaFeature::CUDA_USES_NEW_LAUNCH:
    return Version >= CudaVersion::CUDA_92;
  case CudaFeature::CUDA_USES_FATBIN_REGISTER_END:
    return Version >= CudaVersion::CUDA_101;
  }
  llvm_unreachable("Unknown CUDA feature.");
}
} // namespace clang