//===--- Cuda.cpp - Cuda Tool and ToolChain Implementations -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "Cuda.h"
#include "CommonArgs.h"
#include "clang/Basic/Cuda.h"
#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Distro.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Config/llvm-config.h" // for LLVM_HOST_TRIPLE
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormatAdapters.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/TargetParser/Host.h"
#include "llvm/TargetParser/TargetParser.h"
#include <system_error>

using namespace clang::driver;
using namespace clang::driver::toolchains;
using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;

namespace {

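// cuda.h encodes CUDA_VERSION as major * 1000 + minor * 10, e.g. 11040 for
// CUDA-11.4. Map a raw value onto the newest CudaVersion that is not newer
// than the release it denotes.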
CudaVersion getCudaVersion(uint32_t raw_version) {
  if (raw_version < 7050)
    return CudaVersion::CUDA_70;
  if (raw_version < 8000)
    return CudaVersion::CUDA_75;
  if (raw_version < 9000)
    return CudaVersion::CUDA_80;
  if (raw_version < 9010)
    return CudaVersion::CUDA_90;
  if (raw_version < 9020)
    return CudaVersion::CUDA_91;
  if (raw_version < 10000)
    return CudaVersion::CUDA_92;
  if (raw_version < 10010)
    return CudaVersion::CUDA_100;
  if (raw_version < 10020)
    return CudaVersion::CUDA_101;
  if (raw_version < 11000)
    return CudaVersion::CUDA_102;
  if (raw_version < 11010)
    return CudaVersion::CUDA_110;
  if (raw_version < 11020)
    return CudaVersion::CUDA_111;
  if (raw_version < 11030)
    return CudaVersion::CUDA_112;
  if (raw_version < 11040)
    return CudaVersion::CUDA_113;
  if (raw_version < 11050)
    return CudaVersion::CUDA_114;
  if (raw_version < 11060)
    return CudaVersion::CUDA_115;
  if (raw_version < 11070)
    return CudaVersion::CUDA_116;
  if (raw_version < 11080)
    return CudaVersion::CUDA_117;
  if (raw_version < 11090)
    return CudaVersion::CUDA_118;
  if (raw_version < 12010)
    return CudaVersion::CUDA_120;
  if (raw_version < 12020)
    return CudaVersion::CUDA_121;
  if (raw_version < 12030)
    return CudaVersion::CUDA_122;
  if (raw_version < 12040)
    return CudaVersion::CUDA_123;
  if (raw_version < 12050)
    return CudaVersion::CUDA_124;
  if (raw_version < 12060)
    return CudaVersion::CUDA_125;
  if (raw_version < 12070)
    return CudaVersion::CUDA_126;
  if (raw_version < 12090)
    return CudaVersion::CUDA_128;
  return CudaVersion::NEW;
}

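// Parse the CUDA_VERSION macro out of the given cuda.h contents, e.g. the
// line "#define CUDA_VERSION 12040" yields CudaVersion::CUDA_124. Returns
// CudaVersion::UNKNOWN if no such line is found.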
CudaVersion parseCudaHFile(llvm::StringRef Input) {
  // Helper lambda which skips the given words if the line starts with them,
  // or returns std::nullopt otherwise.
  auto StartsWithWords =
      [](llvm::StringRef Line,
         const SmallVector<StringRef, 3> words) -> std::optional<StringRef> {
    for (StringRef word : words) {
      if (!Line.consume_front(word))
        return {};
      Line = Line.ltrim();
    }
    return Line;
  };

  Input = Input.ltrim();
  while (!Input.empty()) {
    if (auto Line =
            StartsWithWords(Input.ltrim(), {"#", "define", "CUDA_VERSION"})) {
      uint32_t RawVersion;
      Line->consumeInteger(10, RawVersion);
      return getCudaVersion(RawVersion);
    }
    // Find the next non-empty line.
    Input = Input.drop_front(Input.find_first_of("\n\r")).ltrim();
  }
  return CudaVersion::UNKNOWN;
}
} // namespace

void CudaInstallationDetector::WarnIfUnsupportedVersion() {
  if (Version > CudaVersion::PARTIALLY_SUPPORTED) {
    std::string VersionString = CudaVersionToString(Version);
    if (!VersionString.empty())
      VersionString.insert(0, " ");
    D.Diag(diag::warn_drv_new_cuda_version)
        << VersionString
        << (CudaVersion::PARTIALLY_SUPPORTED != CudaVersion::FULLY_SUPPORTED)
        << CudaVersionToString(CudaVersion::PARTIALLY_SUPPORTED);
  } else if (Version > CudaVersion::FULLY_SUPPORTED)
    D.Diag(diag::warn_drv_partially_supported_cuda_version)
        << CudaVersionToString(Version);
}

CudaInstallationDetector::CudaInstallationDetector(
    const Driver &D, const llvm::Triple &HostTriple,
    const llvm::opt::ArgList &Args)
    : D(D) {
  struct Candidate {
    std::string Path;
    bool StrictChecking;

    Candidate(std::string Path, bool StrictChecking = false)
        : Path(Path), StrictChecking(StrictChecking) {}
  };
  SmallVector<Candidate, 4> Candidates;

  // In decreasing order so we prefer newer versions to older versions.
  std::initializer_list<const char *> Versions = {"8.0", "7.5", "7.0"};
  auto &FS = D.getVFS();

  if (Args.hasArg(clang::driver::options::OPT_cuda_path_EQ)) {
    Candidates.emplace_back(
        Args.getLastArgValue(clang::driver::options::OPT_cuda_path_EQ).str());
  } else if (HostTriple.isOSWindows()) {
    for (const char *Ver : Versions)
      Candidates.emplace_back(
          D.SysRoot + "/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v" +
          Ver);
  } else {
    if (!Args.hasArg(clang::driver::options::OPT_cuda_path_ignore_env)) {
      // Try to find the ptxas binary. If the executable is located in a
      // directory called 'bin/', its parent directory might be a good guess
      // for a valid CUDA installation.
      // However, some distributions might install 'ptxas' to /usr/bin. In
      // that case the candidate would be '/usr', which passes the following
      // checks because '/usr/include' exists as well. To avoid this case, we
      // always check for the directory potentially containing files for
      // libdevice, even if the user passes -nocudalib.
      if (llvm::ErrorOr<std::string> ptxas =
              llvm::sys::findProgramByName("ptxas")) {
        SmallString<256> ptxasAbsolutePath;
        llvm::sys::fs::real_path(*ptxas, ptxasAbsolutePath);

        StringRef ptxasDir = llvm::sys::path::parent_path(ptxasAbsolutePath);
        if (llvm::sys::path::filename(ptxasDir) == "bin")
          Candidates.emplace_back(
              std::string(llvm::sys::path::parent_path(ptxasDir)),
              /*StrictChecking=*/true);
      }
    }

    Candidates.emplace_back(D.SysRoot + "/usr/local/cuda");
    for (const char *Ver : Versions)
      Candidates.emplace_back(D.SysRoot + "/usr/local/cuda-" + Ver);

    Distro Dist(FS, llvm::Triple(llvm::sys::getProcessTriple()));
    if (Dist.IsDebian() || Dist.IsUbuntu())
      // Special case for Debian to have nvidia-cuda-toolkit work
      // out of the box. More info on http://bugs.debian.org/882505
      Candidates.emplace_back(D.SysRoot + "/usr/lib/cuda");
  }

  bool NoCudaLib = Args.hasArg(options::OPT_nogpulib);

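  // Walk the candidates in order and accept the first one that has the
  // expected directory layout and, unless -nogpulib relaxes the check, at
  // least one usable libdevice file.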
  for (const auto &Candidate : Candidates) {
    InstallPath = Candidate.Path;
    if (InstallPath.empty() || !FS.exists(InstallPath))
      continue;

    BinPath = InstallPath + "/bin";
    IncludePath = InstallPath + "/include";
    LibDevicePath = InstallPath + "/nvvm/libdevice";

    if (!(FS.exists(IncludePath) && FS.exists(BinPath)))
      continue;
    bool CheckLibDevice = (!NoCudaLib || Candidate.StrictChecking);
    if (CheckLibDevice && !FS.exists(LibDevicePath))
      continue;

    Version = CudaVersion::UNKNOWN;
    if (auto CudaHFile = FS.getBufferForFile(InstallPath + "/include/cuda.h"))
      Version = parseCudaHFile((*CudaHFile)->getBuffer());
    // As the last resort, make an educated guess between CUDA-7.0, which had
    // old-style libdevice bitcode, and an unknown recent CUDA version.
    if (Version == CudaVersion::UNKNOWN) {
      Version = FS.exists(LibDevicePath + "/libdevice.10.bc")
                    ? CudaVersion::NEW
                    : CudaVersion::CUDA_70;
    }

    if (Version >= CudaVersion::CUDA_90) {
      // CUDA-9+ uses a single libdevice file for all GPU variants.
      std::string FilePath = LibDevicePath + "/libdevice.10.bc";
      if (FS.exists(FilePath)) {
        for (int Arch = (int)OffloadArch::SM_30, E = (int)OffloadArch::LAST;
             Arch < E; ++Arch) {
          OffloadArch OA = static_cast<OffloadArch>(Arch);
          if (!IsNVIDIAOffloadArch(OA))
            continue;
          std::string OffloadArchName(OffloadArchToString(OA));
          LibDeviceMap[OffloadArchName] = FilePath;
        }
      }
    } else {
      std::error_code EC;
      for (llvm::vfs::directory_iterator LI = FS.dir_begin(LibDevicePath, EC),
                                         LE;
           !EC && LI != LE; LI = LI.increment(EC)) {
        StringRef FilePath = LI->path();
        StringRef FileName = llvm::sys::path::filename(FilePath);
        // Process all bitcode filenames that look like
        // libdevice.compute_XX.YY.bc
        const StringRef LibDeviceName = "libdevice.";
        if (!(FileName.starts_with(LibDeviceName) &&
              FileName.ends_with(".bc")))
          continue;
        StringRef GpuArch = FileName.slice(
            LibDeviceName.size(), FileName.find('.', LibDeviceName.size()));
        LibDeviceMap[GpuArch] = FilePath.str();
        // Insert map entries for specific devices with this compute
        // capability. NVCC's choice of the libdevice library version is
        // rather peculiar and depends on the CUDA version.
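        // For example, CUDA older than 8.0 pairs sm_5x devices with the
        // compute_30 variant of libdevice, while CUDA-8.0 and newer pair
        // them with compute_50, as the version checks below mirror.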
        if (GpuArch == "compute_20") {
          LibDeviceMap["sm_20"] = std::string(FilePath);
          LibDeviceMap["sm_21"] = std::string(FilePath);
          LibDeviceMap["sm_32"] = std::string(FilePath);
        } else if (GpuArch == "compute_30") {
          LibDeviceMap["sm_30"] = std::string(FilePath);
          if (Version < CudaVersion::CUDA_80) {
            LibDeviceMap["sm_50"] = std::string(FilePath);
            LibDeviceMap["sm_52"] = std::string(FilePath);
            LibDeviceMap["sm_53"] = std::string(FilePath);
          }
          LibDeviceMap["sm_60"] = std::string(FilePath);
          LibDeviceMap["sm_61"] = std::string(FilePath);
          LibDeviceMap["sm_62"] = std::string(FilePath);
        } else if (GpuArch == "compute_35") {
          LibDeviceMap["sm_35"] = std::string(FilePath);
          LibDeviceMap["sm_37"] = std::string(FilePath);
        } else if (GpuArch == "compute_50") {
          if (Version >= CudaVersion::CUDA_80) {
            LibDeviceMap["sm_50"] = std::string(FilePath);
            LibDeviceMap["sm_52"] = std::string(FilePath);
            LibDeviceMap["sm_53"] = std::string(FilePath);
          }
        }
      }
    }

    // Check that we have found at least one libdevice that we can link in if
    // -nocudalib hasn't been specified.
    if (LibDeviceMap.empty() && !NoCudaLib)
      continue;

    IsValid = true;
    break;
  }
}

void CudaInstallationDetector::AddCudaIncludeArgs(
    const ArgList &DriverArgs, ArgStringList &CC1Args) const {
  if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
    // Add cuda_wrappers/* to our system include path. This lets us wrap
    // standard library headers.
    SmallString<128> P(D.ResourceDir);
    llvm::sys::path::append(P, "include");
    llvm::sys::path::append(P, "cuda_wrappers");
    CC1Args.push_back("-internal-isystem");
    CC1Args.push_back(DriverArgs.MakeArgString(P));
  }

  if (DriverArgs.hasArg(options::OPT_nogpuinc))
    return;

  if (!isValid()) {
    D.Diag(diag::err_drv_no_cuda_installation);
    return;
  }

  CC1Args.push_back("-include");
  CC1Args.push_back("__clang_cuda_runtime_wrapper.h");
}

void CudaInstallationDetector::CheckCudaVersionSupportsArch(
    OffloadArch Arch) const {
  if (Arch == OffloadArch::UNKNOWN || Version == CudaVersion::UNKNOWN ||
      ArchsWithBadVersion[(int)Arch])
    return;

  auto MinVersion = MinVersionForOffloadArch(Arch);
  auto MaxVersion = MaxVersionForOffloadArch(Arch);
  if (Version < MinVersion || Version > MaxVersion) {
    ArchsWithBadVersion[(int)Arch] = true;
    D.Diag(diag::err_drv_cuda_version_unsupported)
        << OffloadArchToString(Arch) << CudaVersionToString(MinVersion)
        << CudaVersionToString(MaxVersion) << InstallPath
        << CudaVersionToString(Version);
  }
}

void CudaInstallationDetector::print(raw_ostream &OS) const {
  if (isValid())
    OS << "Found CUDA installation: " << InstallPath << ", version "
       << CudaVersionToString(Version) << "\n";
}

namespace {
/// Debug info level for the NVPTX devices. We may need to emit a different
/// debug info level for the host and for the device itself. This type
/// controls emission of the debug info for the devices: it either disables
/// debug info emission completely, emits debug directives only, or emits the
/// same debug info as for the host.
enum DeviceDebugInfoLevel {
  DisableDebugInfo,        /// Do not emit debug info for the devices.
  DebugDirectivesOnly,     /// Emit only debug directives.
  EmitSameDebugInfoAsHost, /// Use the same debug info level just like for the
                           /// host.
};
} // anonymous namespace

/// Determine the debug info level for the NVPTX devices. If debug info is
/// disabled for both host and device (-g0/-ggdb0 or no debug options at all),
/// no debug info is emitted. If only debug directives are requested for both
/// host and device (-gline-directives-only), or device debug info is
/// effectively disabled (optimization is on and --cuda-noopt-device-debug was
/// not specified), only debug directives are emitted for the device.
/// Otherwise, use the same debug info level as for the host (limited to the
/// DWARF2 standard, which is all that is supported).
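/// For example: '-g0' yields DisableDebugInfo; '-g' alone yields
/// EmitSameDebugInfoAsHost; '-g -O2' yields DebugDirectivesOnly unless
/// --cuda-noopt-device-debug is also given.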
static DeviceDebugInfoLevel mustEmitDebugInfo(const ArgList &Args) {
  const Arg *A = Args.getLastArg(options::OPT_O_Group);
  bool IsDebugEnabled = !A || A->getOption().matches(options::OPT_O0) ||
                        Args.hasFlag(options::OPT_cuda_noopt_device_debug,
                                     options::OPT_no_cuda_noopt_device_debug,
                                     /*Default=*/false);
  if (const Arg *A = Args.getLastArg(options::OPT_g_Group)) {
    const Option &Opt = A->getOption();
    if (Opt.matches(options::OPT_gN_Group)) {
      if (Opt.matches(options::OPT_g0) || Opt.matches(options::OPT_ggdb0))
        return DisableDebugInfo;
      if (Opt.matches(options::OPT_gline_directives_only))
        return DebugDirectivesOnly;
    }
    return IsDebugEnabled ? EmitSameDebugInfoAsHost : DebugDirectivesOnly;
  }
  return willEmitRemarks(Args) ? DebugDirectivesOnly : DisableDebugInfo;
}

void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
                                    const InputInfo &Output,
                                    const InputInfoList &Inputs,
                                    const ArgList &Args,
                                    const char *LinkingOutput) const {
  const auto &TC =
      static_cast<const toolchains::NVPTXToolChain &>(getToolChain());
  assert(TC.getTriple().isNVPTX() && "Wrong platform");

  StringRef GPUArchName;
  // If this is a CUDA action we need to extract the device architecture from
  // the job's associated architecture; otherwise use the -march=arch option.
  // This option may come from the -Xopenmp-target flag or the default value.
  if (JA.isDeviceOffloading(Action::OFK_Cuda)) {
    GPUArchName = JA.getOffloadingArch();
  } else {
    GPUArchName = Args.getLastArgValue(options::OPT_march_EQ);
    if (GPUArchName.empty()) {
      C.getDriver().Diag(diag::err_drv_offload_missing_gpu_arch)
          << getToolChain().getArchName() << getShortName();
      return;
    }
  }

  // Obtain architecture from the action.
  OffloadArch gpu_arch = StringToOffloadArch(GPUArchName);
  assert(gpu_arch != OffloadArch::UNKNOWN &&
         "Device action expected to have an architecture.");

  // Check that our installation's ptxas supports gpu_arch.
  if (!Args.hasArg(options::OPT_no_cuda_version_check)) {
    TC.CudaInstallation.CheckCudaVersionSupportsArch(gpu_arch);
  }

  ArgStringList CmdArgs;
  CmdArgs.push_back(TC.getTriple().isArch64Bit() ? "-m64" : "-m32");
  DeviceDebugInfoLevel DIKind = mustEmitDebugInfo(Args);
  if (DIKind == EmitSameDebugInfoAsHost) {
    // ptxas does not accept the -g option if optimization is enabled, so
    // we ignore the compiler's -O* options if we want debug info.
    CmdArgs.push_back("-g");
    CmdArgs.push_back("--dont-merge-basicblocks");
    CmdArgs.push_back("--return-at-end");
  } else if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
    // Map the -O we received to -O{0,1,2,3}.
    //
    // TODO: Perhaps we should map host -O2 to ptxas -O3. -O3 is ptxas's
    // default, so it may correspond more closely to the spirit of clang -O2.

    // -O3 seems like the least-bad option when -Osomething is specified to
    // clang but it isn't handled below.
    StringRef OOpt = "3";
    if (A->getOption().matches(options::OPT_O4) ||
        A->getOption().matches(options::OPT_Ofast))
      OOpt = "3";
    else if (A->getOption().matches(options::OPT_O0))
      OOpt = "0";
    else if (A->getOption().matches(options::OPT_O)) {
      // -Os, -Oz, and -O(anything else) map to -O2, for lack of better
      // options.
      OOpt = llvm::StringSwitch<const char *>(A->getValue())
                 .Case("1", "1")
                 .Case("2", "2")
                 .Case("3", "3")
                 .Case("s", "2")
                 .Case("z", "2")
                 .Default("2");
    }
    CmdArgs.push_back(Args.MakeArgString(llvm::Twine("-O") + OOpt));
  } else {
    // If no -O was passed, pass -O0 to ptxas -- no opt flag should correspond
    // to no optimizations, but ptxas's default is -O3.
    CmdArgs.push_back("-O0");
  }
  if (DIKind == DebugDirectivesOnly)
    CmdArgs.push_back("-lineinfo");

  // Pass -v to ptxas if it was passed to the driver.
  if (Args.hasArg(options::OPT_v))
    CmdArgs.push_back("-v");

  CmdArgs.push_back("--gpu-name");
  CmdArgs.push_back(Args.MakeArgString(OffloadArchToString(gpu_arch)));
  CmdArgs.push_back("--output-file");
  std::string OutputFileName = TC.getInputFilename(Output);

  if (Output.isFilename() && OutputFileName != Output.getFilename())
    C.addTempFile(Args.MakeArgString(OutputFileName));

  CmdArgs.push_back(Args.MakeArgString(OutputFileName));
  for (const auto &II : Inputs)
    CmdArgs.push_back(Args.MakeArgString(II.getFilename()));

  for (const auto &A : Args.getAllArgValues(options::OPT_Xcuda_ptxas))
    CmdArgs.push_back(Args.MakeArgString(A));

  bool Relocatable;
  if (JA.isOffloading(Action::OFK_OpenMP))
    // In OpenMP we need to generate relocatable code.
    Relocatable = Args.hasFlag(options::OPT_fopenmp_relocatable_target,
                               options::OPT_fnoopenmp_relocatable_target,
                               /*Default=*/true);
  else if (JA.isOffloading(Action::OFK_Cuda))
    // In CUDA we generate relocatable code only if -fgpu-rdc was specified.
    Relocatable = Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
                               /*Default=*/false);
  else
    // Otherwise, we are compiling directly and should create linkable output.
    Relocatable = true;

  if (Relocatable)
    CmdArgs.push_back("-c");

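  // For illustration only, a job built above looks roughly like:
  //   ptxas -m64 -O2 --gpu-name sm_70 --output-file X.cubin X.s [-c]
  // with the exact flag set depending on the options handled above.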
  const char *Exec;
  if (Arg *A = Args.getLastArg(options::OPT_ptxas_path_EQ))
    Exec = A->getValue();
  else
    Exec = Args.MakeArgString(TC.GetProgramPath("ptxas"));
  C.addCommand(std::make_unique<Command>(
      JA, *this,
      ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
                          "--options-file"},
      Exec, CmdArgs, Inputs, Output));
}

static bool shouldIncludePTX(const ArgList &Args, StringRef InputArch) {
  // The new driver does not include PTX by default to avoid overhead.
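  // The last matching --[no-]cuda-include-ptx= flag wins per architecture,
  // e.g. '--no-cuda-include-ptx=all --cuda-include-ptx=sm_70' keeps PTX only
  // for sm_70.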
  bool includePTX = !Args.hasFlag(options::OPT_offload_new_driver,
                                  options::OPT_no_offload_new_driver, true);
  for (Arg *A : Args.filtered(options::OPT_cuda_include_ptx_EQ,
                              options::OPT_no_cuda_include_ptx_EQ)) {
    A->claim();
    const StringRef ArchStr = A->getValue();
    if (A->getOption().matches(options::OPT_cuda_include_ptx_EQ) &&
        (ArchStr == "all" || ArchStr == InputArch))
      includePTX = true;
    else if (A->getOption().matches(options::OPT_no_cuda_include_ptx_EQ) &&
             (ArchStr == "all" || ArchStr == InputArch))
      includePTX = false;
  }
  return includePTX;
}

// All inputs to this linker must be from CudaDeviceActions, as we need to look
// at the Inputs' Actions in order to figure out which GPU architecture they
// correspond to.
void NVPTX::FatBinary::ConstructJob(Compilation &C, const JobAction &JA,
                                    const InputInfo &Output,
                                    const InputInfoList &Inputs,
                                    const ArgList &Args,
                                    const char *LinkingOutput) const {
  const auto &TC =
      static_cast<const toolchains::CudaToolChain &>(getToolChain());
  assert(TC.getTriple().isNVPTX() && "Wrong platform");

  ArgStringList CmdArgs;
  if (TC.CudaInstallation.version() <= CudaVersion::CUDA_100)
    CmdArgs.push_back("--cuda");
  CmdArgs.push_back(TC.getTriple().isArch64Bit() ? "-64" : "-32");
  CmdArgs.push_back(Args.MakeArgString("--create"));
  CmdArgs.push_back(Args.MakeArgString(Output.getFilename()));
  if (mustEmitDebugInfo(Args) == EmitSameDebugInfoAsHost)
    CmdArgs.push_back("-g");

  for (const auto &II : Inputs) {
    auto *A = II.getAction();
    assert(A->getInputs().size() == 1 &&
           "Device offload action is expected to have a single input");
    const char *gpu_arch_str = A->getOffloadingArch();
    assert(gpu_arch_str &&
           "Device action expected to have an associated GPU architecture!");
    OffloadArch gpu_arch = StringToOffloadArch(gpu_arch_str);

    if (II.getType() == types::TY_PP_Asm &&
        !shouldIncludePTX(Args, gpu_arch_str))
      continue;
    // We need to pass an Arch of the form "sm_XX" for cubin files and
    // "compute_XX" for ptx.
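    // e.g. '--image=profile=compute_70,file=X.ptx' for PTX inputs, or
    // '--image=profile=sm_70,file=X.cubin' for cubins.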
    const char *Arch = (II.getType() == types::TY_PP_Asm)
                           ? OffloadArchToVirtualArchString(gpu_arch)
                           : gpu_arch_str;
    CmdArgs.push_back(
        Args.MakeArgString(llvm::Twine("--image=profile=") + Arch +
                           ",file=" + getToolChain().getInputFilename(II)));
  }

  for (const auto &A : Args.getAllArgValues(options::OPT_Xcuda_fatbinary))
    CmdArgs.push_back(Args.MakeArgString(A));

  const char *Exec = Args.MakeArgString(TC.GetProgramPath("fatbinary"));
  C.addCommand(std::make_unique<Command>(
      JA, *this,
      ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
                          "--options-file"},
      Exec, CmdArgs, Inputs, Output));
}

void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
                                 const InputInfo &Output,
                                 const InputInfoList &Inputs,
                                 const ArgList &Args,
                                 const char *LinkingOutput) const {
  const auto &TC =
      static_cast<const toolchains::NVPTXToolChain &>(getToolChain());
  ArgStringList CmdArgs;

  assert(TC.getTriple().isNVPTX() && "Wrong platform");

  assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
  if (Output.isFilename()) {
    CmdArgs.push_back("-o");
    CmdArgs.push_back(Output.getFilename());
  }

  if (mustEmitDebugInfo(Args) == EmitSameDebugInfoAsHost)
    CmdArgs.push_back("-g");

  if (Args.hasArg(options::OPT_v))
    CmdArgs.push_back("-v");

  StringRef GPUArch = Args.getLastArgValue(options::OPT_march_EQ);
  if (GPUArch.empty() && !C.getDriver().isUsingLTO()) {
    C.getDriver().Diag(diag::err_drv_offload_missing_gpu_arch)
        << getToolChain().getArchName() << getShortName();
    return;
  }

  if (!GPUArch.empty()) {
    CmdArgs.push_back("-arch");
    CmdArgs.push_back(Args.MakeArgString(GPUArch));
  }

  if (Args.hasArg(options::OPT_ptxas_path_EQ))
    CmdArgs.push_back(Args.MakeArgString(
        "--ptxas-path=" + Args.getLastArgValue(options::OPT_ptxas_path_EQ)));

  if (Args.hasArg(options::OPT_cuda_path_EQ))
    CmdArgs.push_back(Args.MakeArgString(
        "--cuda-path=" + Args.getLastArgValue(options::OPT_cuda_path_EQ)));

  // Add paths specified in the LIBRARY_PATH environment variable as -L
  // options.
  addDirectoryList(Args, CmdArgs, "-L", "LIBRARY_PATH");

  // Add standard library search paths passed on the command line.
  Args.AddAllArgs(CmdArgs, options::OPT_L);
  getToolChain().AddFilePathLibArgs(Args, CmdArgs);
  AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);

  if (C.getDriver().isUsingLTO())
    addLTOOptions(getToolChain(), Args, CmdArgs, Output, Inputs[0],
                  C.getDriver().getLTOMode() == LTOK_Thin);

  // Forward the PTX features if the nvlink-wrapper needs it.
  std::vector<StringRef> Features;
  getNVPTXTargetFeatures(C.getDriver(), getToolChain().getTriple(), Args,
                         Features);
  CmdArgs.push_back(
      Args.MakeArgString("--plugin-opt=-mattr=" + llvm::join(Features, ",")));

  // Enable ctor / dtor lowering for the direct / freestanding NVPTX target.
  CmdArgs.append({"-mllvm", "--nvptx-lower-global-ctor-dtor"});

  // Add the default clang library path.
  SmallString<256> DefaultLibPath =
      llvm::sys::path::parent_path(TC.getDriver().Dir);
  llvm::sys::path::append(DefaultLibPath, CLANG_INSTALL_LIBDIR_BASENAME);
  CmdArgs.push_back(Args.MakeArgString(Twine("-L") + DefaultLibPath));

  if (Args.hasArg(options::OPT_stdlib))
    CmdArgs.append({"-lc", "-lm"});
  if (Args.hasArg(options::OPT_startfiles)) {
    std::optional<std::string> IncludePath = getToolChain().getStdlibPath();
    if (!IncludePath)
      IncludePath = "/lib";
    SmallString<128> P(*IncludePath);
    llvm::sys::path::append(P, "crt1.o");
    CmdArgs.push_back(Args.MakeArgString(P));
  }

  C.addCommand(std::make_unique<Command>(
      JA, *this,
      ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
                          "--options-file"},
      Args.MakeArgString(getToolChain().GetProgramPath("clang-nvlink-wrapper")),
      CmdArgs, Inputs, Output));
}

void NVPTX::getNVPTXTargetFeatures(const Driver &D, const llvm::Triple &Triple,
                                   const llvm::opt::ArgList &Args,
                                   std::vector<StringRef> &Features) {
  if (Args.hasArg(options::OPT_cuda_feature_EQ)) {
    StringRef PtxFeature =
        Args.getLastArgValue(options::OPT_cuda_feature_EQ, "+ptx42");
    Features.push_back(Args.MakeArgString(PtxFeature));
    return;
  }
  CudaInstallationDetector CudaInstallation(D, Triple, Args);

  // New CUDA versions often introduce new instructions that are only supported
  // by a new PTX version, so we need to raise the PTX level to enable them in
  // the NVPTX back-end.
  const char *PtxFeature = nullptr;
  switch (CudaInstallation.version()) {
#define CASE_CUDA_VERSION(CUDA_VER, PTX_VER)                                   \
  case CudaVersion::CUDA_##CUDA_VER:                                           \
    PtxFeature = "+ptx" #PTX_VER;                                              \
    break;
    CASE_CUDA_VERSION(128, 87);
    CASE_CUDA_VERSION(126, 85);
    CASE_CUDA_VERSION(125, 85);
    CASE_CUDA_VERSION(124, 84);
    CASE_CUDA_VERSION(123, 83);
    CASE_CUDA_VERSION(122, 82);
    CASE_CUDA_VERSION(121, 81);
    CASE_CUDA_VERSION(120, 80);
    CASE_CUDA_VERSION(118, 78);
    CASE_CUDA_VERSION(117, 77);
    CASE_CUDA_VERSION(116, 76);
    CASE_CUDA_VERSION(115, 75);
    CASE_CUDA_VERSION(114, 74);
    CASE_CUDA_VERSION(113, 73);
    CASE_CUDA_VERSION(112, 72);
    CASE_CUDA_VERSION(111, 71);
    CASE_CUDA_VERSION(110, 70);
    CASE_CUDA_VERSION(102, 65);
    CASE_CUDA_VERSION(101, 64);
    CASE_CUDA_VERSION(100, 63);
    CASE_CUDA_VERSION(92, 61);
    CASE_CUDA_VERSION(91, 61);
    CASE_CUDA_VERSION(90, 60);
#undef CASE_CUDA_VERSION
  // TODO: Use a specific CUDA version once it's public.
  case clang::CudaVersion::NEW:
    PtxFeature = "+ptx86";
    break;
  default:
    PtxFeature = "+ptx42";
  }
  Features.push_back(PtxFeature);
}

/// NVPTX toolchain. Our assembler is ptxas, and our linker is nvlink. This
/// operates as a stand-alone version of the NVPTX tools without the host
/// toolchain.
NVPTXToolChain::NVPTXToolChain(const Driver &D, const llvm::Triple &Triple,
                               const llvm::Triple &HostTriple,
                               const ArgList &Args, bool Freestanding = false)
    : ToolChain(D, Triple, Args), CudaInstallation(D, HostTriple, Args),
      Freestanding(Freestanding) {
  if (CudaInstallation.isValid())
    getProgramPaths().push_back(std::string(CudaInstallation.getBinPath()));
  // Look up binaries in the driver directory; this is used to
  // discover the 'nvptx-arch' executable.
  getProgramPaths().push_back(getDriver().Dir);
}

/// We only need the host triple to locate the CUDA binary utilities; use the
/// system's default triple if not provided.
NVPTXToolChain::NVPTXToolChain(const Driver &D, const llvm::Triple &Triple,
                               const ArgList &Args)
    : NVPTXToolChain(D, Triple, llvm::Triple(LLVM_HOST_TRIPLE), Args,
                     /*Freestanding=*/true) {}

llvm::opt::DerivedArgList *
NVPTXToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
                              StringRef BoundArch,
                              Action::OffloadKind OffloadKind) const {
  DerivedArgList *DAL = ToolChain::TranslateArgs(Args, BoundArch, OffloadKind);
  if (!DAL)
    DAL = new DerivedArgList(Args.getBaseArgs());

  const OptTable &Opts = getDriver().getOpts();

  for (Arg *A : Args)
    if (!llvm::is_contained(*DAL, A))
      DAL->append(A);

  if (!DAL->hasArg(options::OPT_march_EQ) && OffloadKind != Action::OFK_None) {
    DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ),
                      OffloadArchToString(OffloadArch::CudaDefault));
  } else if (DAL->getLastArgValue(options::OPT_march_EQ) == "generic" &&
             OffloadKind == Action::OFK_None) {
    DAL->eraseArg(options::OPT_march_EQ);
  } else if (DAL->getLastArgValue(options::OPT_march_EQ) == "native") {
    auto GPUsOrErr = getSystemGPUArchs(Args);
    if (!GPUsOrErr) {
      getDriver().Diag(diag::err_drv_undetermined_gpu_arch)
          << getArchName() << llvm::toString(GPUsOrErr.takeError()) << "-march";
    } else {
      if (GPUsOrErr->size() > 1)
        getDriver().Diag(diag::warn_drv_multi_gpu_arch)
            << getArchName() << llvm::join(*GPUsOrErr, ", ") << "-march";
      DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ),
                        Args.MakeArgString(GPUsOrErr->front()));
    }
  }

  return DAL;
}

void NVPTXToolChain::addClangTargetOptions(
    const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
    Action::OffloadKind DeviceOffloadingKind) const {
  // If we are compiling with a standalone NVPTX toolchain we want to try to
  // mimic a standard environment as much as possible. So we enable lowering
  // ctor / dtor functions to global symbols that can be registered.
  if (Freestanding && !getDriver().isUsingLTO())
    CC1Args.append({"-mllvm", "--nvptx-lower-global-ctor-dtor"});
}

bool NVPTXToolChain::supportsDebugInfoOption(const llvm::opt::Arg *A) const {
  const Option &O = A->getOption();
  return (O.matches(options::OPT_gN_Group) &&
          !O.matches(options::OPT_gmodules)) ||
         O.matches(options::OPT_g_Flag) ||
         O.matches(options::OPT_ggdbN_Group) || O.matches(options::OPT_ggdb) ||
         O.matches(options::OPT_gdwarf) || O.matches(options::OPT_gdwarf_2) ||
         O.matches(options::OPT_gdwarf_3) || O.matches(options::OPT_gdwarf_4) ||
         O.matches(options::OPT_gdwarf_5) ||
         O.matches(options::OPT_gcolumn_info);
}

void NVPTXToolChain::adjustDebugInfoKind(
    llvm::codegenoptions::DebugInfoKind &DebugInfoKind,
    const ArgList &Args) const {
  switch (mustEmitDebugInfo(Args)) {
  case DisableDebugInfo:
    DebugInfoKind = llvm::codegenoptions::NoDebugInfo;
    break;
  case DebugDirectivesOnly:
    DebugInfoKind = llvm::codegenoptions::DebugDirectivesOnly;
    break;
  case EmitSameDebugInfoAsHost:
    // Use the same debug info level as the host.
    break;
  }
}

Expected<SmallVector<std::string>>
NVPTXToolChain::getSystemGPUArchs(const ArgList &Args) const {
  // Detect NVIDIA GPUs available on the system.
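  // 'nvptx-arch' is expected to print one detected GPU architecture per
  // line, e.g.:
  //   sm_89
  //   sm_70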
  std::string Program;
  if (Arg *A = Args.getLastArg(options::OPT_nvptx_arch_tool_EQ))
    Program = A->getValue();
  else
    Program = GetProgramPath("nvptx-arch");

  auto StdoutOrErr = executeToolChainProgram(Program);
  if (!StdoutOrErr)
    return StdoutOrErr.takeError();

  SmallVector<std::string, 1> GPUArchs;
  for (StringRef Arch : llvm::split((*StdoutOrErr)->getBuffer(), "\n"))
    if (!Arch.empty())
      GPUArchs.push_back(Arch.str());

  if (GPUArchs.empty())
    return llvm::createStringError(std::error_code(),
                                   "No NVIDIA GPU detected in the system");

  return std::move(GPUArchs);
}

/// CUDA toolchain. Our assembler is ptxas, and our "linker" is fatbinary,
/// which isn't properly a linker but nonetheless performs the step of
/// stitching together object files from the assembler into a single blob.

CudaToolChain::CudaToolChain(const Driver &D, const llvm::Triple &Triple,
                             const ToolChain &HostTC, const ArgList &Args)
    : NVPTXToolChain(D, Triple, HostTC.getTriple(), Args), HostTC(HostTC) {}

void CudaToolChain::addClangTargetOptions(
    const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
    Action::OffloadKind DeviceOffloadingKind) const {
  HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);

  StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_march_EQ);
  assert((DeviceOffloadingKind == Action::OFK_OpenMP ||
          DeviceOffloadingKind == Action::OFK_Cuda) &&
         "Only OpenMP or CUDA offloading kinds are supported for NVIDIA GPUs.");

  CC1Args.append({"-fcuda-is-device", "-mllvm",
                  "-enable-memcpyopt-without-libcalls",
                  "-fno-threadsafe-statics"});

  // Unsized function arguments used for variadics were introduced in
  // CUDA-9.0. We still do not support generating code that actually uses
  // variadic arguments yet, but we do need to allow parsing them, as recent
  // CUDA headers rely on that.
  // https://github.com/llvm/llvm-project/issues/58410
  if (CudaInstallation.version() >= CudaVersion::CUDA_90)
    CC1Args.push_back("-fcuda-allow-variadic-functions");

  if (DriverArgs.hasFlag(options::OPT_fcuda_short_ptr,
                         options::OPT_fno_cuda_short_ptr, false))
    CC1Args.append({"-mllvm", "--nvptx-short-ptr"});

  if (DriverArgs.hasArg(options::OPT_nogpulib))
    return;

  if (DeviceOffloadingKind == Action::OFK_OpenMP &&
      DriverArgs.hasArg(options::OPT_S))
    return;

  std::string LibDeviceFile = CudaInstallation.getLibDeviceFile(GpuArch);
  if (LibDeviceFile.empty()) {
    getDriver().Diag(diag::err_drv_no_cuda_libdevice) << GpuArch;
    return;
  }

  CC1Args.push_back("-mlink-builtin-bitcode");
  CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));

  // For now, we don't use any Offload/OpenMP device runtime when we offload
  // CUDA via LLVM/Offload. We should split the Offload/OpenMP device runtime
  // and include the "generic" (or CUDA-specific) parts.
  if (DriverArgs.hasFlag(options::OPT_foffload_via_llvm,
                         options::OPT_fno_offload_via_llvm, false))
    return;

  clang::CudaVersion CudaInstallationVersion = CudaInstallation.version();

  if (CudaInstallationVersion >= CudaVersion::UNKNOWN)
    CC1Args.push_back(
        DriverArgs.MakeArgString(Twine("-target-sdk-version=") +
                                 CudaVersionToString(CudaInstallationVersion)));

  if (DeviceOffloadingKind == Action::OFK_OpenMP) {
    if (CudaInstallationVersion < CudaVersion::CUDA_92) {
      getDriver().Diag(
          diag::err_drv_omp_offload_target_cuda_version_not_support)
          << CudaVersionToString(CudaInstallationVersion);
      return;
    }

    // Link the bitcode library late if we're using device LTO.
    if (getDriver().isUsingOffloadLTO())
      return;

    addOpenMPDeviceRTL(getDriver(), DriverArgs, CC1Args, GpuArch.str(),
                       getTriple(), HostTC);
  }
}

llvm::DenormalMode CudaToolChain::getDefaultDenormalModeForType(
    const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
    const llvm::fltSemantics *FPType) const {
  if (JA.getOffloadingDeviceKind() == Action::OFK_Cuda) {
    if (FPType && FPType == &llvm::APFloat::IEEEsingle() &&
        DriverArgs.hasFlag(options::OPT_fgpu_flush_denormals_to_zero,
                           options::OPT_fno_gpu_flush_denormals_to_zero,
                           false))
      return llvm::DenormalMode::getPreserveSign();
  }

  assert(JA.getOffloadingDeviceKind() != Action::OFK_Host);
  return llvm::DenormalMode::getIEEE();
}

void CudaToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
                                       ArgStringList &CC1Args) const {
  // Check our CUDA version if we're going to include the CUDA headers.
  if (!DriverArgs.hasArg(options::OPT_nogpuinc) &&
      !DriverArgs.hasArg(options::OPT_no_cuda_version_check)) {
    StringRef Arch = DriverArgs.getLastArgValue(options::OPT_march_EQ);
    assert(!Arch.empty() && "Must have an explicit GPU arch.");
    CudaInstallation.CheckCudaVersionSupportsArch(StringToOffloadArch(Arch));
  }
  CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
}

std::string CudaToolChain::getInputFilename(const InputInfo &Input) const {
  // Input filenames are forwarded unmodified; in particular, assembly files
  // keep their .s extensions.
  return ToolChain::getInputFilename(Input);
}

llvm::opt::DerivedArgList *
CudaToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
                             StringRef BoundArch,
                             Action::OffloadKind DeviceOffloadKind) const {
  DerivedArgList *DAL =
      HostTC.TranslateArgs(Args, BoundArch, DeviceOffloadKind);
  if (!DAL)
    DAL = new DerivedArgList(Args.getBaseArgs());

  const OptTable &Opts = getDriver().getOpts();

  // For OpenMP device offloading, append derived arguments. Make sure
  // flags are not duplicated.
  // Also append the compute capability.
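  // e.g. an OpenMP offload compile without an explicit -march gets the
  // architecture reported by nvptx-arch, falling back to
  // OffloadArch::CudaDefault if detection fails.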
  if (DeviceOffloadKind == Action::OFK_OpenMP) {
    for (Arg *A : Args)
      if (!llvm::is_contained(*DAL, A))
        DAL->append(A);

    if (!DAL->hasArg(options::OPT_march_EQ)) {
      StringRef Arch = BoundArch;
      if (Arch.empty()) {
        auto ArchsOrErr = getSystemGPUArchs(Args);
        if (!ArchsOrErr) {
          std::string ErrMsg =
              llvm::formatv("{0}", llvm::fmt_consume(ArchsOrErr.takeError()));
          getDriver().Diag(diag::err_drv_undetermined_gpu_arch)
              << llvm::Triple::getArchTypeName(getArch()) << ErrMsg << "-march";
          Arch = OffloadArchToString(OffloadArch::CudaDefault);
        } else {
          Arch = Args.MakeArgString(ArchsOrErr->front());
        }
      }
      DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ), Arch);
    }

    return DAL;
  }

  for (Arg *A : Args) {
    // Make sure flags are not duplicated.
    if (!llvm::is_contained(*DAL, A)) {
      DAL->append(A);
    }
  }

  if (!BoundArch.empty()) {
    DAL->eraseArg(options::OPT_march_EQ);
    DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ),
                      BoundArch);
  }
  return DAL;
}

Tool *NVPTXToolChain::buildAssembler() const {
  return new tools::NVPTX::Assembler(*this);
}

Tool *NVPTXToolChain::buildLinker() const {
  return new tools::NVPTX::Linker(*this);
}

Tool *CudaToolChain::buildAssembler() const {
  return new tools::NVPTX::Assembler(*this);
}

Tool *CudaToolChain::buildLinker() const {
  return new tools::NVPTX::FatBinary(*this);
}

void CudaToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {
  HostTC.addClangWarningOptions(CC1Args);
}

ToolChain::CXXStdlibType
CudaToolChain::GetCXXStdlibType(const ArgList &Args) const {
  return HostTC.GetCXXStdlibType(Args);
}

void CudaToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
                                              ArgStringList &CC1Args) const {
  HostTC.AddClangSystemIncludeArgs(DriverArgs, CC1Args);

  if (!DriverArgs.hasArg(options::OPT_nogpuinc) && CudaInstallation.isValid())
    CC1Args.append(
        {"-internal-isystem",
         DriverArgs.MakeArgString(CudaInstallation.getIncludePath())});
}

void CudaToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &Args,
                                                 ArgStringList &CC1Args) const {
  HostTC.AddClangCXXStdlibIncludeArgs(Args, CC1Args);
}

void CudaToolChain::AddIAMCUIncludeArgs(const ArgList &Args,
                                        ArgStringList &CC1Args) const {
  HostTC.AddIAMCUIncludeArgs(Args, CC1Args);
}

SanitizerMask CudaToolChain::getSupportedSanitizers() const {
  // The CudaToolChain only supports sanitizers in the sense that it allows
  // sanitizer arguments on the command line if they are supported by the host
  // toolchain. The CudaToolChain will actually ignore any command line
  // arguments for any of these "supported" sanitizers. That means that no
  // sanitization of device code is actually supported at this time.
  //
  // This behavior is necessary because the host and device toolchain
  // invocations often share the command line, so the device toolchain must
  // tolerate flags meant only for the host toolchain.
  return HostTC.getSupportedSanitizers();
}

VersionTuple CudaToolChain::computeMSVCVersion(const Driver *D,
                                               const ArgList &Args) const {
  return HostTC.computeMSVCVersion(D, Args);
}