Home | Sort by: relevance | last modified time | path
    Searched refs:CUDA (Results 1 - 25 of 56) sorted by relevance

1 2 3

  /src/external/apache2/llvm/dist/clang/lib/Frontend/
FrontendOptions.cpp 22 .Case("cui", InputKind(Language::CUDA).getPreprocessed())
33 .Cases("cu", "cuh", Language::CUDA)
CompilerInvocation.cpp 1798 // When linking CUDA bitcode, propagate function attributes so that
2542 case Language::CUDA:
2543 Lang = "cuda";
2731 .Case("cuda", Language::CUDA)
3099 case Language::CUDA:
3195 Opts.CUDA = IK.getLanguage() == Language::CUDA || Opts.HIP;
3206 } else if (Opts.CUDA) {
3244 case Language::CUDA
    [all...]
CompilerInstance.cpp 108 // other side of CUDA/OpenMP/SYCL compilation.
110 (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
474 // Initialize the header search object. In CUDA compilations, we use the aux
476 // find the host headers in order to compile the CUDA code.
478 if (PP->getTargetInfo().getTriple().getOS() == llvm::Triple::CUDA &&
990 if (getLangOpts().CUDA) {
1029 if (LangOpts.CUDA)
1030 return Language::CUDA;
InitPreprocessor.cpp 493 if (LangOpts.CUDA && !LangOpts.HIP)
1149 // CUDA device path compilation
1156 // We need to communicate this to our CUDA header wrapper, which in turn
1157 // informs the proper CUDA headers of this choice.
1211 if ((LangOpts.CUDA || LangOpts.OpenMPIsDevice || LangOpts.SYCLIsDevice) &&
FrontendActions.cpp 872 case Language::CUDA:
  /src/external/apache2/llvm/dist/llvm/lib/Target/NVPTX/
NVPTXSubtarget.cpp 40 // Set default to PTX 3.2 (CUDA 5.5)
56 // Enable handles for Kepler+, where CUDA supports indirect surfaces and
58 if (TM.getDrvInterface() == NVPTX::CUDA)
NVPTX.h 72 CUDA
NVPTXLowerArgs.cpp 12 // http://docs.nvidia.com/cuda/parallel-thread-execution/#state-spaces
30 // 1. Convert non-byval pointer arguments of CUDA kernels to pointers in the
130 return "Lower pointer arguments of CUDA kernels";
332 if (TM && TM->getDrvInterface() == NVPTX::CUDA) {
356 else if (TM && TM->getDrvInterface() == NVPTX::CUDA)
NVPTXReplaceImageHandles.cpp 148 if (TM.getDrvInterface() == NVPTX::CUDA) {
149 // For CUDA, we preserve the param loads coming from function arguments
NVPTXAsmPrinter.h 286 // Since the address value should always be generic in CUDA C and always
295 NVPTX::CUDA) {}
NVPTXTargetMachine.cpp 128 drvInterface = NVPTX::CUDA;
NVPTXAsmPrinter.cpp 657 * Currently, this is valid for CUDA shared variables, which have local
957 if (static_cast<NVPTXTargetMachine &>(TM).getDrvInterface() == NVPTX::CUDA) {
1483 NVPTX::CUDA) {
  /src/external/apache2/llvm/dist/clang/include/clang/Basic/
LangStandard.h 36 CUDA,
  /src/external/apache2/llvm/dist/clang/lib/AST/
MicrosoftCXXABI.cpp 96 if (Context.getLangOpts().CUDA && Context.getAuxTargetInfo()) {
162 if (Context.getLangOpts().CUDA && Context.getAuxTargetInfo()) {
  /src/external/apache2/llvm/dist/clang/lib/Sema/
SemaCUDA.cpp 1 //===--- SemaCUDA.cpp - Semantic Analysis for CUDA constructs -------------===//
9 /// This file implements semantic analysis for CUDA constructs.
16 #include "clang/Basic/Cuda.h"
38 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
43 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
115 /// IdentifyCUDATarget - Determine the CUDA compilation target for this function
144 /// IdentifyTarget - Determine the CUDA compilation target for this variable.
173 // * CUDA Call preference tabl
    [all...]
SemaLambda.cpp 464 if (!MCtx && getLangOpts().CUDA) {
465 // Force lambda numbering in CUDA/HIP as we need to name lambdas following
996 // CUDA lambdas get implicit host and device attributes.
997 if (getLangOpts().CUDA)
1871 if (LangOpts.CUDA)
  /src/external/apache2/llvm/dist/clang/lib/Basic/
Builtins.cpp 78 bool CUDAUnsupported = !LangOpts.CUDA && BuiltinInfo.Langs == CUDA_LANG;
  /src/external/apache2/llvm/dist/clang/include/clang/Sema/
SemaInternal.h 41 // Helper function to check whether D's attributes match current CUDA mode.
43 // ignored during this CUDA compilation pass.
45 if (!LangOpts.CUDA || !D)
  /src/external/apache2/llvm/dist/clang/lib/CodeGen/
CGOpenMPRuntimeGPU.h 368 /// Target codegen is specialized based on two data-sharing modes: CUDA, in
373 /// CUDA data sharing mode.
374 CUDA,
CodeGenModule.cpp 145 if (LangOpts.CUDA)
490 if (Context.getLangOpts().CUDA && CUDARuntime) {
543 // CUDA/HIP device and host libraries are different. Currently there is no
850 // As CUDA builtin surface/texture types are replaced, skip generating TBAA
1328 // In CUDA/HIP device compilation with -fgpu-rdc, the mangled name of a
2379 // Emit CUDA/HIP static device variables referenced by host code only.
2382 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
2855 // If this is CUDA, be selective about which declarations we emit.
2856 if (LangOpts.CUDA) {
2867 // device-side variables because the CUDA runtime needs thei
    [all...]
CodeGenPGO.cpp 791 // Skip CUDA/HIP kernel launch stub functions.
792 if (CGM.getLangOpts().CUDA && !CGM.getLangOpts().CUDAIsDevice &&
860 // Skip host-only functions in the CUDA device compilation and device-only
864 if (CGM.getLangOpts().CUDA &&
CGDeclCXX.cpp 193 // For example, in the above CUDA code, the static local variable s has a
456 // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__,
647 assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
  /src/external/apache2/llvm/dist/llvm/include/llvm/ADT/
Triple.h 188 CUDA, // NVIDIA CUDA
  /src/external/apache2/llvm/dist/clang/lib/Headers/
__clang_cuda_math.h 1 /*===---- __clang_cuda_math.h - Device-side CUDA math support --------------===
12 #error "This file is for CUDA compilation only."
17 #error This file is intended to be used with CUDA-9+ only.
  /src/external/apache2/llvm/dist/llvm/lib/Support/
Triple.cpp 195 case CUDA: return "cuda";
527 .StartsWith("cuda", Triple::CUDA)

Completed in 131 milliseconds

1 2 3