// REQUIRES: nvptx-registered-target
// REQUIRES: x86-registered-target

// RUN: %clang_cc1 "-triple" "x86_64-unknown-linux-gnu" "-aux-triple" "nvptx64-nvidia-cuda" \
// RUN:    "-target-cpu" "x86-64" -fsyntax-only -verify=scalar -Wno-unused %s
// RUN: %clang_cc1 "-aux-triple" "x86_64-unknown-linux-gnu" "-triple" "nvptx64-nvidia-cuda" \
// RUN:    -fcuda-is-device "-aux-target-cpu" "x86-64" -fsyntax-only -verify=scalar -Wno-unused %s

#include "Inputs/cuda.h"

__device__ void test(bool b, __bf16 *out, __bf16 in) {
  __bf16 bf16 = in; // No error on using the type itself.

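  // Arithmetic using only __bf16 operands should produce no diagnostics.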
  bf16 + bf16;
  bf16 - bf16;
  bf16 * bf16;
  bf16 / bf16;

  __fp16 fp16;

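  // Mixed __bf16/__fp16 arithmetic should also produce no diagnostics;
  // only direct assignment between the two types is diagnosed below.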
  bf16 + fp16;
  fp16 + bf16;
  bf16 - fp16;
  fp16 - bf16;
  bf16 * fp16;
  fp16 * bf16;
  bf16 / fp16;
  fp16 / bf16;
  bf16 = fp16; // scalar-error {{assigning to '__bf16' from incompatible type '__fp16'}}
  fp16 = bf16; // scalar-error {{assigning to '__fp16' from incompatible type '__bf16'}}
  bf16 + (b ? fp16 : bf16);
  *out = bf16;
}