// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -o - | FileCheck -check-prefix=DEFAULT %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -munsafe-fp-atomics -emit-llvm %s -fopenmp-is-target-device -o - | FileCheck -check-prefix=UNSAFE-FP-ATOMICS %s

#pragma omp declare target

// fx/dx are the atomically-updated locations; fv/dv hold both the addend and
// the captured result of the update.
float fv, fx;
double dv, dx;

// DEFAULT-LABEL: define hidden void @_Z15atomic_fadd_f32v(
// DEFAULT-SAME: ) #[[ATTR0:[0-9]+]] {
// DEFAULT-NEXT: [[ENTRY:.*:]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load float, ptr addrspacecast (ptr addrspace(1) @fv to ptr), align 4
// DEFAULT-NEXT: [[TMP1:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @fx to ptr), float [[TMP0]] monotonic, align 4
// DEFAULT-NEXT: [[ADD:%.*]] = fadd float [[TMP1]], [[TMP0]]
// DEFAULT-NEXT: store float [[ADD]], ptr addrspacecast (ptr addrspace(1) @fv to ptr), align 4
// DEFAULT-NEXT: ret void
//
// UNSAFE-FP-ATOMICS-LABEL: define hidden void @_Z15atomic_fadd_f32v(
// UNSAFE-FP-ATOMICS-SAME: ) #[[ATTR0:[0-9]+]] {
// UNSAFE-FP-ATOMICS-NEXT: [[ENTRY:.*:]]
// UNSAFE-FP-ATOMICS-NEXT: [[TMP0:%.*]] = load float, ptr addrspacecast (ptr addrspace(1) @fv to ptr), align 4
// UNSAFE-FP-ATOMICS-NEXT: [[TMP1:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @fx to ptr), float [[TMP0]] monotonic, align 4, !amdgpu.no.fine.grained.memory [[META5:![0-9]+]], !amdgpu.ignore.denormal.mode [[META5]]
// UNSAFE-FP-ATOMICS-NEXT: [[ADD:%.*]] = fadd float [[TMP1]], [[TMP0]]
// UNSAFE-FP-ATOMICS-NEXT: store float [[ADD]], ptr addrspacecast (ptr addrspace(1) @fv to ptr), align 4
// UNSAFE-FP-ATOMICS-NEXT: ret void
//
// float atomic capture-add: checked above to lower to a monotonic
// 'atomicrmw fadd'; with -munsafe-fp-atomics the instruction additionally
// carries the !amdgpu.no.fine.grained.memory and !amdgpu.ignore.denormal.mode
// metadata.
void atomic_fadd_f32() {
#pragma omp atomic capture
  fv = fx = fx + fv;
}

// DEFAULT-LABEL: define hidden void @_Z15atomic_fadd_f64v(
// DEFAULT-SAME: ) #[[ATTR0]] {
// DEFAULT-NEXT: [[ENTRY:.*:]]
// DEFAULT-NEXT: [[TMP0:%.*]] = load double, ptr addrspacecast (ptr addrspace(1) @dv to ptr), align 8
// DEFAULT-NEXT: [[TMP1:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @dx to ptr), double [[TMP0]] monotonic, align 8
// DEFAULT-NEXT: [[ADD:%.*]] = fadd double [[TMP1]], [[TMP0]]
// DEFAULT-NEXT: store double [[ADD]], ptr addrspacecast (ptr addrspace(1) @dv to ptr), align 8
// DEFAULT-NEXT: ret void
//
// UNSAFE-FP-ATOMICS-LABEL: define hidden void @_Z15atomic_fadd_f64v(
// UNSAFE-FP-ATOMICS-SAME: ) #[[ATTR0]] {
// UNSAFE-FP-ATOMICS-NEXT: [[ENTRY:.*:]]
// UNSAFE-FP-ATOMICS-NEXT: [[TMP0:%.*]] = load double, ptr addrspacecast (ptr addrspace(1) @dv to ptr), align 8
// UNSAFE-FP-ATOMICS-NEXT: [[TMP1:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @dx to ptr), double [[TMP0]] monotonic, align 8, !amdgpu.no.fine.grained.memory [[META5]]
// UNSAFE-FP-ATOMICS-NEXT: [[ADD:%.*]] = fadd double [[TMP1]], [[TMP0]]
// UNSAFE-FP-ATOMICS-NEXT: store double [[ADD]], ptr addrspacecast (ptr addrspace(1) @dv to ptr), align 8
// UNSAFE-FP-ATOMICS-NEXT: ret void
//
// double atomic capture-add: same lowering as the float case, except that
// with -munsafe-fp-atomics the atomicrmw carries only
// !amdgpu.no.fine.grained.memory — the denormal-mode metadata is emitted for
// f32 only (per the checks above).
void atomic_fadd_f64() {
#pragma omp atomic capture
  dv = dx = dx + dv;
}

#pragma omp end declare target
//.
// UNSAFE-FP-ATOMICS: [[META5]] = !{}
//.