// REQUIRES: systemz-registered-target
// RUN: %clang_cc1 -target-cpu z15 -triple s390x-linux-gnu \
// RUN: -O2 -fzvector -flax-vector-conversions=none \
// RUN: -ffp-exception-behavior=strict \
// RUN: -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -target-cpu z15 -triple s390x-linux-gnu \
// RUN: -O2 -fzvector -flax-vector-conversions=none \
// RUN: -ffp-exception-behavior=strict \
// RUN: -Wall -Wno-unused -Werror -S %s -o - | FileCheck %s --check-prefix=CHECK-ASM

#include <vecintrin.h>

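// The operands below are volatile so the -O2 pipeline keeps every vector
// operation under test instead of folding or removing it.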
volatile vector signed int vsi;
volatile vector signed long long vsl;
volatile vector unsigned int vui;
volatile vector unsigned long long vul;
volatile vector float vf;
volatile vector double vd;

volatile float f;
volatile double d;

const float * volatile cptrf;
const double * volatile cptrd;

float * volatile ptrf;
double * volatile ptrd;

volatile int idx;

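// Byte-reversed (vec_revb) and element-reversed (vec_reve) loads, stores,
// splats, inserts, and extracts on float/double element types; the CHECK-ASM
// lines expect the z15 byte-/element-reversal instructions (vlbrf, vsterg, ...).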
void test_core(void) {
  // CHECK-ASM-LABEL: test_core
  vector float vf2;
  vector double vd2;

  vf += vec_revb(vec_xl(idx, cptrf));
  // CHECK-ASM: vlbrf
  vd += vec_revb(vec_xl(idx, cptrd));
  // CHECK-ASM: vlbrg

  vec_xst(vec_revb(vf), idx, ptrf);
  // CHECK-ASM: vstbrf
  vec_xst(vec_revb(vd), idx, ptrd);
  // CHECK-ASM: vstbrg

  vf += vec_revb(vec_insert_and_zero(cptrf));
  // CHECK-ASM: vllebrzf
  vd += vec_revb(vec_insert_and_zero(cptrd));
  // CHECK-ASM: vllebrzg

  vf += vec_revb(vec_splats(f));
  // CHECK-ASM: vlbrrepf
  vd += vec_revb(vec_splats(d));
  // CHECK-ASM: vlbrrepg

  vf2 = vf;
  vf += vec_revb(vec_insert(f, vec_revb(vf2), 0));
  // CHECK-ASM: vlebrf
  vd2 = vd;
  vd += vec_revb(vec_insert(d, vec_revb(vd2), 0));
  // CHECK-ASM: vlebrg

  f = vec_extract(vec_revb(vf), 0);
  // CHECK-ASM: vstebrf
  d = vec_extract(vec_revb(vd), 0);
  // CHECK-ASM: vstebrg

  vf += vec_reve(vec_xl(idx, cptrf));
  // CHECK-ASM: vlerf
  vd += vec_reve(vec_xl(idx, cptrd));
  // CHECK-ASM: vlerg

  vec_xst(vec_reve(vf), idx, ptrf);
  // CHECK-ASM: vsterf
  vec_xst(vec_reve(vd), idx, ptrd);
  // CHECK-ASM: vsterg
}

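// Integer <-> floating-point vector conversions; with
// -ffp-exception-behavior=strict these must lower to
// llvm.experimental.constrained.* intrinsics rather than plain
// sitofp/uitofp/fptosi/fptoui instructions.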
void test_float(void) {
  // CHECK-ASM-LABEL: test_float

  vd = vec_double(vsl);
  // CHECK: call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vcdgb
  vd = vec_double(vul);
  // CHECK: call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vcdlgb
  vf = vec_float(vsi);
  // CHECK: call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vcefb
  vf = vec_float(vui);
  // CHECK: call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vcelfb

  vsl = vec_signed(vd);
  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vcgdb
  vsi = vec_signed(vf);
  // CHECK: call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vcfeb
  vul = vec_unsigned(vd);
  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vclgdb
  vui = vec_unsigned(vf);
  // CHECK: call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vclfeb
}