! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
! REQUIRES: target=powerpc{{.*}}

! vec_max
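! Floating-point vec_max lowers to the VSX xvmaxsp/xvmaxdp intrinsics (with the
! contract flag); the signed and unsigned integer variants lower to the
! corresponding Altivec vmaxs*/vmaxu* intrinsics.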

! CHECK-LABEL: vec_max_testf32
subroutine vec_max_testf32(x, y)
  vector(real(4)) :: vmax, x, y
  vmax = vec_max(x, y)

! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmax:.*]] = call contract <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %[[x]], <4 x float> %[[y]])
! LLVMIR: store <4 x float> %[[vmax]], ptr %{{[0-9]}}, align 16
end subroutine vec_max_testf32

! CHECK-LABEL: vec_max_testf64
subroutine vec_max_testf64(x, y)
  vector(real(8)) :: vmax, x, y
  vmax = vec_max(x, y)

! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmax:.*]] = call contract <2 x double> @llvm.ppc.vsx.xvmaxdp(<2 x double> %[[x]], <2 x double> %[[y]])
! LLVMIR: store <2 x double> %[[vmax]], ptr %{{[0-9]}}, align 16
end subroutine vec_max_testf64

! CHECK-LABEL: vec_max_testi8
subroutine vec_max_testi8(x, y)
  vector(integer(1)) :: vmax, x, y
  vmax = vec_max(x, y)

! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmax:.*]] = call <16 x i8> @llvm.ppc.altivec.vmaxsb(<16 x i8> %[[x]], <16 x i8> %[[y]])
! LLVMIR: store <16 x i8> %[[vmax]], ptr %{{[0-9]}}, align 16
end subroutine vec_max_testi8

! CHECK-LABEL: vec_max_testi16
subroutine vec_max_testi16(x, y)
  vector(integer(2)) :: vmax, x, y
  vmax = vec_max(x, y)

! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmax:.*]] = call <8 x i16> @llvm.ppc.altivec.vmaxsh(<8 x i16> %[[x]], <8 x i16> %[[y]])
! LLVMIR: store <8 x i16> %[[vmax]], ptr %{{[0-9]}}, align 16
end subroutine vec_max_testi16

! CHECK-LABEL: vec_max_testi32
subroutine vec_max_testi32(x, y)
  vector(integer(4)) :: vmax, x, y
  vmax = vec_max(x, y)

! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmax:.*]] = call <4 x i32> @llvm.ppc.altivec.vmaxsw(<4 x i32> %[[x]], <4 x i32> %[[y]])
! LLVMIR: store <4 x i32> %[[vmax]], ptr %{{[0-9]}}, align 16
end subroutine vec_max_testi32

! CHECK-LABEL: vec_max_testi64
subroutine vec_max_testi64(x, y)
  vector(integer(8)) :: vmax, x, y
  vmax = vec_max(x, y)

! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmax:.*]] = call <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64> %[[x]], <2 x i64> %[[y]])
! LLVMIR: store <2 x i64> %[[vmax]], ptr %{{[0-9]}}, align 16
end subroutine vec_max_testi64

! CHECK-LABEL: vec_max_testui8
subroutine vec_max_testui8(x, y)
  vector(unsigned(1)) :: vmax, x, y
  vmax = vec_max(x, y)

! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmax:.*]] = call <16 x i8> @llvm.ppc.altivec.vmaxub(<16 x i8> %[[x]], <16 x i8> %[[y]])
! LLVMIR: store <16 x i8> %[[vmax]], ptr %{{[0-9]}}, align 16
end subroutine vec_max_testui8

! CHECK-LABEL: vec_max_testui16
subroutine vec_max_testui16(x, y)
  vector(unsigned(2)) :: vmax, x, y
  vmax = vec_max(x, y)

! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmax:.*]] = call <8 x i16> @llvm.ppc.altivec.vmaxuh(<8 x i16> %[[x]], <8 x i16> %[[y]])
! LLVMIR: store <8 x i16> %[[vmax]], ptr %{{[0-9]}}, align 16
end subroutine vec_max_testui16

! CHECK-LABEL: vec_max_testui32
subroutine vec_max_testui32(x, y)
  vector(unsigned(4)) :: vmax, x, y
  vmax = vec_max(x, y)

! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmax:.*]] = call <4 x i32> @llvm.ppc.altivec.vmaxuw(<4 x i32> %[[x]], <4 x i32> %[[y]])
! LLVMIR: store <4 x i32> %[[vmax]], ptr %{{[0-9]}}, align 16
end subroutine vec_max_testui32

! CHECK-LABEL: vec_max_testui64
subroutine vec_max_testui64(x, y)
  vector(unsigned(8)) :: vmax, x, y
  vmax = vec_max(x, y)

! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmax:.*]] = call <2 x i64> @llvm.ppc.altivec.vmaxud(<2 x i64> %[[x]], <2 x i64> %[[y]])
! LLVMIR: store <2 x i64> %[[vmax]], ptr %{{[0-9]}}, align 16
end subroutine vec_max_testui64

! vec_min
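! vec_min mirrors vec_max: xvminsp/xvmindp for the real kinds, vmins*/vminu* for
! the integer and unsigned kinds.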

! CHECK-LABEL: vec_min_testf32
subroutine vec_min_testf32(x, y)
  vector(real(4)) :: vmin, x, y
  vmin = vec_min(x, y)

! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmin:.*]] = call contract <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %[[x]], <4 x float> %[[y]])
! LLVMIR: store <4 x float> %[[vmin]], ptr %{{[0-9]}}, align 16
end subroutine vec_min_testf32

! CHECK-LABEL: vec_min_testf64
subroutine vec_min_testf64(x, y)
  vector(real(8)) :: vmin, x, y
  vmin = vec_min(x, y)

! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmin:.*]] = call contract <2 x double> @llvm.ppc.vsx.xvmindp(<2 x double> %[[x]], <2 x double> %[[y]])
! LLVMIR: store <2 x double> %[[vmin]], ptr %{{[0-9]}}, align 16
end subroutine vec_min_testf64

! CHECK-LABEL: vec_min_testi8
subroutine vec_min_testi8(x, y)
  vector(integer(1)) :: vmin, x, y
  vmin = vec_min(x, y)

! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmin:.*]] = call <16 x i8> @llvm.ppc.altivec.vminsb(<16 x i8> %[[x]], <16 x i8> %[[y]])
! LLVMIR: store <16 x i8> %[[vmin]], ptr %{{[0-9]}}, align 16
end subroutine vec_min_testi8

! CHECK-LABEL: vec_min_testi16
subroutine vec_min_testi16(x, y)
  vector(integer(2)) :: vmin, x, y
  vmin = vec_min(x, y)

! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmin:.*]] = call <8 x i16> @llvm.ppc.altivec.vminsh(<8 x i16> %[[x]], <8 x i16> %[[y]])
! LLVMIR: store <8 x i16> %[[vmin]], ptr %{{[0-9]}}, align 16
end subroutine vec_min_testi16

! CHECK-LABEL: vec_min_testi32
subroutine vec_min_testi32(x, y)
  vector(integer(4)) :: vmin, x, y
  vmin = vec_min(x, y)

! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmin:.*]] = call <4 x i32> @llvm.ppc.altivec.vminsw(<4 x i32> %[[x]], <4 x i32> %[[y]])
! LLVMIR: store <4 x i32> %[[vmin]], ptr %{{[0-9]}}, align 16
end subroutine vec_min_testi32

! CHECK-LABEL: vec_min_testi64
subroutine vec_min_testi64(x, y)
  vector(integer(8)) :: vmin, x, y
  vmin = vec_min(x, y)

! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmin:.*]] = call <2 x i64> @llvm.ppc.altivec.vminsd(<2 x i64> %[[x]], <2 x i64> %[[y]])
! LLVMIR: store <2 x i64> %[[vmin]], ptr %{{[0-9]}}, align 16
end subroutine vec_min_testi64

! CHECK-LABEL: vec_min_testui8
subroutine vec_min_testui8(x, y)
  vector(unsigned(1)) :: vmin, x, y
  vmin = vec_min(x, y)

! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmin:.*]] = call <16 x i8> @llvm.ppc.altivec.vminub(<16 x i8> %[[x]], <16 x i8> %[[y]])
! LLVMIR: store <16 x i8> %[[vmin]], ptr %{{[0-9]}}, align 16
end subroutine vec_min_testui8

! CHECK-LABEL: vec_min_testui16
subroutine vec_min_testui16(x, y)
  vector(unsigned(2)) :: vmin, x, y
  vmin = vec_min(x, y)

! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmin:.*]] = call <8 x i16> @llvm.ppc.altivec.vminuh(<8 x i16> %[[x]], <8 x i16> %[[y]])
! LLVMIR: store <8 x i16> %[[vmin]], ptr %{{[0-9]}}, align 16
end subroutine vec_min_testui16

! CHECK-LABEL: vec_min_testui32
subroutine vec_min_testui32(x, y)
  vector(unsigned(4)) :: vmin, x, y
  vmin = vec_min(x, y)

! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmin:.*]] = call <4 x i32> @llvm.ppc.altivec.vminuw(<4 x i32> %[[x]], <4 x i32> %[[y]])
! LLVMIR: store <4 x i32> %[[vmin]], ptr %{{[0-9]}}, align 16
end subroutine vec_min_testui32

! CHECK-LABEL: vec_min_testui64
subroutine vec_min_testui64(x, y)
  vector(unsigned(8)) :: vmin, x, y
  vmin = vec_min(x, y)

! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmin:.*]] = call <2 x i64> @llvm.ppc.altivec.vminud(<2 x i64> %[[x]], <2 x i64> %[[y]])
! LLVMIR: store <2 x i64> %[[vmin]], ptr %{{[0-9]}}, align 16
end subroutine vec_min_testui64

! vec_madd
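! vec_madd lowers to the generic llvm.fma intrinsic, carrying the contract flag.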

! CHECK-LABEL: vec_madd_testf32
subroutine vec_madd_testf32(x, y, z)
  vector(real(4)) :: vmsum, x, y, z
  vmsum = vec_madd(x, y, z)

! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[z:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmsum:.*]] = call contract <4 x float> @llvm.fma.v4f32(<4 x float> %[[x]], <4 x float> %[[y]], <4 x float> %[[z]])
! LLVMIR: store <4 x float> %[[vmsum]], ptr %{{[0-9]}}, align 16
end subroutine vec_madd_testf32

! CHECK-LABEL: vec_madd_testf64
subroutine vec_madd_testf64(x, y, z)
  vector(real(8)) :: vmsum, x, y, z
  vmsum = vec_madd(x, y, z)

! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[z:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vmsum:.*]] = call contract <2 x double> @llvm.fma.v2f64(<2 x double> %[[x]], <2 x double> %[[y]], <2 x double> %[[z]])
! LLVMIR: store <2 x double> %[[vmsum]], ptr %{{[0-9]}}, align 16
end subroutine vec_madd_testf64

! vec_nmsub
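! vec_nmsub lowers to the PowerPC-specific llvm.ppc.fnmsub intrinsic.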

! CHECK-LABEL: vec_nmsub_testf32
subroutine vec_nmsub_testf32(x, y, z)
  vector(real(4)) :: vnmsub, x, y, z
  vnmsub = vec_nmsub(x, y, z)

! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[z:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vnmsub:.*]] = call contract <4 x float> @llvm.ppc.fnmsub.v4f32(<4 x float> %[[x]], <4 x float> %[[y]], <4 x float> %[[z]])
! LLVMIR: store <4 x float> %[[vnmsub]], ptr %{{[0-9]}}, align 16
end subroutine vec_nmsub_testf32

! CHECK-LABEL: vec_nmsub_testf64
subroutine vec_nmsub_testf64(x, y, z)
  vector(real(8)) :: vnmsub, x, y, z
  vnmsub = vec_nmsub(x, y, z)

! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[z:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[vnmsub:.*]] = call contract <2 x double> @llvm.ppc.fnmsub.v2f64(<2 x double> %[[x]], <2 x double> %[[y]], <2 x double> %[[z]])
! LLVMIR: store <2 x double> %[[vnmsub]], ptr %{{[0-9]}}, align 16
end subroutine vec_nmsub_testf64

! vec_msub
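! vec_msub has no dedicated intrinsic here: the addend is negated with fneg and
! the result is fed to llvm.fma.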

! CHECK-LABEL: vec_msub_testf32
subroutine vec_msub_testf32(x, y, z)
  vector(real(4)) :: vmsub, x, y, z
  vmsub = vec_msub(x, y, z)

! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[z:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[nz:.*]] = fneg contract <4 x float> %[[z]]
! LLVMIR: %[[vmsub:.*]] = call contract <4 x float> @llvm.fma.v4f32(<4 x float> %[[x]], <4 x float> %[[y]], <4 x float> %[[nz]])
! LLVMIR: store <4 x float> %[[vmsub]], ptr %{{[0-9]}}, align 16
end subroutine vec_msub_testf32

! CHECK-LABEL: vec_msub_testf64
subroutine vec_msub_testf64(x, y, z)
  vector(real(8)) :: vmsub, x, y, z
  vmsub = vec_msub(x, y, z)

! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[z:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[nz:.*]] = fneg contract <2 x double> %[[z]]
! LLVMIR: %[[vmsub:.*]] = call contract <2 x double> @llvm.fma.v2f64(<2 x double> %[[x]], <2 x double> %[[y]], <2 x double> %[[nz]])
! LLVMIR: store <2 x double> %[[vmsub]], ptr %{{[0-9]}}, align 16
end subroutine vec_msub_testf64

! vec_nmadd
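! vec_nmadd is lowered as llvm.fma followed by an fneg of the result.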

! CHECK-LABEL: vec_nmadd_testf32
subroutine vec_nmadd_testf32(x, y, z)
  vector(real(4)) :: vnmsum, x, y, z
  vnmsum = vec_nmadd(x, y, z)

! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[z:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[msum:.*]] = call contract <4 x float> @llvm.fma.v4f32(<4 x float> %[[x]], <4 x float> %[[y]], <4 x float> %[[z]])
! LLVMIR: %[[vnmsum:.*]] = fneg contract <4 x float> %[[msum]]
! LLVMIR: store <4 x float> %[[vnmsum]], ptr %{{[0-9]}}, align 16
end subroutine vec_nmadd_testf32

! CHECK-LABEL: vec_nmadd_testf64
subroutine vec_nmadd_testf64(x, y, z)
  vector(real(8)) :: vnmsum, x, y, z
  vnmsum = vec_nmadd(x, y, z)

! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[y:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[z:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[msum:.*]] = call contract <2 x double> @llvm.fma.v2f64(<2 x double> %[[x]], <2 x double> %[[y]], <2 x double> %[[z]])
! LLVMIR: %[[vnmsum:.*]] = fneg contract <2 x double> %[[msum]]
! LLVMIR: store <2 x double> %[[vnmsum]], ptr %{{[0-9]}}, align 16
end subroutine vec_nmadd_testf64