! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
! REQUIRES: target=powerpc{{.*}}
3
4!----------------------
5! vec_st
6!----------------------
7
! CHECK-LABEL: vec_st_vi1i2vi1
! vec_st of a vector(integer(1)): the destination address is base plus the
! integer(2) byte displacement; the value is bitcast to <4 x i32> for stvx.
! NOTE: the getelementptr checks use the captured %[[arg2]] rather than a
! hard-coded SSA name (%5), so the test survives IR renumbering; this also
! matches the style of the vec_stxv/vec_xst checks later in this file.
subroutine vec_st_vi1i2vi1(arg1, arg2, arg3)
  vector(integer(1)) :: arg1, arg3
  integer(2) :: arg2
  call vec_st(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: %[[bcArg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
! LLVMIR: call void @llvm.ppc.altivec.stvx(<4 x i32> %[[bcArg1]], ptr %[[arg3]])
end subroutine vec_st_vi1i2vi1

! CHECK-LABEL: vec_st_vi2i2vi2
! Same shape for vector(integer(2)); <8 x i16> is bitcast to <4 x i32>.
subroutine vec_st_vi2i2vi2(arg1, arg2, arg3)
  vector(integer(2)) :: arg1, arg3
  integer(2) :: arg2
  call vec_st(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: %[[bcArg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
! LLVMIR: call void @llvm.ppc.altivec.stvx(<4 x i32> %[[bcArg1]], ptr %[[arg3]])
end subroutine vec_st_vi2i2vi2

! CHECK-LABEL: vec_st_vi4i2vi4
! vector(integer(4)) is already <4 x i32>, so no bitcast is emitted.
subroutine vec_st_vi4i2vi4(arg1, arg2, arg3)
  vector(integer(4)) :: arg1, arg3
  integer(2) :: arg2
  call vec_st(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvx(<4 x i32> %[[arg1]], ptr %[[arg3]])
end subroutine vec_st_vi4i2vi4

! CHECK-LABEL: vec_st_vu1i4vu1
! Unsigned element kinds lower the same way; the offset here is i32.
subroutine vec_st_vu1i4vu1(arg1, arg2, arg3)
  vector(unsigned(1)) :: arg1, arg3
  integer(4) :: arg2
  call vec_st(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %{{.*}}, align 4
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i32 %[[arg2]]
! LLVMIR: %[[bcArg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
! LLVMIR: call void @llvm.ppc.altivec.stvx(<4 x i32> %[[bcArg1]], ptr %[[arg3]])
end subroutine vec_st_vu1i4vu1

! CHECK-LABEL: vec_st_vu2i4vu2
subroutine vec_st_vu2i4vu2(arg1, arg2, arg3)
  vector(unsigned(2)) :: arg1, arg3
  integer(4) :: arg2
  call vec_st(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %{{.*}}, align 4
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i32 %[[arg2]]
! LLVMIR: %[[bcArg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
! LLVMIR: call void @llvm.ppc.altivec.stvx(<4 x i32> %[[bcArg1]], ptr %[[arg3]])
end subroutine vec_st_vu2i4vu2

! CHECK-LABEL: vec_st_vu4i4vu4
subroutine vec_st_vu4i4vu4(arg1, arg2, arg3)
  vector(unsigned(4)) :: arg1, arg3
  integer(4) :: arg2
  call vec_st(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %{{.*}}, align 4
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i32 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvx(<4 x i32> %[[arg1]], ptr %[[arg3]])
end subroutine vec_st_vu4i4vu4
83
! CHECK-LABEL: vec_st_vi4i4via4
! vec_st into an element of a vector array: the sext/sub/mul/add chain is the
! lowered (i-1) linearization of the 1-based array index, producing the
! element address; vec_st then adds the byte displacement arg2 on top of that
! address before calling stvx.
subroutine vec_st_vi4i4via4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1, arg3(5)
  integer(4) :: arg2, i
  call vec_st(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[iextsub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[iextmul:.*]] = mul nsw i64 %[[iextsub]], 1
! LLVMIR: %[[iextmul2:.*]] = mul nsw i64 %[[iextmul]], 1
! LLVMIR: %[[iextadd:.*]] = add nsw i64 %[[iextmul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr <4 x i32>, ptr %2, i64 %[[iextadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i32 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvx(<4 x i32> %[[arg1]], ptr %[[gep2]])
end subroutine vec_st_vi4i4via4
102
103!----------------------
104! vec_ste
105!----------------------
106
! CHECK-LABEL: vec_ste_vi1i2i1
! vec_ste stores a single element via the per-width AltiVec intrinsics
! (stvebx / stvehx / stvewx) at base plus the byte displacement.
! NOTE: the getelementptr checks use the captured %[[arg2]] rather than a
! hard-coded SSA name (%5), so the test survives IR renumbering; this also
! matches the style of the vec_stxv/vec_xst checks later in this file.
subroutine vec_ste_vi1i2i1(arg1, arg2, arg3)
  vector(integer(1)) :: arg1
  integer(2) :: arg2
  integer(1) :: arg3
  call vec_ste(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvebx(<16 x i8> %[[arg1]], ptr %[[arg3]])
end subroutine vec_ste_vi1i2i1

! CHECK-LABEL: vec_ste_vi2i2i2
subroutine vec_ste_vi2i2i2(arg1, arg2, arg3)
  vector(integer(2)) :: arg1
  integer(2) :: arg2
  integer(2) :: arg3
  call vec_ste(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvehx(<8 x i16> %[[arg1]], ptr %[[arg3]])
end subroutine vec_ste_vi2i2i2

! CHECK-LABEL: vec_ste_vi4i2i4
subroutine vec_ste_vi4i2i4(arg1, arg2, arg3)
  vector(integer(4)) :: arg1
  integer(2) :: arg2
  integer(4) :: arg3
  call vec_ste(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvewx(<4 x i32> %[[arg1]], ptr %[[arg3]])
end subroutine vec_ste_vi4i2i4

! CHECK-LABEL: vec_ste_vu1i4u1
! Unsigned variants lower identically; offset here is integer(4)/i32.
subroutine vec_ste_vu1i4u1(arg1, arg2, arg3)
  vector(unsigned(1)) :: arg1
  integer(4) :: arg2
  integer(1) :: arg3
  call vec_ste(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %{{.*}}, align 4
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i32 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvebx(<16 x i8> %[[arg1]], ptr %[[arg3]])
end subroutine vec_ste_vu1i4u1

! CHECK-LABEL: vec_ste_vu2i4u2
subroutine vec_ste_vu2i4u2(arg1, arg2, arg3)
  vector(unsigned(2)) :: arg1
  integer(4) :: arg2
  integer(2) :: arg3
  call vec_ste(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %{{.*}}, align 4
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i32 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvehx(<8 x i16> %[[arg1]], ptr %[[arg3]])
end subroutine vec_ste_vu2i4u2

! CHECK-LABEL: vec_ste_vu4i4u4
subroutine vec_ste_vu4i4u4(arg1, arg2, arg3)
  vector(unsigned(4)) :: arg1
  integer(4) :: arg2
  integer(4) :: arg3
  call vec_ste(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %{{.*}}, align 4
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i32 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvewx(<4 x i32> %[[arg1]], ptr %[[arg3]])
end subroutine vec_ste_vu4i4u4
184
! CHECK-LABEL: vec_ste_vr4i4r4
! vec_ste of a real(4) vector: the value is bitcast to <4 x i32> before the
! word-element store intrinsic stvewx is called.
subroutine vec_ste_vr4i4r4(arg1, arg2, arg3)
  vector(real(4)) :: arg1
  integer(4) :: arg2
  real(4) :: arg3
  call vec_ste(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[pos:.*]] = getelementptr i8, ptr %2, i32 %[[arg2]]
! LLVMIR: %[[bc:.*]] = bitcast <4 x float> %[[arg1]] to <4 x i32>
! LLVMIR: call void @llvm.ppc.altivec.stvewx(<4 x i32> %[[bc]], ptr %[[pos]])

end subroutine vec_ste_vr4i4r4

! CHECK-LABEL: vec_ste_vi4i4ia4
! Array-element destination: the sext/sub/mul/add chain is the lowered (i-1)
! linearization of the 1-based index; the byte displacement arg2 is then
! added to the element address before stvewx.
subroutine vec_ste_vi4i4ia4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(4) :: arg2, i
  integer(4) :: arg3(5)
  call vec_ste(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr i32, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i32 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvewx(<4 x i32> %[[arg1]], ptr %[[gep2]])
end subroutine vec_ste_vi4i4ia4
219
220!----------------------
221! vec_stxv
222!----------------------
223
! CHECK-LABEL: vec_stxv_test_vr4i2r4
! vec_stxv lowers to a GEP by the byte displacement followed by a plain
! aligned vector store (no intrinsic call).
subroutine vec_stxv_test_vr4i2r4(arg1, arg2, arg3)
  vector(real(4)) :: arg1
  integer(2) :: arg2
  real(4) :: arg3
  call vec_stxv(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: store <4 x float> %[[arg1]], ptr %[[addr]], align 16
end subroutine vec_stxv_test_vr4i2r4

! CHECK-LABEL: vec_stxv_test_vi4i8ia4
! Indexed array element destination with an integer(8) displacement: the
! (i-1) linearization feeds the element GEP, then the i64 byte offset is
! added before the store.
subroutine vec_stxv_test_vi4i8ia4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(8) :: arg2
  integer(4) :: arg3(10)
  integer(4) :: i
  call vec_stxv(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr i32, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i64, ptr %1, align 8
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i64 %[[arg2]]
! LLVMIR: store <4 x i32> %[[arg1]], ptr %[[gep2]], align 16
end subroutine vec_stxv_test_vi4i8ia4

! CHECK-LABEL: vec_stxv_test_vi2i4vi2
! Vector-typed destination; the <8 x i16> value is stored unchanged.
subroutine vec_stxv_test_vi2i4vi2(arg1, arg2, arg3)
  vector(integer(2)) :: arg1
  integer(4) :: arg2
  vector(integer(2)) :: arg3
  call vec_stxv(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[arg2]]
! LLVMIR: store <8 x i16> %[[arg1]], ptr %[[addr]], align 16
end subroutine vec_stxv_test_vi2i4vi2

! CHECK-LABEL: vec_stxv_test_vi4i4vai4
! Element of a vector array: the element GEP scales by <4 x i32>, i.e.
! 16 bytes per element, before the byte displacement is applied.
subroutine vec_stxv_test_vi4i4vai4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(4) :: arg2
  vector(integer(4)) :: arg3(20)
  integer(4) :: i
  call vec_stxv(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr <4 x i32>, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i32 %[[arg2]]
! LLVMIR: store <4 x i32> %[[arg1]], ptr %[[gep2]], align 16
end subroutine vec_stxv_test_vi4i4vai4
291
292!----------------------
293! vec_xst
294!----------------------
295
! CHECK-LABEL: vec_xst_test_vr4i2r4
! vec_xst lowers to a GEP by the byte displacement followed by a plain
! aligned vector store (no element reversal, unlike vec_xst_be below).
subroutine vec_xst_test_vr4i2r4(arg1, arg2, arg3)
  vector(real(4)) :: arg1
  integer(2) :: arg2
  real(4) :: arg3
  call vec_xst(arg1, arg2, arg3)


! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: store <4 x float> %[[arg1]], ptr %[[addr]], align 16
end subroutine vec_xst_test_vr4i2r4

! CHECK-LABEL: vec_xst_test_vi4i8ia4
! Indexed array element destination: (i-1) linearization feeds the element
! GEP, then the i64 byte displacement is added before the store.
subroutine vec_xst_test_vi4i8ia4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(8) :: arg2
  integer(4) :: arg3(10)
  integer(4) :: i
  call vec_xst(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr i32, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i64, ptr %1, align 8
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i64 %[[arg2]]
! LLVMIR: store <4 x i32> %[[arg1]], ptr %[[gep2]], align 16
end subroutine vec_xst_test_vi4i8ia4

! CHECK-LABEL: vec_xst_test_vi2i4vi2
! Vector-typed destination; the <8 x i16> value is stored unchanged.
subroutine vec_xst_test_vi2i4vi2(arg1, arg2, arg3)
  vector(integer(2)) :: arg1
  integer(4) :: arg2
  vector(integer(2)) :: arg3
  call vec_xst(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[arg2]]
! LLVMIR: store <8 x i16> %[[arg1]], ptr %[[addr]], align 16
end subroutine vec_xst_test_vi2i4vi2

! CHECK-LABEL: vec_xst_test_vi4i4vai4
! Element of a vector array: the element GEP scales by <4 x i32> (16 bytes).
subroutine vec_xst_test_vi4i4vai4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(4) :: arg2
  vector(integer(4)) :: arg3(20)
  integer(4) :: i
  call vec_xst(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr <4 x i32>, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i32 %[[arg2]]
! LLVMIR: store <4 x i32> %[[arg1]], ptr %[[gep2]], align 16
end subroutine vec_xst_test_vi4i4vai4
364
365!----------------------
366! vec_xst_be
367!----------------------
368
! CHECK-LABEL: vec_xst_be_test_vr4i2r4
! vec_xst_be reverses the element order (shufflevector mask <3,2,1,0> for
! 4-element vectors) before the plain aligned store.
subroutine vec_xst_be_test_vr4i2r4(arg1, arg2, arg3)
  vector(real(4)) :: arg1
  integer(2) :: arg2
  real(4) :: arg3
  call vec_xst_be(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: %[[shf:.*]] = shufflevector <4 x float> %[[arg1]], <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x float> %[[shf]], ptr %[[addr]], align 16
end subroutine vec_xst_be_test_vr4i2r4

! CHECK-LABEL: vec_xst_be_test_vi4i8ia4
! Indexed array element destination; the reversal shuffle is applied to the
! value after the address computation, before the store.
subroutine vec_xst_be_test_vi4i8ia4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(8) :: arg2
  integer(4) :: arg3(10)
  integer(4) :: i
  call vec_xst_be(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr i32, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i64, ptr %1, align 8
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i64 %[[arg2]]
! LLVMIR: %[[src:.*]] = shufflevector <4 x i32> %[[arg1]], <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x i32> %[[src]], ptr %[[gep2]], align 16
end subroutine vec_xst_be_test_vi4i8ia4

! CHECK-LABEL: vec_xst_be_test_vi2i4vi2
! 8-element vector: reversal mask is <7,...,0>.
subroutine vec_xst_be_test_vi2i4vi2(arg1, arg2, arg3)
  vector(integer(2)) :: arg1
  integer(4) :: arg2
  vector(integer(2)) :: arg3
  call vec_xst_be(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[arg2]]
! LLVMIR: %[[src:.*]] = shufflevector <8 x i16> %[[arg1]], <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <8 x i16> %[[src]], ptr %[[addr]], align 16
end subroutine vec_xst_be_test_vi2i4vi2

! CHECK-LABEL: vec_xst_be_test_vi4i4vai4
! Element of a vector array: element GEP scales by <4 x i32> (16 bytes);
! the value is reversed before the store.
subroutine vec_xst_be_test_vi4i4vai4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(4) :: arg2
  vector(integer(4)) :: arg3(20)
  integer(4) :: i
  call vec_xst_be(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr <4 x i32>, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i32 %[[arg2]]
! LLVMIR: %[[src:.*]] = shufflevector <4 x i32> %[[arg1]], <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x i32> %[[src]], ptr %[[gep2]], align 16
end subroutine vec_xst_be_test_vi4i4vai4
440
441!----------------------
442! vec_xstd2
443!----------------------
444
! CHECK-LABEL: vec_xstd2_test_vr4i2r4
! vec_xstd2 bitcasts the source to <2 x i64> (two doublewords) and stores it
! at base plus the byte displacement.
subroutine vec_xstd2_test_vr4i2r4(arg1, arg2, arg3)
  vector(real(4)) :: arg1
  integer(2) :: arg2
  real(4) :: arg3
  call vec_xstd2(arg1, arg2, arg3)


! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: %[[src:.*]] = bitcast <4 x float> %[[arg1]] to <2 x i64>
! LLVMIR: store <2 x i64> %[[src]], ptr %[[addr]], align 16
end subroutine vec_xstd2_test_vr4i2r4

! CHECK-LABEL: vec_xstd2_test_vi4i8ia4
! Indexed array element destination with an integer(8) displacement.
subroutine vec_xstd2_test_vi4i8ia4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(8) :: arg2
  integer(4) :: arg3(10)
  integer(4) :: i
  call vec_xstd2(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr i32, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i64, ptr %1, align 8
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i64 %[[arg2]]
! LLVMIR: %[[src:.*]] = bitcast <4 x i32> %[[arg1]] to <2 x i64>
! LLVMIR: store <2 x i64> %[[src]], ptr %[[gep2]], align 16
end subroutine vec_xstd2_test_vi4i8ia4

! CHECK-LABEL: vec_xstd2_test_vi2i4vi2
subroutine vec_xstd2_test_vi2i4vi2(arg1, arg2, arg3)
  vector(integer(2)) :: arg1
  integer(4) :: arg2
  vector(integer(2)) :: arg3
  call vec_xstd2(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[arg2]]
! LLVMIR: %[[src:.*]] = bitcast <8 x i16> %[[arg1]] to <2 x i64>
! LLVMIR: store <2 x i64> %[[src]], ptr %[[addr]], align 16
end subroutine vec_xstd2_test_vi2i4vi2

! CHECK-LABEL: vec_xstd2_test_vi4i4vai4
! Element of a vector array: element GEP scales by <4 x i32> (16 bytes).
subroutine vec_xstd2_test_vi4i4vai4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(4) :: arg2
  vector(integer(4)) :: arg3(20)
  integer(4) :: i
  call vec_xstd2(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr <4 x i32>, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i32 %[[arg2]]
! LLVMIR: %[[src:.*]] = bitcast <4 x i32> %[[arg1]] to <2 x i64>
! LLVMIR: store <2 x i64> %[[src]], ptr %[[gep2]], align 16
end subroutine vec_xstd2_test_vi4i4vai4
517
518!----------------------
519! vec_xstw4
520!----------------------
521
! CHECK-LABEL: vec_xstw4_test_vr4i2r4
! vec_xstw4 stores the source as four words; <4 x float> and <4 x i32>
! already have word elements, so no bitcast appears for those types.
subroutine vec_xstw4_test_vr4i2r4(arg1, arg2, arg3)
  vector(real(4)) :: arg1
  integer(2) :: arg2
  real(4) :: arg3
  call vec_xstw4(arg1, arg2, arg3)


! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: store <4 x float> %[[arg1]], ptr %[[addr]], align 16
end subroutine vec_xstw4_test_vr4i2r4

! CHECK-LABEL: vec_xstw4_test_vi4i8ia4
! Indexed array element destination with an integer(8) displacement.
subroutine vec_xstw4_test_vi4i8ia4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(8) :: arg2
  integer(4) :: arg3(10)
  integer(4) :: i
  call vec_xstw4(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr i32, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i64, ptr %1, align 8
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i64 %[[arg2]]
! LLVMIR: store <4 x i32> %[[arg1]], ptr %[[gep2]], align 16
end subroutine vec_xstw4_test_vi4i8ia4

! CHECK-LABEL: vec_xstw4_test_vi2i4vi2
! Halfword source: bitcast to <4 x i32> before the word store.
subroutine vec_xstw4_test_vi2i4vi2(arg1, arg2, arg3)
  vector(integer(2)) :: arg1
  integer(4) :: arg2
  vector(integer(2)) :: arg3
  call vec_xstw4(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[arg2]]
! LLVMIR: %[[src:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
! LLVMIR: store <4 x i32> %[[src]], ptr %[[addr]], align 16
end subroutine vec_xstw4_test_vi2i4vi2

! CHECK-LABEL: vec_xstw4_test_vi4i4vai4
! Element of a vector array: element GEP scales by <4 x i32> (16 bytes).
subroutine vec_xstw4_test_vi4i4vai4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(4) :: arg2
  vector(integer(4)) :: arg3(20)
  integer(4) :: i
  call vec_xstw4(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr <4 x i32>, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i32 %[[arg2]]
! LLVMIR: store <4 x i32> %[[arg1]], ptr %[[gep2]], align 16
end subroutine vec_xstw4_test_vi4i4vai4
591