; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mattr=+vsx -O2 \
; RUN:   -mtriple=powerpc64-unknown-linux-gnu < %s > %t
; RUN: grep lxvw4x < %t | count 3
; RUN: grep lxvd2x < %t | count 3
; RUN: grep stxvw4x < %t | count 3
; RUN: grep stxvd2x < %t | count 3

; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mattr=+vsx -O0 -fast-isel=1 \
; RUN:   -mtriple=powerpc64-unknown-linux-gnu < %s > %t
; RUN: grep lxvw4x < %t | count 3
; RUN: grep lxvd2x < %t | count 3
; RUN: grep stxvw4x < %t | count 3
; RUN: grep stxvd2x < %t | count 3

; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mattr=+vsx -O2 \
; RUN:   -mtriple=powerpc64le-unknown-linux-gnu < %s > %t
; RUN: grep lxvd2x < %t | count 6
; RUN: grep stxvd2x < %t | count 6

; RUN: llc -verify-machineinstrs -mcpu=pwr9 -O2 \
; RUN:   -mtriple=powerpc64le-unknown-linux-gnu < %s > %t
; RUN: grep lxv < %t | count 6
; RUN: grep stxv < %t | count 6

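; Check that the aligned vector loads and stores of the globals below are
; selected to VSX memory instructions: lxvw4x/lxvd2x and stxvw4x/stxvd2x on
; big-endian pwr8, lxvd2x/stxvd2x for every element type on little-endian
; pwr8, and lxv/stxv on pwr9.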
@vsi = global <4 x i32> <i32 -1, i32 2, i32 -3, i32 4>, align 16
@vui = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@vf = global <4 x float> <float -1.500000e+00, float 2.500000e+00, float -3.500000e+00, float 4.500000e+00>, align 16
@vsll = global <2 x i64> <i64 255, i64 -937>, align 16
@vull = global <2 x i64> <i64 1447, i64 2894>, align 16
@vd = global <2 x double> <double 3.500000e+00, double -7.500000e+00>, align 16
@res_vsi = common global <4 x i32> zeroinitializer, align 16
@res_vui = common global <4 x i32> zeroinitializer, align 16
@res_vf = common global <4 x float> zeroinitializer, align 16
@res_vsll = common global <2 x i64> zeroinitializer, align 16
@res_vull = common global <2 x i64> zeroinitializer, align 16
@res_vd = common global <2 x double> zeroinitializer, align 16

; Function Attrs: nounwind
define void @test1() {
entry:
  %0 = load <4 x i32>, ptr @vsi, align 16
  %1 = load <4 x i32>, ptr @vui, align 16
  %2 = load <4 x i32>, ptr @vf, align 16
  %3 = load <2 x double>, ptr @vsll, align 16
  %4 = load <2 x double>, ptr @vull, align 16
  %5 = load <2 x double>, ptr @vd, align 16
  store <4 x i32> %0, ptr @res_vsi, align 16
  store <4 x i32> %1, ptr @res_vui, align 16
  store <4 x i32> %2, ptr @res_vf, align 16
  store <2 x double> %3, ptr @res_vsll, align 16
  store <2 x double> %4, ptr @res_vull, align 16
  store <2 x double> %5, ptr @res_vd, align 16
  ret void
}