; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s

;;; Test vector move intrinsic instructions
;;;
;;; Note:
;;;   We test VMVivl, VMVivl_v, and VMVivml_v instructions.

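; vmv.vsvl: scalar operand held in a register, no pass-through vector; the
; load, the vmv, and the store all run at VL 256 under a single lvl.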
; Function Attrs: nounwind
define void @vmv_vsvl(ptr %0, i32 signext %1) {
; CHECK-LABEL: vmv_vsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    and %s1, %s1, (32)0
; CHECK-NEXT:    vmv %v0, %s1, %v0
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvl(i32 %1, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vld.vssl(i64, ptr, i32)

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmv.vsvl(i32, <256 x double>, i32)

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, ptr, i32)

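; Same pattern with the scalar operand folded into the instruction as the
; immediate 31.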
; Function Attrs: nounwind
define void @vmv_vsvl_imm(ptr %0) {
; CHECK-LABEL: vmv_vsvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vmv %v0, 31, %v0
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvl(i32 31, <256 x double> %2, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 8, ptr %0, i32 256)
  ret void
}

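; vmv.vsvvl adds a pass-through vector and its own vector length of 128, so
; the vmv executes under a separate lvl from the VL-256 load and store.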
; Function Attrs: nounwind
define void @vmv_vsvvl(ptr %0, i32 signext %1) {
; CHECK-LABEL: vmv_vsvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    and %s1, %s1, (32)0
; CHECK-NEXT:    lea %s3, 128
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vmv %v0, %s1, %v0
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvvl(i32 %1, <256 x double> %3, <256 x double> %3, i32 128)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmv.vsvvl(i32, <256 x double>, <256 x double>, i32)

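; Immediate form of vmv.vsvvl: constant 31, VL 128 for the vmv, VL 256 for the
; surrounding load/store.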
; Function Attrs: nounwind
define void @vmv_vsvvl_imm(ptr %0) {
; CHECK-LABEL: vmv_vsvvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    lea %s2, 128
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vmv %v0, 31, %v0
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvvl(i32 31, <256 x double> %2, <256 x double> %2, i32 128)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 8, ptr %0, i32 256)
  ret void
}

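; vmv.vsvmvl additionally takes a mask (here %vm1) along with the pass-through
; vector, still at VL 128.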
; Function Attrs: nounwind
define void @vmv_vsvmvl(ptr %0, i32 signext %1) {
; CHECK-LABEL: vmv_vsvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    and %s1, %s1, (32)0
; CHECK-NEXT:    lea %s3, 128
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vmv %v0, %s1, %v0, %vm1
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32 %1, <256 x double> %3, <256 x i1> undef, <256 x double> %3, i32 128)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32)

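; Immediate form of vmv.vsvmvl: masked vmv with the constant 31 at VL 128.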
; Function Attrs: nounwind
define void @vmv_vsvmvl_imm(ptr %0) {
; CHECK-LABEL: vmv_vsvmvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    lea %s2, 128
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vmv %v0, 31, %v0, %vm1
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32 31, <256 x double> %2, <256 x i1> undef, <256 x double> %2, i32 128)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 8, ptr %0, i32 256)
  ret void
}