; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s

;;; Test vector shift left and add intrinsic instructions
;;;
;;; Note:
;;;   We test VSFA*vrrl, VSFA*vrrl_v, VSFA*virl, VSFA*virl_v, VSFA*vrrml_v, and
;;;   VSFA*virml_v instructions.
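;;;
;;;   Operand layout (as exercised below): each intrinsic takes the input
;;;   vector, a scalar shift amount (register or immediate), a scalar addend,
;;;   optionally a mask register and a pass-through vector, and the active
;;;   vector length as a trailing i32 operand; llc materializes that length
;;;   with lea/lvl before issuing vsfa.
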
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsfa_vvssl(<256 x double> %0, i64 %1, i64 %2) {
; CHECK-LABEL: vsfa_vvssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vsfa %v0, %v0, %s0, %s1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssl(<256 x double> %0, i64 %1, i64 %2, i32 256)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vsfa.vvssl(<256 x double>, i64, i64, i32)
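
;;; The *vl variants below also take a pass-through vector: the result is
;;; computed into a scratch register under the requested vector length (128)
;;; and then copied to the return register with vor under the full length
;;; of 256.
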
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsfa_vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3) {
; CHECK-LABEL: vsfa_vvssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 128
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vsfa %v1, %v0, %s0, %s1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vsfa.vvssvl(<256 x double>, i64, i64, <256 x double>, i32)
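
;;; The _imm variants pass a constant shift amount (8), which llc encodes
;;; directly as an immediate operand of vsfa instead of loading it into a
;;; scalar register first.
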
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsfa_vvssl_imm(<256 x double> %0, i64 %1) {
; CHECK-LABEL: vsfa_vvssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsfa %v0, %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssl(<256 x double> %0, i64 8, i64 %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsfa_vvssvl_imm(<256 x double> %0, i64 %1, <256 x double> %2) {
; CHECK-LABEL: vsfa_vvssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsfa %v1, %v0, 8, %s0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssvl(<256 x double> %0, i64 8, i64 %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}
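
;;; The masked variants additionally take a <256 x i1> mask, which shows up
;;; as the %vm1 operand of vsfa; elements whose mask bit is clear keep the
;;; values of the pass-through vector.
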
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsfa_vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4) {
; CHECK-LABEL: vsfa_vvssmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 128
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vsfa %v1, %v0, %s0, %s1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %6 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4, i32 128)
  ret <256 x double> %6
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vsfa.vvssmvl(<256 x double>, i64, i64, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsfa_vvssmvl_imm(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vsfa_vvssmvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsfa %v1, %v0, 8, %s0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssmvl(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}