; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; Test vector rotate left instructions with vector rotate amount.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
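;
; Each rotate is written in two ways below: as the expanded shl/lshr/or
; pattern, using the per-element identity rotl(x, n) = (x << n) | (x >> (bits - n)),
; and as a call to llvm.fshl with both value operands equal to the rotated
; value.  Both forms should be selected to the per-element rotate
; instructions (verllvb, verllvh, verllvf, verllvg) checked below.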

declare <16 x i8> @llvm.fshl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>)
declare <8 x i16> @llvm.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)

; Test a v16i8 rotate left.
define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val, <16 x i8> %amt) {
; CHECK-LABEL: f1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    verllvb %v24, %v26, %v28
; CHECK-NEXT:    br %r14

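  ; Expanded form: shift left by %amt, shift right by (element size - %amt),
  ; and OR the two halves together.  f3, f5 and f7 repeat this pattern for
  ; the wider element types.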
  %inv = sub <16 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8,
                        i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %amt
  %parta = shl <16 x i8> %val, %amt
  %partb = lshr <16 x i8> %val, %inv

  %rotl = or <16 x i8> %parta, %partb

  ret <16 x i8> %rotl
}

; Test a v16i8 rotate left (matched from fshl).
define <16 x i8> @f2(<16 x i8> %dummy, <16 x i8> %val, <16 x i8> %amt) {
; CHECK-LABEL: f2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    verllvb %v24, %v26, %v28
; CHECK-NEXT:    br %r14

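  ; llvm.fshl with both value operands equal to %val rotates %val left by
  ; %amt.  f4, f6 and f8 repeat this for the wider element types.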
  %rotl = tail call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %val, <16 x i8> %val, <16 x i8> %amt)

  ret <16 x i8> %rotl
}

; Test a v8i16 rotate left.
define <8 x i16> @f3(<8 x i16> %dummy, <8 x i16> %val, <8 x i16> %amt) {
; CHECK-LABEL: f3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    verllvh %v24, %v26, %v28
; CHECK-NEXT:    br %r14

  %inv = sub <8 x i16> <i16 16, i16 16, i16 16, i16 16,
                        i16 16, i16 16, i16 16, i16 16>, %amt
  %parta = shl <8 x i16> %val, %amt
  %partb = lshr <8 x i16> %val, %inv

  %rotl = or <8 x i16> %parta, %partb

  ret <8 x i16> %rotl
}

; Test a v8i16 rotate left (matched from fshl).
define <8 x i16> @f4(<8 x i16> %dummy, <8 x i16> %val, <8 x i16> %amt) {
; CHECK-LABEL: f4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    verllvh %v24, %v26, %v28
; CHECK-NEXT:    br %r14

  %rotl = tail call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %val, <8 x i16> %val, <8 x i16> %amt)

  ret <8 x i16> %rotl
}

; Test a v4i32 rotate left.
define <4 x i32> @f5(<4 x i32> %dummy, <4 x i32> %val, <4 x i32> %amt) {
; CHECK-LABEL: f5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    verllvf %v24, %v26, %v28
; CHECK-NEXT:    br %r14

  %inv = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %amt
  %parta = shl <4 x i32> %val, %amt
  %partb = lshr <4 x i32> %val, %inv

  %rotl = or <4 x i32> %parta, %partb

  ret <4 x i32> %rotl
}

; Test a v4i32 rotate left (matched from fshl).
define <4 x i32> @f6(<4 x i32> %dummy, <4 x i32> %val, <4 x i32> %amt) {
; CHECK-LABEL: f6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    verllvf %v24, %v26, %v28
; CHECK-NEXT:    br %r14

  %rotl = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %val, <4 x i32> %val, <4 x i32> %amt)

  ret <4 x i32> %rotl
}

; Test a v2i64 rotate left.
define <2 x i64> @f7(<2 x i64> %dummy, <2 x i64> %val, <2 x i64> %amt) {
; CHECK-LABEL: f7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    verllvg %v24, %v26, %v28
; CHECK-NEXT:    br %r14

  %inv = sub <2 x i64> <i64 64, i64 64>, %amt
  %parta = shl <2 x i64> %val, %amt
  %partb = lshr <2 x i64> %val, %inv

  %rotl = or <2 x i64> %parta, %partb

  ret <2 x i64> %rotl
}

; Test a v2i64 rotate left (matched from fshl).
define <2 x i64> @f8(<2 x i64> %dummy, <2 x i64> %val, <2 x i64> %amt) {
; CHECK-LABEL: f8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    verllvg %v24, %v26, %v28
; CHECK-NEXT:    br %r14

  %rotl = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %val, <2 x i64> %val, <2 x i64> %amt)

  ret <2 x i64> %rotl
}