; Test 64-bit shifts left.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

; Check the low end of the SLLG range.
define i64 @f1(i64 %a) {
; CHECK-LABEL: f1:
; CHECK: sllg %r2, %r2, 1
; CHECK: br %r14
  %shift = shl i64 %a, 1
  ret i64 %shift
}

; Check the high end of the defined SLLG range.
define i64 @f2(i64 %a) {
; CHECK-LABEL: f2:
; CHECK: sllg %r2, %r2, 63
; CHECK: br %r14
  %shift = shl i64 %a, 63
  ret i64 %shift
}

; We don't generate shifts by out-of-range values; shifting an i64 by 64
; is undefined.
define i64 @f3(i64 %a) {
; CHECK-LABEL: f3:
; CHECK-NOT: sllg
; CHECK: br %r14
  %shift = shl i64 %a, 64
  ret i64 %shift
}

; Check variable shifts.
define i64 @f4(i64 %a, i64 %amt) {
; CHECK-LABEL: f4:
; CHECK: sllg %r2, %r2, 0(%r3)
; CHECK: br %r14
  %shift = shl i64 %a, %amt
  ret i64 %shift
}
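
; Note: SLLG uses only the low 6 bits of the computed shift-amount address,
; which is compatible with the IR semantics of a 64-bit shl (amounts of 64 or
; more are undefined), so variable amounts need no masking.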

; Check shift amounts that have a constant term.
define i64 @f5(i64 %a, i64 %amt) {
; CHECK-LABEL: f5:
; CHECK: sllg %r2, %r2, 10(%r3)
; CHECK: br %r14
  %add = add i64 %amt, 10
  %shift = shl i64 %a, %add
  ret i64 %shift
}

; ...and again with a sign-extended 32-bit shift amount.
define i64 @f6(i64 %a, i32 %amt) {
; CHECK-LABEL: f6:
; CHECK: sllg %r2, %r2, 10(%r3)
; CHECK: br %r14
  %add = add i32 %amt, 10
  %addext = sext i32 %add to i64
  %shift = shl i64 %a, %addext
  ret i64 %shift
}

; ...and now with a zero-extended 32-bit shift amount.
define i64 @f7(i64 %a, i32 %amt) {
; CHECK-LABEL: f7:
; CHECK: sllg %r2, %r2, 10(%r3)
; CHECK: br %r14
  %add = add i32 %amt, 10
  %addext = zext i32 %add to i64
  %shift = shl i64 %a, %addext
  ret i64 %shift
}

; Check shift amounts that have the largest in-range constant term.  We could
; mask the amount instead.
define i64 @f8(i64 %a, i64 %amt) {
; CHECK-LABEL: f8:
; CHECK: sllg %r2, %r2, 524287(%r3)
; CHECK: br %r14
  %add = add i64 %amt, 524287
  %shift = shl i64 %a, %add
  ret i64 %shift
}
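
; Note: 524287 (2^19 - 1) is the largest value that fits in the signed 20-bit
; displacement of SLLG's D2(B2) operand.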

; Check the next value up, which without masking must use a separate
; addition.
define i64 @f9(i64 %a, i64 %amt) {
; CHECK-LABEL: f9:
; CHECK: a{{g?}}fi %r3, 524288
; CHECK: sllg %r2, %r2, 0(%r3)
; CHECK: br %r14
  %add = add i64 %amt, 524288
  %shift = shl i64 %a, %add
  ret i64 %shift
}

; Check cases where 1 is subtracted from the shift amount.
define i64 @f10(i64 %a, i64 %amt) {
; CHECK-LABEL: f10:
; CHECK: sllg %r2, %r2, -1(%r3)
; CHECK: br %r14
  %sub = sub i64 %amt, 1
  %shift = shl i64 %a, %sub
  ret i64 %shift
}

; Check the lowest value that can be subtracted from the shift amount.
; Again, we could mask the shift amount instead.
define i64 @f11(i64 %a, i64 %amt) {
; CHECK-LABEL: f11:
; CHECK: sllg %r2, %r2, -524288(%r3)
; CHECK: br %r14
  %sub = sub i64 %amt, 524288
  %shift = shl i64 %a, %sub
  ret i64 %shift
}
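
; Note: -524288 (-2^19) is the lower bound of that signed 20-bit displacement
; field.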

; Check the next value down, which without masking must use a separate
; addition.
define i64 @f12(i64 %a, i64 %amt) {
; CHECK-LABEL: f12:
; CHECK: a{{g?}}fi %r3, -524289
; CHECK: sllg %r2, %r2, 0(%r3)
; CHECK: br %r14
  %sub = sub i64 %amt, 524289
  %shift = shl i64 %a, %sub
  ret i64 %shift
}

; Check that we don't try to generate "indexed" shifts.
define i64 @f13(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: f13:
; CHECK: a{{g?}}r {{%r3, %r4|%r4, %r3}}
; CHECK: sllg %r2, %r2, 0({{%r[34]}})
; CHECK: br %r14
  %add = add i64 %b, %c
  %shift = shl i64 %a, %add
  ret i64 %shift
}

; Check that the shift amount uses an address register.  It cannot be in %r0.
define i64 @f14(i64 %a, i64 *%ptr) {
; CHECK-LABEL: f14:
; CHECK: l %r1, 4(%r3)
; CHECK: sllg %r2, %r2, 0(%r1)
; CHECK: br %r14
  %amt = load i64 *%ptr
  %shift = shl i64 %a, %amt
  ret i64 %shift
}