xref: /llvm-project/llvm/test/Transforms/AggressiveInstCombine/trunc_ashr.ll (revision 7c802f985f2c28985c233d05b559a4e8c92110ae)
1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt < %s -passes=aggressive-instcombine -S | FileCheck %s
3
4; Negative test - could be folded if preceding InstCombine
5; transforms `ashr` to `lshr`
6
7define i16 @ashr_15_zext(i16 %x) {
8; CHECK-LABEL: @ashr_15_zext(
9; CHECK-NEXT:    [[ZEXT:%.*]] = zext i16 [[X:%.*]] to i32
10; CHECK-NEXT:    [[ASHR:%.*]] = ashr i32 [[ZEXT]], 15
11; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[ASHR]] to i16
12; CHECK-NEXT:    ret i16 [[TRUNC]]
13;
; The CHECK lines match the input IR unchanged: the pass does not narrow
; this ashr. Per the header comment above, it could fold if InstCombine
; had first turned the ashr of a zero-extended value into an lshr.
14  %zext = zext i16 %x to i32
15  %ashr = ashr i32 %zext, 15
16  %trunc = trunc i32 %ashr to i16
17  ret i16 %trunc
18}
19
20define i16 @ashr_sext_15(i16 %x) {
21; CHECK-LABEL: @ashr_sext_15(
22; CHECK-NEXT:    [[ASHR:%.*]] = ashr i16 [[X:%.*]], 15
23; CHECK-NEXT:    ret i16 [[ASHR]]
24;
; Positive test: the i32 value is the sign-extension of %x, so performing
; the ashr directly in i16 is equivalent and sext/trunc are eliminated,
; leaving a single i16 ashr (see CHECK lines).
25  %sext = sext i16 %x to i32
26  %ashr = ashr i32 %sext, 15
27  %trunc = trunc i32 %ashr to i16
28  ret i16 %trunc
29}
30
31; Negative test
32
33define i16 @ashr_sext_16(i16 %x) {
34; CHECK-LABEL: @ashr_sext_16(
35; CHECK-NEXT:    [[ZEXT:%.*]] = zext i16 [[X:%.*]] to i32
36; CHECK-NEXT:    [[ASHR:%.*]] = ashr i32 [[ZEXT]], 16
37; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[ASHR]] to i16
38; CHECK-NEXT:    ret i16 [[TRUNC]]
39;
; No fold: the shift amount (16) is not smaller than the destination
; width (i16), so the shift cannot be performed in the narrow type.
; NOTE(review): the function name says "sext" but the body uses zext —
; confirm this matches the intended coverage.
40  %zext = zext i16 %x to i32
41  %ashr = ashr i32 %zext, 16
42  %trunc = trunc i32 %ashr to i16
43  ret i16 %trunc
44}
45
46; Negative test
47
48define i16 @ashr_var_shift_amount(i8 %x, i8 %amt) {
49; CHECK-LABEL: @ashr_var_shift_amount(
50; CHECK-NEXT:    [[Z:%.*]] = zext i8 [[X:%.*]] to i32
51; CHECK-NEXT:    [[ZA:%.*]] = zext i8 [[AMT:%.*]] to i32
52; CHECK-NEXT:    [[S:%.*]] = ashr i32 [[Z]], [[ZA]]
53; CHECK-NEXT:    [[A:%.*]] = add i32 [[S]], [[Z]]
54; CHECK-NEXT:    [[S2:%.*]] = ashr i32 [[A]], 2
55; CHECK-NEXT:    [[T:%.*]] = trunc i32 [[S2]] to i16
56; CHECK-NEXT:    ret i16 [[T]]
57;
; No fold: %za is only known to be in [0, 255] (zext of an i8 with no
; mask), so the shift amount may exceed the narrow bit width and the
; expression cannot be evaluated in i16. CHECK lines match the input.
58  %z = zext i8 %x to i32
59  %za = zext i8 %amt to i32
60  %s = ashr i32 %z, %za
61  %a = add i32 %s, %z
62  %s2 = ashr i32 %a, 2
63  %t = trunc i32 %s2 to i16
64  ret i16 %t
65}
66
67define i16 @ashr_var_bounded_shift_amount(i8 %x, i8 %amt) {
68; CHECK-LABEL: @ashr_var_bounded_shift_amount(
69; CHECK-NEXT:    [[Z:%.*]] = zext i8 [[X:%.*]] to i16
70; CHECK-NEXT:    [[ZA:%.*]] = zext i8 [[AMT:%.*]] to i16
71; CHECK-NEXT:    [[ZA2:%.*]] = and i16 [[ZA]], 15
72; CHECK-NEXT:    [[S:%.*]] = ashr i16 [[Z]], [[ZA2]]
73; CHECK-NEXT:    [[A:%.*]] = add i16 [[S]], [[Z]]
74; CHECK-NEXT:    [[S2:%.*]] = ashr i16 [[A]], 2
75; CHECK-NEXT:    ret i16 [[S2]]
;
; Positive test: the `and ..., 15` bounds the variable shift amount
; below the i16 bit width, so the whole expression dag is narrowed from
; i32 to i16 and the final trunc disappears (see CHECK lines).
76  %z = zext i8 %x to i32
77  %za = zext i8 %amt to i32
78  %za2 = and i32 %za, 15
79  %s = ashr i32 %z, %za2
80  %a = add i32 %s, %z
81  %s2 = ashr i32 %a, 2
82  %t = trunc i32 %s2 to i16
83  ret i16 %t
84}
86
87; Negative test
88
89define i32 @ashr_check_no_overflow(i32 %x, i16 %amt) {
90; CHECK-LABEL: @ashr_check_no_overflow(
91; CHECK-NEXT:    [[ZEXT:%.*]] = zext i32 [[X:%.*]] to i64
92; CHECK-NEXT:    [[SEXT:%.*]] = sext i16 [[AMT:%.*]] to i64
93; CHECK-NEXT:    [[AND:%.*]] = and i64 [[SEXT]], 4294967295
94; CHECK-NEXT:    [[SHL:%.*]] = ashr i64 [[ZEXT]], [[AND]]
95; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 [[SHL]] to i32
96; CHECK-NEXT:    ret i32 [[TRUNC]]
97;
; No fold: the mask 4294967295 (0xFFFFFFFF) still permits shift amounts
; up to 2^32-1, i.e. >= the i32 destination width, so the i64 ashr
; cannot be narrowed. CHECK lines match the input unchanged.
; NOTE(review): %shl actually names an ashr — leftover name from a
; copied test; harmless but worth confirming/renaming upstream.
98  %zext = zext i32 %x to i64
99  %sext = sext i16 %amt to i64
100  %and = and i64 %sext, 4294967295
101  %shl = ashr i64 %zext, %and
102  %trunc = trunc i64 %shl to i32
103  ret i32 %trunc
104}
105
106define void @ashr_big_dag(ptr %a, i8 %b, i8 %c) {
107; CHECK-LABEL: @ashr_big_dag(
108; CHECK-NEXT:    [[ZEXT1:%.*]] = zext i8 [[B:%.*]] to i16
109; CHECK-NEXT:    [[ZEXT2:%.*]] = zext i8 [[C:%.*]] to i16
110; CHECK-NEXT:    [[ADD1:%.*]] = add i16 [[ZEXT1]], [[ZEXT2]]
111; CHECK-NEXT:    [[SFT1:%.*]] = and i16 [[ADD1]], 15
112; CHECK-NEXT:    [[SHR1:%.*]] = ashr i16 [[ADD1]], [[SFT1]]
113; CHECK-NEXT:    [[ADD2:%.*]] = add i16 [[ADD1]], [[SHR1]]
114; CHECK-NEXT:    [[SFT2:%.*]] = and i16 [[ADD2]], 7
115; CHECK-NEXT:    [[SHR2:%.*]] = ashr i16 [[ADD2]], [[SFT2]]
116; CHECK-NEXT:    store i16 [[SHR2]], ptr [[A:%.*]], align 2
117; CHECK-NEXT:    ret void
118;
; Positive test: a multi-node dag feeding the truncated store. Both
; variable shift amounts are masked (15 and 7) below the i16 width, so
; every i32 operation is rewritten in i16 and the trunc is removed.
119  %zext1 = zext i8 %b to i32
120  %zext2 = zext i8 %c to i32
121  %add1 = add i32 %zext1, %zext2
122  %sft1 = and i32 %add1, 15
123  %shr1 = ashr i32 %add1, %sft1
124  %add2 = add i32 %add1, %shr1
125  %sft2 = and i32 %add2, 7
126  %shr2 = ashr i32 %add2, %sft2
127  %trunc = trunc i32 %shr2 to i16
128  store i16 %trunc, ptr %a, align 2
129  ret void
130}
131
132; Negative test
133
134define i8 @ashr_check_not_i8_trunc(i16 %x) {
135; CHECK-LABEL: @ashr_check_not_i8_trunc(
136; CHECK-NEXT:    [[ASHR:%.*]] = ashr i16 [[X:%.*]], 1
137; CHECK-NEXT:    [[ZEXT2:%.*]] = zext i16 [[ASHR]] to i32
138; CHECK-NEXT:    [[ASHR2:%.*]] = ashr i32 [[ZEXT2]], 2
139; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[ASHR2]] to i8
140; CHECK-NEXT:    ret i8 [[TRUNC]]
141;
; No fold when the trunc destination is i8: CHECK lines match the input
; unchanged. Presumably the transform does not narrow below i16 here —
; confirm against the pass implementation.
142  %ashr = ashr i16 %x, 1
143  %zext2 = zext i16 %ashr to i32
144  %ashr2 = ashr i32 %zext2, 2
145  %trunc = trunc i32 %ashr2 to i8
146  ret i8 %trunc
147}
148
149define <2 x i16> @ashr_vector(<2 x i8> %x) {
150; CHECK-LABEL: @ashr_vector(
151; CHECK-NEXT:    [[Z:%.*]] = zext <2 x i8> [[X:%.*]] to <2 x i16>
152; CHECK-NEXT:    [[ZA:%.*]] = and <2 x i16> [[Z]], <i16 7, i16 8>
153; CHECK-NEXT:    [[S:%.*]] = ashr <2 x i16> [[Z]], [[ZA]]
154; CHECK-NEXT:    [[A:%.*]] = add <2 x i16> [[S]], [[Z]]
155; CHECK-NEXT:    [[S2:%.*]] = ashr <2 x i16> [[A]], <i16 4, i16 5>
156; CHECK-NEXT:    ret <2 x i16> [[S2]]
;
; Positive vector test: every lane's shift amount is provably below 16
; (masks 7/8, constants 4/5), so the dag is narrowed from <2 x i32> to
; <2 x i16> and the trunc is eliminated (see CHECK lines).
157  %z = zext <2 x i8> %x to <2 x i32>
158  %za = and <2 x i32> %z, <i32 7, i32 8>
159  %s = ashr <2 x i32> %z, %za
160  %a = add <2 x i32> %s, %z
161  %s2 = ashr <2 x i32> %a, <i32 4, i32 5>
162  %t = trunc <2 x i32> %s2 to <2 x i16>
163  ret <2 x i16> %t
164}
166
167; Negative test - can only fold to <2 x i16>, requiring new vector type
168
169define <2 x i8> @ashr_vector_no_new_vector_type(<2 x i8> %x) {
170; CHECK-LABEL: @ashr_vector_no_new_vector_type(
171; CHECK-NEXT:    [[Z:%.*]] = zext <2 x i8> [[X:%.*]] to <2 x i32>
172; CHECK-NEXT:    [[ZA:%.*]] = and <2 x i32> [[Z]], <i32 7, i32 8>
173; CHECK-NEXT:    [[S:%.*]] = ashr <2 x i32> [[Z]], [[ZA]]
174; CHECK-NEXT:    [[A:%.*]] = add <2 x i32> [[S]], [[Z]]
175; CHECK-NEXT:    [[S2:%.*]] = ashr <2 x i32> [[A]], <i32 4, i32 5>
176; CHECK-NEXT:    [[T:%.*]] = trunc <2 x i32> [[S2]] to <2 x i8>
177; CHECK-NEXT:    ret <2 x i8> [[T]]
;
; No fold: same dag as @ashr_vector but truncating to <2 x i8>. Per the
; section comment above, narrowing would only be valid to <2 x i16>,
; which would introduce a new vector type, so the pass leaves it alone.
178  %z = zext <2 x i8> %x to <2 x i32>
179  %za = and <2 x i32> %z, <i32 7, i32 8>
180  %s = ashr <2 x i32> %z, %za
181  %a = add <2 x i32> %s, %z
182  %s2 = ashr <2 x i32> %a, <i32 4, i32 5>
183  %t = trunc <2 x i32> %s2 to <2 x i8>
184  ret <2 x i8> %t
185}
187
188; Negative test
189
190define <2 x i16> @ashr_vector_large_shift_amount(<2 x i8> %x) {
191; CHECK-LABEL: @ashr_vector_large_shift_amount(
192; CHECK-NEXT:    [[Z:%.*]] = zext <2 x i8> [[X:%.*]] to <2 x i32>
193; CHECK-NEXT:    [[ZA:%.*]] = and <2 x i32> [[Z]], <i32 7, i32 8>
194; CHECK-NEXT:    [[S:%.*]] = ashr <2 x i32> [[Z]], [[ZA]]
195; CHECK-NEXT:    [[A:%.*]] = add <2 x i32> [[S]], [[Z]]
196; CHECK-NEXT:    [[S2:%.*]] = ashr <2 x i32> [[A]], <i32 16, i32 5>
197; CHECK-NEXT:    [[T:%.*]] = trunc <2 x i32> [[S2]] to <2 x i16>
198; CHECK-NEXT:    ret <2 x i16> [[T]]
;
; No fold: lane 0 of the second ashr shifts by 16, which is not smaller
; than the <2 x i16> element width, so the dag cannot be narrowed and
; the CHECK lines match the input unchanged.
199  %z = zext <2 x i8> %x to <2 x i32>
200  %za = and <2 x i32> %z, <i32 7, i32 8>
201  %s = ashr <2 x i32> %z, %za
202  %a = add <2 x i32> %s, %z
203  %s2 = ashr <2 x i32> %a, <i32 16, i32 5>
204  %t = trunc <2 x i32> %s2 to <2 x i16>
205  ret <2 x i16> %t
206}
208
209define i16 @ashr_exact(i16 %x) {
210; CHECK-LABEL: @ashr_exact(
211; CHECK-NEXT:    [[AND:%.*]] = and i16 [[X:%.*]], 32767
212; CHECK-NEXT:    [[ASHR:%.*]] = ashr exact i16 [[AND]], 15
213; CHECK-NEXT:    ret i16 [[ASHR]]
;
; Positive test: the mask 32767 clears all but the low 15 bits, so the
; i32 value fits in i16 and the dag is narrowed; the CHECK lines also
; show the `exact` flag is carried over to the narrowed ashr.
214  %zext = zext i16 %x to i32
215  %and = and i32 %zext, 32767
216  %ashr = ashr exact i32 %and, 15
217  %trunc = trunc i32 %ashr to i16
218  ret i16 %trunc
219}
221
222; Negative test
223
224define i16 @ashr_negative_operand(i16 %x) {
225; CHECK-LABEL: @ashr_negative_operand(
226; CHECK-NEXT:    [[ZEXT:%.*]] = zext i16 [[X:%.*]] to i32
227; CHECK-NEXT:    [[XOR:%.*]] = xor i32 -1, [[ZEXT]]
228; CHECK-NEXT:    [[LSHR2:%.*]] = ashr i32 [[XOR]], 2
229; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[LSHR2]] to i16
230; CHECK-NEXT:    ret i16 [[TRUNC]]
;
; No fold: xor with -1 sets the high 16 bits of the zext'd value, but
; bit 15 of the result is ~bit15(%x) and may be 0, so the i32 value is
; not always the sign-extension of its low 16 bits — the ashr cannot be
; performed in i16. CHECK lines match the input unchanged.
231  %zext = zext i16 %x to i32
232  %xor = xor i32 -1, %zext
233  %lshr2 = ashr i32 %xor, 2
234  %trunc = trunc i32 %lshr2 to i16
235  ret i16 %trunc
236}
238
239define i16 @ashr_negative_operand_but_short(i16 %x) {
240; CHECK-LABEL: @ashr_negative_operand_but_short(
241; CHECK-NEXT:    [[AND:%.*]] = and i16 [[X:%.*]], 32767
242; CHECK-NEXT:    [[XOR:%.*]] = xor i16 -1, [[AND]]
243; CHECK-NEXT:    [[LSHR2:%.*]] = ashr i16 [[XOR]], 2
244; CHECK-NEXT:    ret i16 [[LSHR2]]
;
; Positive counterpart of @ashr_negative_operand: the `and ..., 32767`
; clears bit 15 first, so after xor with -1 every bit from 15 upward is
; set and the i32 value IS the sign-extension of its low 16 bits; the
; whole dag is safely narrowed to i16 (see CHECK lines).
245  %zext = zext i16 %x to i32
246  %and = and i32 %zext, 32767
247  %xor = xor i32 -1, %and
248  %lshr2 = ashr i32 %xor, 2
249  %trunc = trunc i32 %lshr2 to i16
250  ret i16 %trunc
251}
253