; xref: /llvm-project/llvm/test/Transforms/InstCombine/trunc.ll (revision 855bc46bc810c4ae97ea9f427429a7efd8b9cc15)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"

; Instcombine should be able to eliminate all of these ext casts.

declare void @use(i32)
declare void @use.i8(i8)
declare void @use_vec(<2 x i32>)

; A masked zext of a truncated value folds to an 'and' on the wide type,
; even though the trunc itself has another use.
define i64 @test1(i64 %a) {
; CHECK-LABEL: @test1(
; CHECK-NEXT:    [[B:%.*]] = trunc i64 [[A:%.*]] to i32
; CHECK-NEXT:    [[C:%.*]] = and i64 [[A]], 15
; CHECK-NEXT:    call void @use(i32 [[B]])
; CHECK-NEXT:    ret i64 [[C]]
;
  %b = trunc i64 %a to i32
  %c = and i32 %b, 15
  %d = zext i32 %c to i64
  call void @use(i32 %b)
  ret i64 %d
}

; Splat-vector version of test1.
define <2 x i64> @test1_vec(<2 x i64> %a) {
; CHECK-LABEL: @test1_vec(
; CHECK-NEXT:    [[B:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i32>
; CHECK-NEXT:    [[D:%.*]] = and <2 x i64> [[A]], splat (i64 15)
; CHECK-NEXT:    call void @use_vec(<2 x i32> [[B]])
; CHECK-NEXT:    ret <2 x i64> [[D]]
;
  %b = trunc <2 x i64> %a to <2 x i32>
  %c = and <2 x i32> %b, <i32 15, i32 15>
  %d = zext <2 x i32> %c to <2 x i64>
  call void @use_vec(<2 x i32> %b)
  ret <2 x i64> %d
}

; test1 with a non-uniform vector mask.
define <2 x i64> @test1_vec_nonuniform(<2 x i64> %a) {
; CHECK-LABEL: @test1_vec_nonuniform(
; CHECK-NEXT:    [[B:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i32>
; CHECK-NEXT:    [[D:%.*]] = and <2 x i64> [[A]], <i64 15, i64 7>
; CHECK-NEXT:    call void @use_vec(<2 x i32> [[B]])
; CHECK-NEXT:    ret <2 x i64> [[D]]
;
  %b = trunc <2 x i64> %a to <2 x i32>
  %c = and <2 x i32> %b, <i32 15, i32 7>
  %d = zext <2 x i32> %c to <2 x i64>
  call void @use_vec(<2 x i32> %b)
  ret <2 x i64> %d
}

; test1 with a poison lane in the mask.
define <2 x i64> @test1_vec_poison(<2 x i64> %a) {
; CHECK-LABEL: @test1_vec_poison(
; CHECK-NEXT:    [[B:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i32>
; CHECK-NEXT:    [[D:%.*]] = and <2 x i64> [[A]], <i64 15, i64 poison>
; CHECK-NEXT:    call void @use_vec(<2 x i32> [[B]])
; CHECK-NEXT:    ret <2 x i64> [[D]]
;
  %b = trunc <2 x i64> %a to <2 x i32>
  %c = and <2 x i32> %b, <i32 15, i32 poison>
  %d = zext <2 x i32> %c to <2 x i64>
  call void @use_vec(<2 x i32> %b)
  ret <2 x i64> %d
}

; sext of a (trunc, shl, ashr) sign-extension pattern becomes a wide
; shl + ashr exact pair.
define i64 @test2(i64 %a) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    [[B:%.*]] = trunc i64 [[A:%.*]] to i32
; CHECK-NEXT:    [[D1:%.*]] = shl i64 [[A]], 36
; CHECK-NEXT:    [[D:%.*]] = ashr exact i64 [[D1]], 36
; CHECK-NEXT:    call void @use(i32 [[B]])
; CHECK-NEXT:    ret i64 [[D]]
;
  %b = trunc i64 %a to i32
  %c = shl i32 %b, 4
  %q = ashr i32 %c, 4
  %d = sext i32 %q to i64
  call void @use(i32 %b)
  ret i64 %d
}

; Splat-vector version of test2.
define <2 x i64> @test2_vec(<2 x i64> %a) {
; CHECK-LABEL: @test2_vec(
; CHECK-NEXT:    [[B:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i32>
; CHECK-NEXT:    [[D1:%.*]] = shl <2 x i64> [[A]], splat (i64 36)
; CHECK-NEXT:    [[D:%.*]] = ashr exact <2 x i64> [[D1]], splat (i64 36)
; CHECK-NEXT:    call void @use_vec(<2 x i32> [[B]])
; CHECK-NEXT:    ret <2 x i64> [[D]]
;
  %b = trunc <2 x i64> %a to <2 x i32>
  %c = shl <2 x i32> %b, <i32 4, i32 4>
  %q = ashr <2 x i32> %c, <i32 4, i32 4>
  %d = sext <2 x i32> %q to <2 x i64>
  call void @use_vec(<2 x i32> %b)
  ret <2 x i64> %d
}

; test2 with non-uniform shift amounts.
define <2 x i64> @test2_vec_nonuniform(<2 x i64> %a) {
; CHECK-LABEL: @test2_vec_nonuniform(
; CHECK-NEXT:    [[B:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i32>
; CHECK-NEXT:    [[D1:%.*]] = shl <2 x i64> [[A]], <i64 36, i64 37>
; CHECK-NEXT:    [[D:%.*]] = ashr exact <2 x i64> [[D1]], <i64 36, i64 37>
; CHECK-NEXT:    call void @use_vec(<2 x i32> [[B]])
; CHECK-NEXT:    ret <2 x i64> [[D]]
;
  %b = trunc <2 x i64> %a to <2 x i32>
  %c = shl <2 x i32> %b, <i32 4, i32 5>
  %q = ashr <2 x i32> %c, <i32 4, i32 5>
  %d = sext <2 x i32> %q to <2 x i64>
  call void @use_vec(<2 x i32> %b)
  ret <2 x i64> %d
}

; test2 with a poison lane in the shift amounts.
define <2 x i64> @test2_vec_poison(<2 x i64> %a) {
; CHECK-LABEL: @test2_vec_poison(
; CHECK-NEXT:    [[B:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i32>
; CHECK-NEXT:    [[D1:%.*]] = shl <2 x i64> [[A]], <i64 36, i64 poison>
; CHECK-NEXT:    [[D:%.*]] = ashr exact <2 x i64> [[D1]], <i64 36, i64 poison>
; CHECK-NEXT:    call void @use_vec(<2 x i32> [[B]])
; CHECK-NEXT:    ret <2 x i64> [[D]]
;
  %b = trunc <2 x i64> %a to <2 x i32>
  %c = shl <2 x i32> %b, <i32 4, i32 poison>
  %q = ashr <2 x i32> %c, <i32 4, i32 poison>
  %d = sext <2 x i32> %q to <2 x i64>
  call void @use_vec(<2 x i32> %b)
  ret <2 x i64> %d
}

; Same fold as test1 with a single-bit mask.
define i64 @test3(i64 %a) {
; CHECK-LABEL: @test3(
; CHECK-NEXT:    [[B:%.*]] = trunc i64 [[A:%.*]] to i32
; CHECK-NEXT:    [[C:%.*]] = and i64 [[A]], 8
; CHECK-NEXT:    call void @use(i32 [[B]])
; CHECK-NEXT:    ret i64 [[C]]
;
  %b = trunc i64 %a to i32
  %c = and i32 %b, 8
  %d = zext i32 %c to i64
  call void @use(i32 %b)
  ret i64 %d
}

; The xor following the mask is also carried out in the wide type.
define i64 @test4(i64 %a) {
; CHECK-LABEL: @test4(
; CHECK-NEXT:    [[B:%.*]] = trunc i64 [[A:%.*]] to i32
; CHECK-NEXT:    [[C:%.*]] = and i64 [[A]], 8
; CHECK-NEXT:    [[X:%.*]] = xor i64 [[C]], 8
; CHECK-NEXT:    call void @use(i32 [[B]])
; CHECK-NEXT:    ret i64 [[X]]
;
  %b = trunc i64 %a to i32
  %c = and i32 %b, 8
  %x = xor i32 %c, 8
  %d = zext i32 %x to i64
  call void @use(i32 %b)
  ret i64 %d
}

; zext to i128, lshr, trunc back to i32 collapses to a single i32 lshr.
define i32 @test5(i32 %A) {
; CHECK-LABEL: @test5(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[A:%.*]], 16
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %B = zext i32 %A to i128
  %C = lshr i128 %B, 16
  %D = trunc i128 %C to i32
  ret i32 %D
}

; Extracting the high half narrows to a 64-bit lshr plus trunc nuw.
define i32 @test6(i64 %A) {
; CHECK-LABEL: @test6(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[A:%.*]], 32
; CHECK-NEXT:    [[D:%.*]] = trunc nuw i64 [[TMP1]] to i32
; CHECK-NEXT:    ret i32 [[D]]
;
  %B = zext i64 %A to i128
  %C = lshr i128 %B, 32
  %D = trunc i128 %C to i32
  ret i32 %D
}

; Test case where 'ashr' demanded bits does not contain any of the high bits,
; but does contain sign bits, where the sign bit is not known to be zero.
define i16 @ashr_mul_sign_bits(i8 %X, i8 %Y) {
; CHECK-LABEL: @ashr_mul_sign_bits(
; CHECK-NEXT:    [[A:%.*]] = sext i8 [[X:%.*]] to i16
; CHECK-NEXT:    [[B:%.*]] = sext i8 [[Y:%.*]] to i16
; CHECK-NEXT:    [[C:%.*]] = mul nsw i16 [[A]], [[B]]
; CHECK-NEXT:    [[D:%.*]] = ashr i16 [[C]], 3
; CHECK-NEXT:    ret i16 [[D]]
;
  %A = sext i8 %X to i32
  %B = sext i8 %Y to i32
  %C = mul i32 %A, %B
  %D = ashr i32 %C, 3
  %E = trunc i32 %D to i16
  ret i16 %E
}

; Enough known sign bits allow narrowing the mul/ashr from i20 to i16.
define i16 @ashr_mul(i8 %X, i8 %Y) {
; CHECK-LABEL: @ashr_mul(
; CHECK-NEXT:    [[A:%.*]] = sext i8 [[X:%.*]] to i16
; CHECK-NEXT:    [[B:%.*]] = sext i8 [[Y:%.*]] to i16
; CHECK-NEXT:    [[C:%.*]] = mul nsw i16 [[A]], [[B]]
; CHECK-NEXT:    [[D:%.*]] = ashr i16 [[C]], 8
; CHECK-NEXT:    ret i16 [[D]]
;
  %A = sext i8 %X to i20
  %B = sext i8 %Y to i20
  %C = mul i20 %A, %B
  %D = ashr i20 %C, 8
  %E = trunc i20 %D to i16
  ret i16 %E
}

; ashr of a value whose high bits are known becomes lshr + or after the trunc.
define i32 @trunc_ashr(i32 %X) {
; CHECK-LABEL: @trunc_ashr(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 8
; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[TMP1]], -8388608
; CHECK-NEXT:    ret i32 [[TMP2]]
;
  %A = zext i32 %X to i36
  %B = or i36 %A, -2147483648 ; 0xF80000000
  %C = ashr i36 %B, 8
  %T = trunc i36 %C to i32
  ret i32  %T
}

; Vector version of trunc_ashr.
define <2 x i32> @trunc_ashr_vec(<2 x i32> %X) {
; CHECK-LABEL: @trunc_ashr_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], splat (i32 8)
; CHECK-NEXT:    [[TMP2:%.*]] = or <2 x i32> [[TMP1]], splat (i32 -8388608)
; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
;
  %A = zext <2 x i32> %X to <2 x i36>
  %B = or <2 x i36> %A, <i36 -2147483648, i36 -2147483648> ; 0xF80000000
  %C = ashr <2 x i36> %B, <i36 8, i36 8>
  %T = trunc <2 x i36> %C to <2 x i32>
  ret <2 x i32>  %T
}

; trunc to a type wider than the original source becomes lshr + zext nneg.
define i92 @test7(i64 %A) {
; CHECK-LABEL: @test7(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[A:%.*]], 32
; CHECK-NEXT:    [[D:%.*]] = zext nneg i64 [[TMP1]] to i92
; CHECK-NEXT:    ret i92 [[D]]
;
  %B = zext i64 %A to i128
  %C = lshr i128 %B, 32
  %D = trunc i128 %C to i92
  ret i92 %D
}

; A 128-bit concatenation pattern truncated to i64 folds to
; shl nuw + or disjoint computed in 64 bits.
define i64 @test8(i32 %A, i32 %B) {
; CHECK-LABEL: @test8(
; CHECK-NEXT:    [[C:%.*]] = zext i32 [[A:%.*]] to i64
; CHECK-NEXT:    [[D:%.*]] = zext i32 [[B:%.*]] to i64
; CHECK-NEXT:    [[E:%.*]] = shl nuw i64 [[D]], 32
; CHECK-NEXT:    [[F:%.*]] = or disjoint i64 [[E]], [[C]]
; CHECK-NEXT:    ret i64 [[F]]
;
  %C = zext i32 %A to i128
  %D = zext i32 %B to i128
  %E = shl i128 %D, 32
  %F = or i128 %E, %C
  %G = trunc i128 %F to i64
  ret i64 %G
}

; Splat-vector version of test8.
define <2 x i64> @test8_vec(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: @test8_vec(
; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
; CHECK-NEXT:    [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i64>
; CHECK-NEXT:    [[E:%.*]] = shl nuw <2 x i64> [[D]], splat (i64 32)
; CHECK-NEXT:    [[F:%.*]] = or disjoint <2 x i64> [[E]], [[C]]
; CHECK-NEXT:    ret <2 x i64> [[F]]
;
  %C = zext <2 x i32> %A to <2 x i128>
  %D = zext <2 x i32> %B to <2 x i128>
  %E = shl <2 x i128> %D, <i128 32, i128 32>
  %F = or <2 x i128> %E, %C
  %G = trunc <2 x i128> %F to <2 x i64>
  ret <2 x i64> %G
}

; test8 with non-uniform shift amounts (no nuw on the narrowed shl).
define <2 x i64> @test8_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: @test8_vec_nonuniform(
; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
; CHECK-NEXT:    [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i64>
; CHECK-NEXT:    [[E:%.*]] = shl <2 x i64> [[D]], <i64 32, i64 48>
; CHECK-NEXT:    [[F:%.*]] = or disjoint <2 x i64> [[E]], [[C]]
; CHECK-NEXT:    ret <2 x i64> [[F]]
;
  %C = zext <2 x i32> %A to <2 x i128>
  %D = zext <2 x i32> %B to <2 x i128>
  %E = shl <2 x i128> %D, <i128 32, i128 48>
  %F = or <2 x i128> %E, %C
  %G = trunc <2 x i128> %F to <2 x i64>
  ret <2 x i64> %G
}

; test8 with a poison lane in the shift amount.
define <2 x i64> @test8_vec_poison(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: @test8_vec_poison(
; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
; CHECK-NEXT:    [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i64>
; CHECK-NEXT:    [[E:%.*]] = shl nuw <2 x i64> [[D]], <i64 32, i64 poison>
; CHECK-NEXT:    [[F:%.*]] = or disjoint <2 x i64> [[E]], [[C]]
; CHECK-NEXT:    ret <2 x i64> [[F]]
;
  %C = zext <2 x i32> %A to <2 x i128>
  %D = zext <2 x i32> %B to <2 x i128>
  %E = shl <2 x i128> %D, <i128 32, i128 poison>
  %F = or <2 x i128> %E, %C
  %G = trunc <2 x i128> %F to <2 x i64>
  ret <2 x i64> %G
}

; trunc(and X, C) is rewritten as and(trunc X), C'.
define i8 @test9(i32 %X) {
; CHECK-LABEL: @test9(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[X:%.*]] to i8
; CHECK-NEXT:    [[Z:%.*]] = and i8 [[TMP1]], 42
; CHECK-NEXT:    ret i8 [[Z]]
;
  %Y = and i32 %X, 42
  %Z = trunc i32 %Y to i8
  ret i8 %Z
}

; rdar://8808586
; Already-narrow trunc+and: kept as-is.
define i8 @test10(i32 %X) {
; CHECK-LABEL: @test10(
; CHECK-NEXT:    [[Y:%.*]] = trunc i32 [[X:%.*]] to i8
; CHECK-NEXT:    [[Z:%.*]] = and i8 [[Y]], 42
; CHECK-NEXT:    ret i8 [[Z]]
;
  %Y = trunc i32 %X to i8
  %Z = and i8 %Y, 42
  ret i8 %Z
}

; The i128 shl and its masked shift amount are narrowed to 64 bits.
define i64 @test11(i32 %A, i32 %B) {
; CHECK-LABEL: @test11(
; CHECK-NEXT:    [[C:%.*]] = zext i32 [[A:%.*]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[B:%.*]], 31
; CHECK-NEXT:    [[E:%.*]] = zext nneg i32 [[TMP1]] to i64
; CHECK-NEXT:    [[F:%.*]] = shl nuw nsw i64 [[C]], [[E]]
; CHECK-NEXT:    ret i64 [[F]]
;
  %C = zext i32 %A to i128
  %D = zext i32 %B to i128
  %E = and i128 %D, 31
  %F = shl i128 %C, %E
  %G = trunc i128 %F to i64
  ret i64 %G
}

; Splat-vector version of test11.
define <2 x i64> @test11_vec(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: @test11_vec(
; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], splat (i32 31)
; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    [[F:%.*]] = shl nuw nsw <2 x i64> [[C]], [[E]]
; CHECK-NEXT:    ret <2 x i64> [[F]]
;
  %C = zext <2 x i32> %A to <2 x i128>
  %D = zext <2 x i32> %B to <2 x i128>
  %E = and <2 x i128> %D, <i128 31, i128 31>
  %F = shl <2 x i128> %C, %E
  %G = trunc <2 x i128> %F to <2 x i64>
  ret <2 x i64> %G
}

; test11 with a non-uniform shift-amount mask.
define <2 x i64> @test11_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: @test11_vec_nonuniform(
; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 15>
; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    [[F:%.*]] = shl nuw nsw <2 x i64> [[C]], [[E]]
; CHECK-NEXT:    ret <2 x i64> [[F]]
;
  %C = zext <2 x i32> %A to <2 x i128>
  %D = zext <2 x i32> %B to <2 x i128>
  %E = and <2 x i128> %D, <i128 31, i128 15>
  %F = shl <2 x i128> %C, %E
  %G = trunc <2 x i128> %F to <2 x i64>
  ret <2 x i64> %G
}

; test11 with a poison lane in the shift-amount mask.
define <2 x i64> @test11_vec_poison(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: @test11_vec_poison(
; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 poison>
; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    [[F:%.*]] = shl nuw nsw <2 x i64> [[C]], [[E]]
; CHECK-NEXT:    ret <2 x i64> [[F]]
;
  %C = zext <2 x i32> %A to <2 x i128>
  %D = zext <2 x i32> %B to <2 x i128>
  %E = and <2 x i128> %D, <i128 31, i128 poison>
  %F = shl <2 x i128> %C, %E
  %G = trunc <2 x i128> %F to <2 x i64>
  ret <2 x i64> %G
}

; Same narrowing as test11 but for lshr.
define i64 @test12(i32 %A, i32 %B) {
; CHECK-LABEL: @test12(
; CHECK-NEXT:    [[C:%.*]] = zext i32 [[A:%.*]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[B:%.*]], 31
; CHECK-NEXT:    [[E:%.*]] = zext nneg i32 [[TMP1]] to i64
; CHECK-NEXT:    [[F:%.*]] = lshr i64 [[C]], [[E]]
; CHECK-NEXT:    ret i64 [[F]]
;
  %C = zext i32 %A to i128
  %D = zext i32 %B to i128
  %E = and i128 %D, 31
  %F = lshr i128 %C, %E
  %G = trunc i128 %F to i64
  ret i64 %G
}

; Splat-vector version of test12.
define <2 x i64> @test12_vec(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: @test12_vec(
; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], splat (i32 31)
; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    [[F:%.*]] = lshr <2 x i64> [[C]], [[E]]
; CHECK-NEXT:    ret <2 x i64> [[F]]
;
  %C = zext <2 x i32> %A to <2 x i128>
  %D = zext <2 x i32> %B to <2 x i128>
  %E = and <2 x i128> %D, <i128 31, i128 31>
  %F = lshr <2 x i128> %C, %E
  %G = trunc <2 x i128> %F to <2 x i64>
  ret <2 x i64> %G
}

; test12 with a non-uniform shift-amount mask.
define <2 x i64> @test12_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: @test12_vec_nonuniform(
; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 15>
; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    [[F:%.*]] = lshr <2 x i64> [[C]], [[E]]
; CHECK-NEXT:    ret <2 x i64> [[F]]
;
  %C = zext <2 x i32> %A to <2 x i128>
  %D = zext <2 x i32> %B to <2 x i128>
  %E = and <2 x i128> %D, <i128 31, i128 15>
  %F = lshr <2 x i128> %C, %E
  %G = trunc <2 x i128> %F to <2 x i64>
  ret <2 x i64> %G
}

; test12 with a poison lane in the shift-amount mask.
define <2 x i64> @test12_vec_poison(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: @test12_vec_poison(
; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 poison>
; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    [[F:%.*]] = lshr <2 x i64> [[C]], [[E]]
; CHECK-NEXT:    ret <2 x i64> [[F]]
;
  %C = zext <2 x i32> %A to <2 x i128>
  %D = zext <2 x i32> %B to <2 x i128>
  %E = and <2 x i128> %D, <i128 31, i128 poison>
  %F = lshr <2 x i128> %C, %E
  %G = trunc <2 x i128> %F to <2 x i64>
  ret <2 x i64> %G
}

; Same narrowing as test11/test12 but for ashr of a sign-extended value.
define i64 @test13(i32 %A, i32 %B) {
; CHECK-LABEL: @test13(
; CHECK-NEXT:    [[C:%.*]] = sext i32 [[A:%.*]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[B:%.*]], 31
; CHECK-NEXT:    [[E:%.*]] = zext nneg i32 [[TMP1]] to i64
; CHECK-NEXT:    [[F:%.*]] = ashr i64 [[C]], [[E]]
; CHECK-NEXT:    ret i64 [[F]]
;
  %C = sext i32 %A to i128
  %D = zext i32 %B to i128
  %E = and i128 %D, 31
  %F = ashr i128 %C, %E
  %G = trunc i128 %F to i64
  ret i64 %G
}

; Splat-vector version of test13.
define <2 x i64> @test13_vec(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: @test13_vec(
; CHECK-NEXT:    [[C:%.*]] = sext <2 x i32> [[A:%.*]] to <2 x i64>
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], splat (i32 31)
; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    [[F:%.*]] = ashr <2 x i64> [[C]], [[E]]
; CHECK-NEXT:    ret <2 x i64> [[F]]
;
  %C = sext <2 x i32> %A to <2 x i128>
  %D = zext <2 x i32> %B to <2 x i128>
  %E = and <2 x i128> %D, <i128 31, i128 31>
  %F = ashr <2 x i128> %C, %E
  %G = trunc <2 x i128> %F to <2 x i64>
  ret <2 x i64> %G
}

; test13 with a non-uniform shift-amount mask.
define <2 x i64> @test13_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: @test13_vec_nonuniform(
; CHECK-NEXT:    [[C:%.*]] = sext <2 x i32> [[A:%.*]] to <2 x i64>
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 15>
; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    [[F:%.*]] = ashr <2 x i64> [[C]], [[E]]
; CHECK-NEXT:    ret <2 x i64> [[F]]
;
  %C = sext <2 x i32> %A to <2 x i128>
  %D = zext <2 x i32> %B to <2 x i128>
  %E = and <2 x i128> %D, <i128 31, i128 15>
  %F = ashr <2 x i128> %C, %E
  %G = trunc <2 x i128> %F to <2 x i64>
  ret <2 x i64> %G
}

; test13 with a poison lane in the shift-amount mask.
define <2 x i64> @test13_vec_poison(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: @test13_vec_poison(
; CHECK-NEXT:    [[C:%.*]] = sext <2 x i32> [[A:%.*]] to <2 x i64>
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 poison>
; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    [[F:%.*]] = ashr <2 x i64> [[C]], [[E]]
; CHECK-NEXT:    ret <2 x i64> [[F]]
;
  %C = sext <2 x i32> %A to <2 x i128>
  %D = zext <2 x i32> %B to <2 x i128>
  %E = and <2 x i128> %D, <i128 31, i128 poison>
  %F = ashr <2 x i128> %C, %E
  %G = trunc <2 x i128> %F to <2 x i64>
  ret <2 x i64> %G
}

; PR25543
; https://llvm.org/bugs/show_bug.cgi?id=25543
; This is an extractelement.

define i32 @trunc_bitcast1(<4 x i32> %v) {
; CHECK-LABEL: @trunc_bitcast1(
; CHECK-NEXT:    [[EXT:%.*]] = extractelement <4 x i32> [[V:%.*]], i64 1
; CHECK-NEXT:    ret i32 [[EXT]]
;
  %bc = bitcast <4 x i32> %v to i128
  %shr = lshr i128 %bc, 32
  %ext = trunc i128 %shr to i32
  ret i32 %ext
}

; A bitcast may still be required.

define i32 @trunc_bitcast2(<2 x i64> %v) {
; CHECK-LABEL: @trunc_bitcast2(
; CHECK-NEXT:    [[BC1:%.*]] = bitcast <2 x i64> [[V:%.*]] to <4 x i32>
; CHECK-NEXT:    [[EXT:%.*]] = extractelement <4 x i32> [[BC1]], i64 2
; CHECK-NEXT:    ret i32 [[EXT]]
;
  %bc = bitcast <2 x i64> %v to i128
  %shr = lshr i128 %bc, 64
  %ext = trunc i128 %shr to i32
  ret i32 %ext
}

; The right shift is optional.

define i32 @trunc_bitcast3(<4 x i32> %v) {
; CHECK-LABEL: @trunc_bitcast3(
; CHECK-NEXT:    [[EXT:%.*]] = extractelement <4 x i32> [[V:%.*]], i64 0
; CHECK-NEXT:    ret i32 [[EXT]]
;
  %bc = bitcast <4 x i32> %v to i128
  %ext = trunc i128 %bc to i32
  ret i32 %ext
}

; shl by a constant that fits the narrow width moves below the trunc.
define i32 @trunc_shl_31_i32_i64(i64 %val) {
; CHECK-LABEL: @trunc_shl_31_i32_i64(
; CHECK-NEXT:    [[VAL_TR:%.*]] = trunc i64 [[VAL:%.*]] to i32
; CHECK-NEXT:    [[TRUNC:%.*]] = shl i32 [[VAL_TR]], 31
; CHECK-NEXT:    ret i32 [[TRUNC]]
;
  %shl = shl i64 %val, 31
  %trunc = trunc i64 %shl to i32
  ret i32 %trunc
}

; Same fold; the nsw flag on the wide shl is dropped when narrowing.
define i32 @trunc_shl_nsw_31_i32_i64(i64 %val) {
; CHECK-LABEL: @trunc_shl_nsw_31_i32_i64(
; CHECK-NEXT:    [[VAL_TR:%.*]] = trunc i64 [[VAL:%.*]] to i32
; CHECK-NEXT:    [[TRUNC:%.*]] = shl i32 [[VAL_TR]], 31
; CHECK-NEXT:    ret i32 [[TRUNC]]
;
  %shl = shl nsw i64 %val, 31
  %trunc = trunc i64 %shl to i32
  ret i32 %trunc
}

; Same fold; the nuw flag on the wide shl is dropped when narrowing.
define i32 @trunc_shl_nuw_31_i32_i64(i64 %val) {
; CHECK-LABEL: @trunc_shl_nuw_31_i32_i64(
; CHECK-NEXT:    [[VAL_TR:%.*]] = trunc i64 [[VAL:%.*]] to i32
; CHECK-NEXT:    [[TRUNC:%.*]] = shl i32 [[VAL_TR]], 31
; CHECK-NEXT:    ret i32 [[TRUNC]]
;
  %shl = shl nuw i64 %val, 31
  %trunc = trunc i64 %shl to i32
  ret i32 %trunc
}

; Same fold; both wrap flags are dropped when narrowing.
define i32 @trunc_shl_nsw_nuw_31_i32_i64(i64 %val) {
; CHECK-LABEL: @trunc_shl_nsw_nuw_31_i32_i64(
; CHECK-NEXT:    [[VAL_TR:%.*]] = trunc i64 [[VAL:%.*]] to i32
; CHECK-NEXT:    [[TRUNC:%.*]] = shl i32 [[VAL_TR]], 31
; CHECK-NEXT:    ret i32 [[TRUNC]]
;
  %shl = shl nsw nuw i64 %val, 31
  %trunc = trunc i64 %shl to i32
  ret i32 %trunc
}

; Narrow to i16 with the maximum in-range shift amount.
define i16 @trunc_shl_15_i16_i64(i64 %val) {
; CHECK-LABEL: @trunc_shl_15_i16_i64(
; CHECK-NEXT:    [[VAL_TR:%.*]] = trunc i64 [[VAL:%.*]] to i16
; CHECK-NEXT:    [[TRUNC:%.*]] = shl i16 [[VAL_TR]], 15
; CHECK-NEXT:    ret i16 [[TRUNC]]
;
  %shl = shl i64 %val, 15
  %trunc = trunc i64 %shl to i16
  ret i16 %trunc
}

; Same narrowing starting from an i32 source.
define i16 @trunc_shl_15_i16_i32(i32 %val) {
; CHECK-LABEL: @trunc_shl_15_i16_i32(
; CHECK-NEXT:    [[VAL_TR:%.*]] = trunc i32 [[VAL:%.*]] to i16
; CHECK-NEXT:    [[TRUNC:%.*]] = shl i16 [[VAL_TR]], 15
; CHECK-NEXT:    ret i16 [[TRUNC]]
;
  %shl = shl i32 %val, 15
  %trunc = trunc i32 %shl to i16
  ret i16 %trunc
}

; Narrow to i8 with the maximum in-range shift amount.
define i8 @trunc_shl_7_i8_i64(i64 %val) {
; CHECK-LABEL: @trunc_shl_7_i8_i64(
; CHECK-NEXT:    [[VAL_TR:%.*]] = trunc i64 [[VAL:%.*]] to i8
; CHECK-NEXT:    [[TRUNC:%.*]] = shl i8 [[VAL_TR]], 7
; CHECK-NEXT:    ret i8 [[TRUNC]]
;
  %shl = shl i64 %val, 7
  %trunc = trunc i64 %shl to i8
  ret i8 %trunc
}

; The shift is not narrowed for the tiny i2 destination type.
define i2 @trunc_shl_1_i2_i64(i64 %val) {
; CHECK-LABEL: @trunc_shl_1_i2_i64(
; CHECK-NEXT:    [[SHL:%.*]] = shl i64 [[VAL:%.*]], 1
; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 [[SHL]] to i2
; CHECK-NEXT:    ret i2 [[TRUNC]]
;
  %shl = shl i64 %val, 1
  %trunc = trunc i64 %shl to i2
  ret i2 %trunc
}

; Minimal shift amount also narrows to i32.
define i32 @trunc_shl_1_i32_i64(i64 %val) {
; CHECK-LABEL: @trunc_shl_1_i32_i64(
; CHECK-NEXT:    [[VAL_TR:%.*]] = trunc i64 [[VAL:%.*]] to i32
; CHECK-NEXT:    [[TRUNC:%.*]] = shl i32 [[VAL_TR]], 1
; CHECK-NEXT:    ret i32 [[TRUNC]]
;
  %shl = shl i64 %val, 1
  %trunc = trunc i64 %shl to i32
  ret i32 %trunc
}

; Mid-range shift amount also narrows to i32.
define i32 @trunc_shl_16_i32_i64(i64 %val) {
; CHECK-LABEL: @trunc_shl_16_i32_i64(
; CHECK-NEXT:    [[VAL_TR:%.*]] = trunc i64 [[VAL:%.*]] to i32
; CHECK-NEXT:    [[TRUNC:%.*]] = shl i32 [[VAL_TR]], 16
; CHECK-NEXT:    ret i32 [[TRUNC]]
;
  %shl = shl i64 %val, 16
  %trunc = trunc i64 %shl to i32
  ret i32 %trunc
}

; Shifting left by 33 leaves no demanded low 32 bits; folds to zero.
define i32 @trunc_shl_33_i32_i64(i64 %val) {
; CHECK-LABEL: @trunc_shl_33_i32_i64(
; CHECK-NEXT:    ret i32 0
;
  %shl = shl i64 %val, 33
  %trunc = trunc i64 %shl to i32
  ret i32 %trunc
}

; Shifting left by exactly 32 also folds to zero.
define i32 @trunc_shl_32_i32_i64(i64 %val) {
; CHECK-LABEL: @trunc_shl_32_i32_i64(
; CHECK-NEXT:    ret i32 0
;
  %shl = shl i64 %val, 32
  %trunc = trunc i64 %shl to i32
  ret i32 %trunc
}

; Should be able to handle vectors
define <2 x i32> @trunc_shl_16_v2i32_v2i64(<2 x i64> %val) {
; CHECK-LABEL: @trunc_shl_16_v2i32_v2i64(
; CHECK-NEXT:    [[VAL_TR:%.*]] = trunc <2 x i64> [[VAL:%.*]] to <2 x i32>
; CHECK-NEXT:    [[TRUNC:%.*]] = shl <2 x i32> [[VAL_TR]], splat (i32 16)
; CHECK-NEXT:    ret <2 x i32> [[TRUNC]]
;
  %shl = shl <2 x i64> %val, <i64 16, i64 16>
  %trunc = trunc <2 x i64> %shl to <2 x i32>
  ret <2 x i32> %trunc
}

; Non-splat in-range shift amounts are still narrowed.
define <2 x i32> @trunc_shl_nosplat_v2i32_v2i64(<2 x i64> %val) {
; CHECK-LABEL: @trunc_shl_nosplat_v2i32_v2i64(
; CHECK-NEXT:    [[VAL_TR:%.*]] = trunc <2 x i64> [[VAL:%.*]] to <2 x i32>
; CHECK-NEXT:    [[TRUNC:%.*]] = shl <2 x i32> [[VAL_TR]], <i32 15, i32 16>
; CHECK-NEXT:    ret <2 x i32> [[TRUNC]]
;
  %shl = shl <2 x i64> %val, <i64 15, i64 16>
  %trunc = trunc <2 x i64> %shl to <2 x i32>
  ret <2 x i32> %trunc
}

; The wide shl has another use, so it must be preserved alongside the trunc.
define void @trunc_shl_31_i32_i64_multi_use(i64 %val, ptr addrspace(1) %ptr0, ptr addrspace(1) %ptr1) {
; CHECK-LABEL: @trunc_shl_31_i32_i64_multi_use(
; CHECK-NEXT:    [[SHL:%.*]] = shl i64 [[VAL:%.*]], 31
; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 [[SHL]] to i32
; CHECK-NEXT:    store volatile i32 [[TRUNC]], ptr addrspace(1) [[PTR0:%.*]], align 4
; CHECK-NEXT:    store volatile i64 [[SHL]], ptr addrspace(1) [[PTR1:%.*]], align 8
; CHECK-NEXT:    ret void
;
  %shl = shl i64 %val, 31
  %trunc = trunc i64 %shl to i32
  store volatile i32 %trunc, ptr addrspace(1) %ptr0
  store volatile i64 %shl, ptr addrspace(1) %ptr1
  ret void
}

; Named for a historical infinite-loop regression: lshr+shl+trunc must
; settle on narrowed shl + mask.
define i32 @trunc_shl_lshr_infloop(i64 %arg) {
; CHECK-LABEL: @trunc_shl_lshr_infloop(
; CHECK-NEXT:    [[ARG_TR:%.*]] = trunc i64 [[ARG:%.*]] to i32
; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[ARG_TR]], 1
; CHECK-NEXT:    [[C:%.*]] = and i32 [[TMP1]], -4
; CHECK-NEXT:    ret i32 [[C]]
;
  %A = lshr i64 %arg, 1
  %B = shl i64 %A, 2
  %C = trunc i64 %B to i32
  ret i32 %C
}

; Uniform splat shift amount narrows to <2 x i32>.
define <2 x i32> @trunc_shl_v2i32_v2i64_uniform(<2 x i64> %val) {
; CHECK-LABEL: @trunc_shl_v2i32_v2i64_uniform(
; CHECK-NEXT:    [[VAL_TR:%.*]] = trunc <2 x i64> [[VAL:%.*]] to <2 x i32>
; CHECK-NEXT:    [[TRUNC:%.*]] = shl <2 x i32> [[VAL_TR]], splat (i32 31)
; CHECK-NEXT:    ret <2 x i32> [[TRUNC]]
;
  %shl = shl <2 x i64> %val, <i64 31, i64 31>
  %trunc = trunc <2 x i64> %shl to <2 x i32>
  ret <2 x i32> %trunc
}

; A poison lane in the shift amount does not block the narrowing.
define <2 x i32> @trunc_shl_v2i32_v2i64_poison(<2 x i64> %val) {
; CHECK-LABEL: @trunc_shl_v2i32_v2i64_poison(
; CHECK-NEXT:    [[VAL_TR:%.*]] = trunc <2 x i64> [[VAL:%.*]] to <2 x i32>
; CHECK-NEXT:    [[TRUNC:%.*]] = shl <2 x i32> [[VAL_TR]], <i32 31, i32 poison>
; CHECK-NEXT:    ret <2 x i32> [[TRUNC]]
;
  %shl = shl <2 x i64> %val, <i64 31, i64 poison>
  %trunc = trunc <2 x i64> %shl to <2 x i32>
  ret <2 x i32> %trunc
}

; Non-uniform but in-range shift amounts are narrowed.
define <2 x i32> @trunc_shl_v2i32_v2i64_nonuniform(<2 x i64> %val) {
; CHECK-LABEL: @trunc_shl_v2i32_v2i64_nonuniform(
; CHECK-NEXT:    [[VAL_TR:%.*]] = trunc <2 x i64> [[VAL:%.*]] to <2 x i32>
; CHECK-NEXT:    [[TRUNC:%.*]] = shl <2 x i32> [[VAL_TR]], <i32 31, i32 12>
; CHECK-NEXT:    ret <2 x i32> [[TRUNC]]
;
  %shl = shl <2 x i64> %val, <i64 31, i64 12>
  %trunc = trunc <2 x i64> %shl to <2 x i32>
  ret <2 x i32> %trunc
}

; One lane's shift amount exceeds the narrow width, so no narrowing occurs.
define <2 x i32> @trunc_shl_v2i32_v2i64_outofrange(<2 x i64> %val) {
; CHECK-LABEL: @trunc_shl_v2i32_v2i64_outofrange(
; CHECK-NEXT:    [[SHL:%.*]] = shl <2 x i64> [[VAL:%.*]], <i64 31, i64 33>
; CHECK-NEXT:    [[TRUNC:%.*]] = trunc <2 x i64> [[SHL]] to <2 x i32>
; CHECK-NEXT:    ret <2 x i32> [[TRUNC]]
;
  %shl = shl <2 x i64> %val, <i64 31, i64 33>
  %trunc = trunc <2 x i64> %shl to <2 x i32>
  ret <2 x i32> %trunc
}

; Named for a historical infinite-loop regression: ashr+shl+trunc folds
; to lshr + trunc + mask.
define i32 @trunc_shl_ashr_infloop(i64 %arg) {
; CHECK-LABEL: @trunc_shl_ashr_infloop(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[ARG:%.*]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
; CHECK-NEXT:    [[C:%.*]] = and i32 [[TMP2]], -4
; CHECK-NEXT:    ret i32 [[C]]
;
  %A = ashr i64 %arg, 3
  %B = shl i64 %A, 2
  %C = trunc i64 %B to i32
  ret i32 %C
}

; Named for a historical infinite-loop regression: adjacent shls are
; combined and narrowed past the trunc.
define i32 @trunc_shl_shl_infloop(i64 %arg) {
; CHECK-LABEL: @trunc_shl_shl_infloop(
; CHECK-NEXT:    [[ARG_TR:%.*]] = trunc i64 [[ARG:%.*]] to i32
; CHECK-NEXT:    [[C:%.*]] = shl i32 [[ARG_TR]], 3
; CHECK-NEXT:    ret i32 [[C]]
;
  %A = shl i64 %arg, 1
  %B = shl i64 %A, 2
  %C = trunc i64 %B to i32
  ret i32 %C
}

; Variable inner lshr: only the trunc and the constant shl are reassociated.
define i32 @trunc_shl_lshr_var(i64 %arg, i64 %val) {
; CHECK-LABEL: @trunc_shl_lshr_var(
; CHECK-NEXT:    [[A:%.*]] = lshr i64 [[ARG:%.*]], [[VAL:%.*]]
; CHECK-NEXT:    [[A_TR:%.*]] = trunc i64 [[A]] to i32
; CHECK-NEXT:    [[C:%.*]] = shl i32 [[A_TR]], 2
; CHECK-NEXT:    ret i32 [[C]]
;
  %A = lshr i64 %arg, %val
  %B = shl i64 %A, 2
  %C = trunc i64 %B to i32
  ret i32 %C
}

; Variable inner ashr: only the trunc and the constant shl are reassociated.
define i32 @trunc_shl_ashr_var(i64 %arg, i64 %val) {
; CHECK-LABEL: @trunc_shl_ashr_var(
; CHECK-NEXT:    [[A:%.*]] = ashr i64 [[ARG:%.*]], [[VAL:%.*]]
; CHECK-NEXT:    [[A_TR:%.*]] = trunc i64 [[A]] to i32
; CHECK-NEXT:    [[C:%.*]] = shl i32 [[A_TR]], 2
; CHECK-NEXT:    ret i32 [[C]]
;
  %A = ashr i64 %arg, %val
  %B = shl i64 %A, 2
  %C = trunc i64 %B to i32
  ret i32 %C
}

; Variable inner shl: only the trunc and the constant shl are reassociated.
define i32 @trunc_shl_shl_var(i64 %arg, i64 %val) {
; CHECK-LABEL: @trunc_shl_shl_var(
; CHECK-NEXT:    [[A:%.*]] = shl i64 [[ARG:%.*]], [[VAL:%.*]]
; CHECK-NEXT:    [[A_TR:%.*]] = trunc i64 [[A]] to i32
; CHECK-NEXT:    [[C:%.*]] = shl i32 [[A_TR]], 2
; CHECK-NEXT:    ret i32 [[C]]
;
  %A = shl i64 %arg, %val
  %B = shl i64 %A, 2
  %C = trunc i64 %B to i32
  ret i32 %C
}

; Splat shl by 15 narrows to <8 x i16>.
define <8 x i16> @trunc_shl_v8i15_v8i32_15(<8 x i32> %a) {
; CHECK-LABEL: @trunc_shl_v8i15_v8i32_15(
; CHECK-NEXT:    [[A_TR:%.*]] = trunc <8 x i32> [[A:%.*]] to <8 x i16>
; CHECK-NEXT:    [[CONV:%.*]] = shl <8 x i16> [[A_TR]], splat (i16 15)
; CHECK-NEXT:    ret <8 x i16> [[CONV]]
;
  %shl = shl <8 x i32> %a, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
  %conv = trunc <8 x i32> %shl to <8 x i16>
  ret <8 x i16> %conv
}

; Shift amount equals the narrow width; the result is all zeros.
define <8 x i16> @trunc_shl_v8i16_v8i32_16(<8 x i32> %a) {
; CHECK-LABEL: @trunc_shl_v8i16_v8i32_16(
; CHECK-NEXT:    ret <8 x i16> zeroinitializer
;
  %shl = shl <8 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %conv = trunc <8 x i32> %shl to <8 x i16>
  ret <8 x i16> %conv
}

; Shift amount exceeds the narrow width; the result is all zeros.
define <8 x i16> @trunc_shl_v8i16_v8i32_17(<8 x i32> %a) {
; CHECK-LABEL: @trunc_shl_v8i16_v8i32_17(
; CHECK-NEXT:    ret <8 x i16> zeroinitializer
;
  %shl = shl <8 x i32> %a, <i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17>
  %conv = trunc <8 x i32> %shl to <8 x i16>
  ret <8 x i16> %conv
}

; Splat shl by 4 narrows to <8 x i16>.
define <8 x i16> @trunc_shl_v8i16_v8i32_4(<8 x i32> %a) {
; CHECK-LABEL: @trunc_shl_v8i16_v8i32_4(
; CHECK-NEXT:    [[A_TR:%.*]] = trunc <8 x i32> [[A:%.*]] to <8 x i16>
; CHECK-NEXT:    [[CONV:%.*]] = shl <8 x i16> [[A_TR]], splat (i16 4)
; CHECK-NEXT:    ret <8 x i16> [[CONV]]
;
  %shl = shl <8 x i32> %a, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
  %conv = trunc <8 x i32> %shl to <8 x i16>
  ret <8 x i16> %conv
}

903; Although the mask is the same value, we don't create a shuffle for types that the backend may not be able to handle:
904; trunc (shuffle X, C, Mask) --> shuffle (trunc X), C', Mask
905
; The trunc stays after the shuffle, but constant lanes that the mask never
; selects (indices 0 and 3 of the second operand) are replaced with poison.
define <4 x i8> @wide_shuf(<4 x i32> %x) {
; CHECK-LABEL: @wide_shuf(
; CHECK-NEXT:    [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> <i32 poison, i32 3634, i32 90, i32 poison>, <4 x i32> <i32 1, i32 5, i32 6, i32 2>
; CHECK-NEXT:    [[TRUNC:%.*]] = trunc <4 x i32> [[SHUF]] to <4 x i8>
; CHECK-NEXT:    ret <4 x i8> [[TRUNC]]
;
  %shuf = shufflevector <4 x i32> %x, <4 x i32> <i32 35, i32 3634, i32 90, i32 -1>, <4 x i32> <i32 1, i32 5, i32 6, i32 2>
  %trunc = trunc <4 x i32> %shuf to <4 x i8>
  ret <4 x i8> %trunc
}
916
917; trunc (shuffle X, poison, SplatMask) --> shuffle (trunc X), poison, SplatMask
918
; For a splat shuffle with a poison second operand, the trunc is hoisted
; above the shuffle and the mask is kept unchanged.
define <4 x i8> @wide_splat1(<4 x i32> %x) {
; CHECK-LABEL: @wide_splat1(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc <4 x i32> [[X:%.*]] to <4 x i8>
; CHECK-NEXT:    [[TRUNC:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
; CHECK-NEXT:    ret <4 x i8> [[TRUNC]]
;
  %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
  %trunc = trunc <4 x i32> %shuf to <4 x i8>
  ret <4 x i8> %trunc
}
929
930; Test weird types.
931; trunc (shuffle X, poison, SplatMask) --> shuffle (trunc X), poison, SplatMask
932
; Same hoist as @wide_splat1, exercising non-power-of-2 element types
; (i33 -> i31) and an odd element count.
define <3 x i31> @wide_splat2(<3 x i33> %x) {
; CHECK-LABEL: @wide_splat2(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc <3 x i33> [[X:%.*]] to <3 x i31>
; CHECK-NEXT:    [[TRUNC:%.*]] = shufflevector <3 x i31> [[TMP1]], <3 x i31> poison, <3 x i32> <i32 1, i32 1, i32 1>
; CHECK-NEXT:    ret <3 x i31> [[TRUNC]]
;
  %shuf = shufflevector <3 x i33> %x, <3 x i33> poison, <3 x i32> <i32 1, i32 1, i32 1>
  %trunc = trunc <3 x i33> %shuf to <3 x i31>
  ret <3 x i31> %trunc
}
943
944; FIXME:
945; trunc (shuffle X, poison, SplatMask) --> shuffle (trunc X), poison, SplatMask
946; A mask with poison elements should still be considered a splat mask.
947
; Currently NOT transformed (the checks show the shuffle is untouched): the
; poison element in the mask prevents it from being treated as a splat mask.
define <3 x i31> @wide_splat3(<3 x i33> %x) {
; CHECK-LABEL: @wide_splat3(
; CHECK-NEXT:    [[SHUF:%.*]] = shufflevector <3 x i33> [[X:%.*]], <3 x i33> poison, <3 x i32> <i32 poison, i32 1, i32 1>
; CHECK-NEXT:    [[TRUNC:%.*]] = trunc <3 x i33> [[SHUF]] to <3 x i31>
; CHECK-NEXT:    ret <3 x i31> [[TRUNC]]
;
  %shuf = shufflevector <3 x i33> %x, <3 x i33> poison, <3 x i32> <i32 poison, i32 1, i32 1>
  %trunc = trunc <3 x i33> %shuf to <3 x i31>
  ret <3 x i31> %trunc
}
958
959; TODO: The shuffle extends the length of the input vector. Should we shrink this?
960
; The trunc is not hoisted here; the only change shown by the checks is that
; the redundant second shuffle operand is canonicalized to poison.
define <8 x i8> @wide_lengthening_splat(<4 x i16> %v) {
; CHECK-LABEL: @wide_lengthening_splat(
; CHECK-NEXT:    [[SHUF:%.*]] = shufflevector <4 x i16> [[V:%.*]], <4 x i16> poison, <8 x i32> zeroinitializer
; CHECK-NEXT:    [[TR:%.*]] = trunc <8 x i16> [[SHUF]] to <8 x i8>
; CHECK-NEXT:    ret <8 x i8> [[TR]]
;
  %shuf = shufflevector <4 x i16> %v, <4 x i16> %v, <8 x i32> zeroinitializer
  %tr = trunc <8 x i16> %shuf to <8 x i8>
  ret <8 x i8> %tr
}
971
; The add is narrowed below the trunc; the constants <256, -129> truncate
; to <0, 127> in i8.
define <2 x i8> @narrow_add_vec_constant(<2 x i32> %x) {
; CHECK-LABEL: @narrow_add_vec_constant(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc <2 x i32> [[X:%.*]] to <2 x i8>
; CHECK-NEXT:    [[TR:%.*]] = add <2 x i8> [[TMP1]], <i8 0, i8 127>
; CHECK-NEXT:    ret <2 x i8> [[TR]]
;
  %add = add <2 x i32> %x, <i32 256, i32 -129>
  %tr = trunc <2 x i32> %add to <2 x i8>
  ret <2 x i8> %tr
}
982
; Same narrowing as @narrow_add_vec_constant, but for mul.
define <2 x i8> @narrow_mul_vec_constant(<2 x i32> %x) {
; CHECK-LABEL: @narrow_mul_vec_constant(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc <2 x i32> [[X:%.*]] to <2 x i8>
; CHECK-NEXT:    [[TR:%.*]] = mul <2 x i8> [[TMP1]], <i8 0, i8 127>
; CHECK-NEXT:    ret <2 x i8> [[TR]]
;
  %add = mul <2 x i32> %x, <i32 256, i32 -129>
  %tr = trunc <2 x i32> %add to <2 x i8>
  ret <2 x i8> %tr
}
993
; Same narrowing for sub, with the constant on the left-hand side.
define <2 x i8> @narrow_sub_vec_constant(<2 x i32> %x) {
; CHECK-LABEL: @narrow_sub_vec_constant(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc <2 x i32> [[X:%.*]] to <2 x i8>
; CHECK-NEXT:    [[TR:%.*]] = sub <2 x i8> <i8 0, i8 127>, [[TMP1]]
; CHECK-NEXT:    ret <2 x i8> [[TR]]
;
  %sub = sub <2 x i32> <i32 256, i32 -129>, %x
  %tr = trunc <2 x i32> %sub to <2 x i8>
  ret <2 x i8> %tr
}
1004
1005; If the select is narrowed based on the target's datalayout, we allow more optimizations.
1006
; After narrowing to i16, the '+1' inside the select and the '-1' after it
; cancel: the select arms become -1 (for the zero case) and trunc(%t0).
define i16 @PR44545(i32 %t0, i32 %data) {
; CHECK-LABEL: @PR44545(
; CHECK-NEXT:    [[ISZERO:%.*]] = icmp eq i32 [[DATA:%.*]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[T0:%.*]] to i16
; CHECK-NEXT:    [[SUB:%.*]] = select i1 [[ISZERO]], i16 -1, i16 [[TMP1]]
; CHECK-NEXT:    ret i16 [[SUB]]
;
  %t1 = add nuw nsw i32 %t0, 1
  %iszero = icmp eq i32 %data, 0
  %ffs = select i1 %iszero, i32 0, i32 %t1
  %cast = trunc i32 %ffs to i16
  %sub = add nsw i16 %cast, -1
  ret i16 %sub
}
1021
1022; Make sure that SimplifyDemandedBits drops the nowrap flags
; The masking 'and %x, 255' is removed as redundant for the demanded low
; bits; the nsw on the trunc can then no longer be justified and is dropped.
define i8 @drop_nsw_trunc(i16 %x, i16 %y) {
; CHECK-LABEL: @drop_nsw_trunc(
; CHECK-NEXT:    [[AND2:%.*]] = and i16 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[RES:%.*]] = trunc i16 [[AND2]] to i8
; CHECK-NEXT:    ret i8 [[RES]]
;
  %and = and i16 %x, 255
  %and2 = and i16 %and, %y
  %res = trunc nsw i16 %and2 to i8
  ret i8 %res
}
1034
; Same as @drop_nsw_trunc, but verifying the nuw flag is dropped.
define i8 @drop_nuw_trunc(i16 %x, i16 %y) {
; CHECK-LABEL: @drop_nuw_trunc(
; CHECK-NEXT:    [[AND2:%.*]] = and i16 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[RES:%.*]] = trunc i16 [[AND2]] to i8
; CHECK-NEXT:    ret i8 [[RES]]
;
  %and = and i16 %x, 255
  %and2 = and i16 %and, %y
  %res = trunc nuw i16 %and2 to i8
  ret i8 %res
}
1046
; Both nuw and nsw are dropped together when the masking 'and' is removed.
define i8 @drop_both_trunc(i16 %x, i16 %y) {
; CHECK-LABEL: @drop_both_trunc(
; CHECK-NEXT:    [[AND2:%.*]] = and i16 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[RES:%.*]] = trunc i16 [[AND2]] to i8
; CHECK-NEXT:    ret i8 [[RES]]
;
  %and = and i16 %x, 255
  %and2 = and i16 %and, %y
  %res = trunc nuw nsw i16 %and2 to i8
  ret i8 %res
}
1058
; Baseline: a plain (flagless) trunc of an xor to i1 is left alone.
define i1 @trunc_xor(i8 %x, i8 %y) {
; CHECK-LABEL: @trunc_xor(
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = trunc i8 [[XOR]] to i1
; CHECK-NEXT:    ret i1 [[R]]
;
  %xor = xor i8 %x, %y
  %r = trunc i8 %xor to i1
  ret i1 %r
}
1069
; With nuw, the xor result is known to be 0 or 1, so the trunc-to-i1
; folds to icmp ne X, Y.
define i1 @trunc_nuw_xor(i8 %x, i8 %y) {
; CHECK-LABEL: @trunc_nuw_xor(
; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %xor = xor i8 %x, %y
  %r = trunc nuw i8 %xor to i1
  ret i1 %r
}
1079
; Same fold as @trunc_nuw_xor, but driven by the nsw flag.
define i1 @trunc_nsw_xor(i8 %x, i8 %y) {
; CHECK-LABEL: @trunc_nsw_xor(
; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %xor = xor i8 %x, %y
  %r = trunc nsw i8 %xor to i1
  ret i1 %r
}
1089
; Vector version of @trunc_nuw_xor: the fold to icmp ne also applies per-lane.
define <2 x i1> @trunc_nuw_xor_vector(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @trunc_nuw_xor_vector(
; CHECK-NEXT:    [[R:%.*]] = icmp ne <2 x i8> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret <2 x i1> [[R]]
;
  %xor = xor <2 x i8> %x, %y
  %r = trunc nuw <2 x i8> %xor to <2 x i1>
  ret <2 x i1> %r
}
1099
; The udiv with constant numerator 11 produces a result of at most 11, which
; fits losslessly in i8, so the trunc in the loop gains nuw nsw flags.
define void @pr95547(i32 %x) {
; CHECK-LABEL: @pr95547(
; CHECK-NEXT:    [[X_TRUNC:%.*]] = trunc i32 [[X:%.*]] to i16
; CHECK-NEXT:    [[DIV:%.*]] = udiv i16 11, [[X_TRUNC]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[X]], 256
; CHECK-NEXT:    br i1 [[CMP]], label [[LOOP:%.*]], label [[EXIT:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[TRUNC:%.*]] = trunc nuw nsw i16 [[DIV]] to i8
; CHECK-NEXT:    call void @use.i8(i8 [[TRUNC]])
; CHECK-NEXT:    br label [[LOOP]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
  %x.trunc = trunc i32 %x to i16
  %div = udiv i16 11, %x.trunc
  %cmp = icmp ult i32 %x, 256
  br i1 %cmp, label %loop, label %exit

loop:
  ; The loop is just here to prevent sinking.
  %trunc = trunc i16 %div to i8
  call void @use.i8(i8 %trunc)
  br label %loop

exit:
  ret void
}
1127
; The assume guarantees the value is non-zero; nuw on a trunc-to-i1 means all
; truncated-away bits are zero, so the value must be exactly 1 -> fold to true.
define i1 @trunc_nuw_i1_non_zero(i8 %1) {
; CHECK-LABEL: @trunc_nuw_i1_non_zero(
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i8 [[TMP0:%.*]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[TMP2]])
; CHECK-NEXT:    ret i1 true
;
  %3 = icmp ne i8 %1, 0
  tail call void @llvm.assume(i1 %3)
  %ret = trunc nuw i8 %1 to i1
  ret i1 %ret
}
1139
; Negative test: without the non-zero assume, the value may be 0, so the
; trunc nuw cannot be folded to a constant.
define i1 @neg_trunc_nuw_i1_maybe_zero(i8 %1) {
; CHECK-LABEL: @neg_trunc_nuw_i1_maybe_zero(
; CHECK-NEXT:    [[RET:%.*]] = trunc nuw i8 [[TMP0:%.*]] to i1
; CHECK-NEXT:    ret i1 [[RET]]
;
  %ret = trunc nuw i8 %1 to i1
  ret i1 %ret
}
1148
; Negative test: non-zero plus nuw only pins the result for an i1
; destination; an i2 result could still be 1, 2, or 3.
define i2 @neg_trunc_nuw_i2_non_zero(i8 %1) {
; CHECK-LABEL: @neg_trunc_nuw_i2_non_zero(
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i8 [[TMP0:%.*]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[TMP2]])
; CHECK-NEXT:    [[RET:%.*]] = trunc nuw i8 [[TMP0]] to i2
; CHECK-NEXT:    ret i2 [[RET]]
;
  %3 = icmp ne i8 %1, 0
  tail call void @llvm.assume(i1 %3)
  %ret = trunc nuw i8 %1 to i2
  ret i2 %ret
}
1161
; Negative test: without nuw, the low bit of a non-zero value is unknown
; (e.g. 2 truncates to 0), so no fold happens.
define i1 @neg_trunc_i1_non_zero(i8 %1) {
; CHECK-LABEL: @neg_trunc_i1_non_zero(
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i8 [[TMP0:%.*]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[TMP2]])
; CHECK-NEXT:    [[RET:%.*]] = trunc i8 [[TMP0]] to i1
; CHECK-NEXT:    ret i1 [[RET]]
;
  %3 = icmp ne i8 %1, 0
  tail call void @llvm.assume(i1 %3)
  %ret = trunc i8 %1 to i1
  ret i1 %ret
}
1174
; nsw on a trunc-to-i1 restricts the source to 0 or -1; combined with the
; non-zero assume, the value must be -1, so the result folds to true.
define i1 @trunc_nsw_i1_non_zero(i8 %1) {
; CHECK-LABEL: @trunc_nsw_i1_non_zero(
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i8 [[TMP0:%.*]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[TMP2]])
; CHECK-NEXT:    ret i1 true
;
  %3 = icmp ne i8 %1, 0
  tail call void @llvm.assume(i1 %3)
  %ret = trunc nsw i8 %1 to i1
  ret i1 %ret
}
1186
; Negative test: without the non-zero assume, the value may be 0, so the
; trunc nsw cannot be folded to a constant.
define i1 @neg_trunc_nsw_i1_maybe_zero(i8 %1) {
; CHECK-LABEL: @neg_trunc_nsw_i1_maybe_zero(
; CHECK-NEXT:    [[RET:%.*]] = trunc nsw i8 [[TMP0:%.*]] to i1
; CHECK-NEXT:    ret i1 [[RET]]
;
  %ret = trunc nsw i8 %1 to i1
  ret i1 %ret
}
1195
; Negative test: non-zero plus nsw only pins the result for an i1
; destination; an i2 result is not uniquely determined.
define i2 @neg_trunc_nsw_i2_non_zero(i8 %1) {
; CHECK-LABEL: @neg_trunc_nsw_i2_non_zero(
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i8 [[TMP0:%.*]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[TMP2]])
; CHECK-NEXT:    [[RET:%.*]] = trunc nsw i8 [[TMP0]] to i2
; CHECK-NEXT:    ret i2 [[RET]]
;
  %3 = icmp ne i8 %1, 0
  tail call void @llvm.assume(i1 %3)
  %ret = trunc nsw i8 %1 to i2
  ret i2 %ret
}
1208