; xref: /llvm-project/llvm/test/CodeGen/X86/shift-folding.ll (revision 2f448bf509432c1a19ec46ab8cbc7353c03c6280)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -verify-coalescing | FileCheck %s

; (X >> 2) used as an i32 gep index is rescaled by 4, so the shift folds
; into a mask: the CHECK lines expect `andl $-4` instead of a shift.
define ptr @test1(ptr %P, i32 %X) {
; CHECK-LABEL: test1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    andl $-4, %eax
; CHECK-NEXT:    addl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    retl
  %Y = lshr i32 %X, 2
  %gep.upgrd.1 = zext i32 %Y to i64
  %P2 = getelementptr i32, ptr %P, i64 %gep.upgrd.1
  ret ptr %P2
}
; (X << 2) as an i32 gep index: the shift-by-2 combines with the ×4 element
; scale into a single `shll $4` per the CHECK lines.
define ptr @test2(ptr %P, i32 %X) {
; CHECK-LABEL: test2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    shll $4, %eax
; CHECK-NEXT:    addl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    retl
  %Y = shl i32 %X, 2
  %gep.upgrd.2 = zext i32 %Y to i64
  %P2 = getelementptr i32, ptr %P, i64 %gep.upgrd.2
  ret ptr %P2
}
; Same as test1 but with an arithmetic shift: (X ashr 2) rescaled by 4 still
; reduces to clearing the low bits (`andl $-4` in the CHECK lines).
define ptr @test3(ptr %P, i32 %X) {
; CHECK-LABEL: test3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    andl $-4, %eax
; CHECK-NEXT:    addl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    retl
  %Y = ashr i32 %X, 2
  %P2 = getelementptr i32, ptr %P, i32 %Y
  ret ptr %P2
}
; lshr-by-24 of a loaded i32 should narrow to a zero-extending byte load at
; offset 3 (the CHECK lines expect `movzbl 3(%ecx)`; fastcc passes the
; pointer in a register rather than on the stack).
define fastcc i32 @test4(ptr %d) {
; CHECK-LABEL: test4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movzbl 3(%ecx), %eax
; CHECK-NEXT:    retl
  %tmp4 = load i32, ptr %d
  %tmp512 = lshr i32 %tmp4, 24
  ret i32 %tmp512
}

; Ensure that we don't fold away shifts which have multiple uses, as they are
; just re-introduced for the second use.

; %index has two uses (gep index and add operand), so the shift must be
; materialized once (`shrl $11`) rather than folded into the address mode.
define i64 @test5(i16 %i, ptr %arr) {
; CHECK-LABEL: test5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    shrl $11, %eax
; CHECK-NEXT:    xorl %edx, %edx
; CHECK-NEXT:    addl (%ecx,%eax,4), %eax
; CHECK-NEXT:    setb %dl
; CHECK-NEXT:    retl
  %i.zext = zext i16 %i to i32
  %index = lshr i32 %i.zext, 11
  %index.zext = zext i32 %index to i64
  %val.ptr = getelementptr inbounds i32, ptr %arr, i64 %index.zext
  %val = load i32, ptr %val.ptr
  %val.zext = zext i32 %val to i64
  %sum = add i64 %val.zext, %index.zext
  ret i64 %sum
}

; We should not crash because an undef shift was created.

; lshr by 33 on an i32 is an oversized (poison-producing) shift; this is a
; crash regression test — codegen just returns without computing anything.
define i32 @overshift(i32 %a) {
; CHECK-LABEL: overshift:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retl
  %shr = lshr i32 %a, 33
  %xor = xor i32 1, %shr
  ret i32 %xor
}

; Should be possible to adjust the pointer and narrow the load to 16 bits.
define i16 @srl_load_narrowing1(ptr %arg) {
; CHECK-LABEL: srl_load_narrowing1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    movzwl 1(%eax), %eax
; CHECK-NEXT:    retl
  ; lshr-8 + trunc-to-i16 of the i32 load narrows to a 16-bit load at offset 1.
  %tmp1 = load i32, ptr %arg, align 1
  %tmp2 = lshr i32 %tmp1, 8
  %tmp3 = trunc i32 %tmp2 to i16
  ret i16 %tmp3
}
; lshr-24 + trunc-to-i16 leaves only the top byte: narrowed to a byte load at
; offset 3 (the `kill` comment line is register-liveness bookkeeping emitted
; by llc and is part of the autogenerated assertions).
define i16 @srl_load_narrowing2(ptr %arg) {
; CHECK-LABEL: srl_load_narrowing2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    movzbl 3(%eax), %eax
; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
; CHECK-NEXT:    retl
  %tmp1 = load i32, ptr %arg, align 1
  %tmp2 = lshr i32 %tmp1, 24
  %tmp3 = trunc i32 %tmp2 to i16
  ret i16 %tmp3
}