; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
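; These tests cover folding an 'and' of a shifted index into the scaled-index
; addressing mode on x86-64: when the low bits of the mask line up with the
; bits cleared by the shift, the 'and' can be applied to the unscaled index
; and the shift itself becomes the address scale.

; t1: the mask -1020 is 4 * -255, so the expected code is 'andq $-255' on the
; unscaled index with the multiply-by-4 folded into the addressing mode.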
define i8 @t1(ptr %X, i64 %i) {
; CHECK-LABEL: t1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    andq $-255, %rsi
; CHECK-NEXT:    movzbl (%rdi,%rsi,4), %eax
; CHECK-NEXT:    retq

entry:
  %tmp2 = shl i64 %i, 2
  %tmp4 = and i64 %tmp2, -1020
  %tmp7 = getelementptr i8, ptr %X, i64 %tmp4
  %tmp9 = load i8, ptr %tmp7
  ret i8 %tmp9
}

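; t2: the mask -56 is 4 * -14, so the same fold applies with 'andq $-14'.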
define i8 @t2(ptr %X, i64 %i) {
; CHECK-LABEL: t2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    andq $-14, %rsi
; CHECK-NEXT:    movzbl (%rdi,%rsi,4), %eax
; CHECK-NEXT:    retq

entry:
  %tmp2 = shl i64 %i, 2
  %tmp4 = and i64 %tmp2, -56
  %tmp7 = getelementptr i8, ptr %X, i64 %tmp4
  %tmp9 = load i8, ptr %tmp7
  ret i8 %tmp9
}

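; t3: the mask 17179869180 is 4 * 0xffffffff, so the 'and' on the unscaled
; index is expected to become a plain 32-bit register move (an implicit
; zero-extension) rather than an explicit andq.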
define i8 @t3(ptr %X, i64 %i) {
; CHECK-LABEL: t3:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax
; CHECK-NEXT:    movzbl (%rdi,%rax,4), %eax
; CHECK-NEXT:    retq

entry:
  %tmp2 = shl i64 %i, 2
  %tmp4 = and i64 %tmp2, 17179869180
  %tmp7 = getelementptr i8, ptr %X, i64 %tmp4
  %tmp9 = load i8, ptr %tmp7
  ret i8 %tmp9
}

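; t4: the mask 17179869176 is 4 * 0xfffffffe, which fits in an unsigned 32-bit
; immediate, so a 32-bit 'andl $-2' (whose result is implicitly zero-extended)
; is expected on the unscaled index.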
define i8 @t4(ptr %X, i64 %i) {
; CHECK-LABEL: t4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    andl $-2, %esi
; CHECK-NEXT:    movzbl (%rdi,%rsi,4), %eax
; CHECK-NEXT:    retq

entry:
  %tmp2 = shl i64 %i, 2
  %tmp4 = and i64 %tmp2, 17179869176
  %tmp7 = getelementptr i8, ptr %X, i64 %tmp4
  %tmp9 = load i8, ptr %tmp7
  ret i8 %tmp9
}

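; t5: the mask 17178869176 is 4 * 0xfffc2f6e, which also fits in an unsigned
; 32-bit immediate, so 'andl $-250002' on the unscaled index is expected.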
define i8 @t5(ptr %X, i64 %i) {
; CHECK-LABEL: t5:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    andl $-250002, %esi # imm = 0xFFFC2F6E
; CHECK-NEXT:    movzbl (%rdi,%rsi,4), %eax
; CHECK-NEXT:    retq

entry:
  %tmp2 = shl i64 %i, 2
  %tmp4 = and i64 %tmp2, 17178869176
  %tmp7 = getelementptr i8, ptr %X, i64 %tmp4
  %tmp9 = load i8, ptr %tmp7
  ret i8 %tmp9
}

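; t6: a 32-bit index is shifted, zero-extended to 64 bits, and masked with 60
; (4 * 15); 'andl $15' on the 32-bit index is expected, with both the
; zero-extension and the shift folded into the addressing mode.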
define i8 @t6(ptr %X, i32 %i) {
; CHECK-LABEL: t6:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    # kill: def $esi killed $esi def $rsi
; CHECK-NEXT:    andl $15, %esi
; CHECK-NEXT:    movzbl (%rdi,%rsi,4), %eax
; CHECK-NEXT:    retq
entry:
  %tmp2 = shl i32 %i, 2
  %tmp3 = zext i32 %tmp2 to i64
  %tmp4 = and i64 %tmp3, 60
  %tmp7 = getelementptr i8, ptr %X, i64 %tmp4
  %tmp9 = load i8, ptr %tmp7
  ret i8 %tmp9
}

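; t7: pmovmskb produces at most 16 significant bits, so (x >> 1) & 0xfffffc is
; equivalent to (x >> 3) * 4; a single 'shrl $3' is expected, with the
; remaining multiply-by-4 folded into the address scale.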
define i32 @t7(<16 x i8> %a0, ptr %p0) {
; CHECK-LABEL: t7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pmovmskb %xmm0, %eax
; CHECK-NEXT:    shrl $3, %eax
; CHECK-NEXT:    movzbl (%rdi,%rax,4), %eax
; CHECK-NEXT:    retq
  %i = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0)
  %index = lshr i32 %i, 1
  %mask = and i32 %index, 16777212
  %val.ptr = getelementptr inbounds i8, ptr %p0, i32 %mask
  %val = load i8, ptr %val.ptr
  %ext = zext i8 %val to i32
  ret i32 %ext
}
declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>)