xref: /llvm-project/llvm/test/CodeGen/X86/align-down-const.ll (revision 2f448bf509432c1a19ec46ab8cbc7353c03c6280)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=i686-unknown-linux-gnu < %s | FileCheck %s --check-prefix=X86
3; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s --check-prefix=X64
4
5; Fold
6;   ptr - (ptr & C)
7; To
8;   ptr & (~C)
9;
10; This needs to be a backend-level fold because only by this stage pointers
11; are just registers; in middle-end IR this can only be done via the
12; @llvm.ptrmask() intrinsic, which is not yet sufficiently widespread.
13;
14; https://bugs.llvm.org/show_bug.cgi?id=44448
15
16; The basic positive tests
17
; Basic i32 case: (ptr - (ptr & 15)) should fold to a single and with -16
; (i.e. clear the low 4 bits) on both 32- and 64-bit targets.
18define i32 @t0_32(i32 %ptr) nounwind {
; X86-LABEL: t0_32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    andl $-16, %eax
; X86-NEXT:    retl
;
; X64-LABEL: t0_32:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    andl $-16, %eax
; X64-NEXT:    retq
30  %bias = and i32 %ptr, 15
31  %r = sub i32 %ptr, %bias
32  ret i32 %r
33}
; Same fold for i64. Note the 32-bit target only masks the low half (%eax):
; and with -16 leaves the high 32 bits (%edx) untouched, so no andl on %edx.
34define i64 @t1_64(i64 %ptr) nounwind {
; X86-LABEL: t1_64:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    andl $-16, %eax
; X86-NEXT:    retl
;
; X64-LABEL: t1_64:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    andq $-16, %rax
; X64-NEXT:    retq
47  %bias = and i64 %ptr, 15
48  %r = sub i64 %ptr, %bias
49  ret i64 %r
50}
51
; C need not be a low-bit mask: here C = 16 (a single power-of-2 bit),
; and (ptr - (ptr & 16)) still folds to ptr & ~16, i.e. and $-17.
52define i32 @t2_powerof2(i32 %ptr) nounwind {
; X86-LABEL: t2_powerof2:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    andl $-17, %eax
; X86-NEXT:    retl
;
; X64-LABEL: t2_powerof2:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    andl $-17, %eax
; X64-NEXT:    retq
64  %bias = and i32 %ptr, 16
65  %r = sub i32 %ptr, %bias
66  ret i32 %r
67}
; Arbitrary constant C = 42: the fold still applies, producing ptr & ~42
; (and $-43) — C is not required to be a mask or a power of two.
68define i32 @t3_random_constant(i32 %ptr) nounwind {
; X86-LABEL: t3_random_constant:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    andl $-43, %eax
; X86-NEXT:    retl
;
; X64-LABEL: t3_random_constant:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    andl $-43, %eax
; X64-NEXT:    retq
80  %bias = and i32 %ptr, 42
81  %r = sub i32 %ptr, %bias
82  ret i32 %r
83}
84
85; Extra use tests
86
; %bias has an extra use (it is stored to memory), so the and $15 must be
; kept for the store — but the sub is still folded into and $-16.
87define i32 @t4_extrause(i32 %ptr, ptr %bias_storage) nounwind {
; X86-LABEL: t4_extrause:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    andl $15, %edx
; X86-NEXT:    movl %edx, (%ecx)
; X86-NEXT:    andl $-16, %eax
; X86-NEXT:    retl
;
; X64-LABEL: t4_extrause:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    movl %edi, %ecx
; X64-NEXT:    andl $15, %ecx
; X64-NEXT:    movl %ecx, (%rsi)
; X64-NEXT:    andl $-16, %eax
; X64-NEXT:    retq
106  %bias = and i32 %ptr, 15
107  store i32 %bias, ptr %bias_storage
108  %r = sub i32 %ptr, %bias
109  ret i32 %r
110}
111
112; Negative tests
113
; Negative test: the and and the sub operate on DIFFERENT base values
; (%ptr1 vs %ptr0), so the fold must not fire — and + sub remain.
114define i32 @n5_different_ptrs(i32 %ptr0, i32 %ptr1) nounwind {
; X86-LABEL: n5_different_ptrs:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    andl $15, %ecx
; X86-NEXT:    subl %ecx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: n5_different_ptrs:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    andl $15, %esi
; X64-NEXT:    subl %esi, %eax
; X64-NEXT:    retq
129  %bias = and i32 %ptr1, 15 ; not %ptr0
130  %r = sub i32 %ptr0, %bias ; not %ptr1
131  ret i32 %r
132}
133
; Negative test: the sub operands are reversed ((ptr & 15) - ptr), which is
; NOT equal to ptr & -16, so the fold must not fire — and + sub remain.
134define i32 @n6_sub_is_not_commutative(i32 %ptr) nounwind {
; X86-LABEL: n6_sub_is_not_commutative:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    andl $15, %eax
; X86-NEXT:    subl %ecx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: n6_sub_is_not_commutative:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    andl $15, %eax
; X64-NEXT:    subl %edi, %eax
; X64-NEXT:    retq
149  %bias = and i32 %ptr, 15
150  %r = sub i32 %bias, %ptr ; wrong order
151  ret i32 %r
152}
153