xref: /llvm-project/llvm/test/CodeGen/AArch64/signbit-shift.ll (revision 3acbd38492c394dec32ccde3f11885e5b59d5aa9)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
3
4; If positive...
5
; Returns zext(x > -1): 1 when the sign bit of %x is clear, else 0.
; Checked lowering extracts the inverted sign bit: mvn then lsr #31.
6define i32 @zext_ifpos(i32 %x) {
7; CHECK-LABEL: zext_ifpos:
8; CHECK:       // %bb.0:
9; CHECK-NEXT:    mvn w8, w0
10; CHECK-NEXT:    lsr w0, w8, #31
11; CHECK-NEXT:    ret
12  %c = icmp sgt i32 %x, -1
13  %e = zext i1 %c to i32
14  ret i32 %e
15}
16
; zext(x >= 0) + 41, i.e. 42 when non-negative, 41 when negative.
; Checked lowering uses the sign mask instead: (x asr 31) is 0 or -1,
; so asr #31 followed by add #42 yields 42 or 41 with no compare.
17define i32 @add_zext_ifpos(i32 %x) {
18; CHECK-LABEL: add_zext_ifpos:
19; CHECK:       // %bb.0:
20; CHECK-NEXT:    asr w8, w0, #31
21; CHECK-NEXT:    add w0, w8, #42
22; CHECK-NEXT:    ret
23  %c = icmp sgt i32 %x, -1
24  %e = zext i1 %c to i32
25  %r = add i32 %e, 41
26  ret i32 %r
27}
28
; Vector splat version of add_zext_ifpos: per-lane zext(x >= 0) + 41.
; cmge produces an all-ones (-1) mask for non-negative lanes, so
; 41 - mask gives 42 for those lanes and 41 otherwise.
29define <4 x i32> @add_zext_ifpos_vec_splat(<4 x i32> %x) {
30; CHECK-LABEL: add_zext_ifpos_vec_splat:
31; CHECK:       // %bb.0:
32; CHECK-NEXT:    movi v1.4s, #41
33; CHECK-NEXT:    cmge v0.4s, v0.4s, #0
34; CHECK-NEXT:    sub v0.4s, v1.4s, v0.4s
35; CHECK-NEXT:    ret
36  %c = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
37  %e = zext <4 x i1> %c to <4 x i32>
38  %r = add <4 x i32> %e, <i32 41, i32 41, i32 41, i32 41>
39  ret <4 x i32> %r
40}
41
; select(x >= 0, 42, 41) — same value as add_zext_ifpos, written as a
; select. Checked lowering: materialize 41, then cinc on ge (41 + 1).
42define i32 @sel_ifpos_tval_bigger(i32 %x) {
43; CHECK-LABEL: sel_ifpos_tval_bigger:
44; CHECK:       // %bb.0:
45; CHECK-NEXT:    mov w8, #41 // =0x29
46; CHECK-NEXT:    cmp w0, #0
47; CHECK-NEXT:    cinc w0, w8, ge
48; CHECK-NEXT:    ret
49  %c = icmp sgt i32 %x, -1
50  %r = select i1 %c, i32 42, i32 41
51  ret i32 %r
52}
53
; sext(x >= 0): -1 when the sign bit of %x is clear, else 0.
; Checked lowering: invert, then asr #31 replicates the inverted sign bit.
54define i32 @sext_ifpos(i32 %x) {
55; CHECK-LABEL: sext_ifpos:
56; CHECK:       // %bb.0:
57; CHECK-NEXT:    mvn w8, w0
58; CHECK-NEXT:    asr w0, w8, #31
59; CHECK-NEXT:    ret
60  %c = icmp sgt i32 %x, -1
61  %e = sext i1 %c to i32
62  ret i32 %e
63}
64
; sext(x >= 0) + 42: 41 when non-negative (-1 + 42), 42 when negative.
; Checked lowering flips to the raw sign bit: lsr #31 gives 0/1, +41.
65define i32 @add_sext_ifpos(i32 %x) {
66; CHECK-LABEL: add_sext_ifpos:
67; CHECK:       // %bb.0:
68; CHECK-NEXT:    lsr w8, w0, #31
69; CHECK-NEXT:    add w0, w8, #41
70; CHECK-NEXT:    ret
71  %c = icmp sgt i32 %x, -1
72  %e = sext i1 %c to i32
73  %r = add i32 %e, 42
74  ret i32 %r
75}
76
; Vector splat version of add_sext_ifpos: per-lane sext(x >= 0) + 42.
; cmge already yields the 0/-1 sext mask directly, so a plain vector
; add of the splat 42 finishes the job.
77define <4 x i32> @add_sext_ifpos_vec_splat(<4 x i32> %x) {
78; CHECK-LABEL: add_sext_ifpos_vec_splat:
79; CHECK:       // %bb.0:
80; CHECK-NEXT:    movi v1.4s, #42
81; CHECK-NEXT:    cmge v0.4s, v0.4s, #0
82; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
83; CHECK-NEXT:    ret
84  %c = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
85  %e = sext <4 x i1> %c to <4 x i32>
86  %r = add <4 x i32> %e, <i32 42, i32 42, i32 42, i32 42>
87  ret <4 x i32> %r
88}
89
; select(x >= 0, 41, 42) — false value is the larger one, so the
; checked lowering increments 41 on the inverted condition (cinc lt).
90define i32 @sel_ifpos_fval_bigger(i32 %x) {
91; CHECK-LABEL: sel_ifpos_fval_bigger:
92; CHECK:       // %bb.0:
93; CHECK-NEXT:    mov w8, #41 // =0x29
94; CHECK-NEXT:    cmp w0, #0
95; CHECK-NEXT:    cinc w0, w8, lt
96; CHECK-NEXT:    ret
97  %c = icmp sgt i32 %x, -1
98  %r = select i1 %c, i32 41, i32 42
99  ret i32 %r
100}
101
102; If negative...
103
; zext(x < 0) is exactly the sign bit, so the checked lowering is a
; single lsr #31.
104define i32 @zext_ifneg(i32 %x) {
105; CHECK-LABEL: zext_ifneg:
106; CHECK:       // %bb.0:
107; CHECK-NEXT:    lsr w0, w0, #31
108; CHECK-NEXT:    ret
109  %c = icmp slt i32 %x, 0
110  %r = zext i1 %c to i32
111  ret i32 %r
112}
113
; zext(x < 0) + 41: 42 for negative input, 41 otherwise.
; Checked lowering: shift the sign bit down (lsr #31), then add #41.
114define i32 @add_zext_ifneg(i32 %x) {
115; CHECK-LABEL: add_zext_ifneg:
116; CHECK:       // %bb.0:
117; CHECK-NEXT:    lsr w8, w0, #31
118; CHECK-NEXT:    add w0, w8, #41
119; CHECK-NEXT:    ret
120  %c = icmp slt i32 %x, 0
121  %e = zext i1 %c to i32
122  %r = add i32 %e, 41
123  ret i32 %r
124}
125
; select(x < 0, 42, 41): materialize 41 and conditionally increment
; on lt — same value as add_zext_ifneg, expressed as a select.
126define i32 @sel_ifneg_tval_bigger(i32 %x) {
127; CHECK-LABEL: sel_ifneg_tval_bigger:
128; CHECK:       // %bb.0:
129; CHECK-NEXT:    mov w8, #41 // =0x29
130; CHECK-NEXT:    cmp w0, #0
131; CHECK-NEXT:    cinc w0, w8, lt
132; CHECK-NEXT:    ret
133  %c = icmp slt i32 %x, 0
134  %r = select i1 %c, i32 42, i32 41
135  ret i32 %r
136}
137
; sext(x < 0) is the sign bit replicated across all 32 bits, so the
; checked lowering is a single asr #31 (yields 0 or -1).
138define i32 @sext_ifneg(i32 %x) {
139; CHECK-LABEL: sext_ifneg:
140; CHECK:       // %bb.0:
141; CHECK-NEXT:    asr w0, w0, #31
142; CHECK-NEXT:    ret
143  %c = icmp slt i32 %x, 0
144  %r = sext i1 %c to i32
145  ret i32 %r
146}
147
; sext(x < 0) + 42: 41 for negative input (-1 + 42), 42 otherwise.
; Checked lowering: asr #31 builds the 0/-1 mask, then add #42.
148define i32 @add_sext_ifneg(i32 %x) {
149; CHECK-LABEL: add_sext_ifneg:
150; CHECK:       // %bb.0:
151; CHECK-NEXT:    asr w8, w0, #31
152; CHECK-NEXT:    add w0, w8, #42
153; CHECK-NEXT:    ret
154  %c = icmp slt i32 %x, 0
155  %e = sext i1 %c to i32
156  %r = add i32 %e, 42
157  ret i32 %r
158}
159
; select(x < 0, 41, 42): false value is larger, so the checked lowering
; conditionally increments 41 on the inverted condition (cinc ge).
160define i32 @sel_ifneg_fval_bigger(i32 %x) {
161; CHECK-LABEL: sel_ifneg_fval_bigger:
162; CHECK:       // %bb.0:
163; CHECK-NEXT:    mov w8, #41 // =0x29
164; CHECK-NEXT:    cmp w0, #0
165; CHECK-NEXT:    cinc w0, w8, ge
166; CHECK-NEXT:    ret
167  %c = icmp slt i32 %x, 0
168  %r = select i1 %c, i32 41, i32 42
169  ret i32 %r
170}
171
; lshr(~x, 31) + 41: the shifted inverted sign bit is 1 for x >= 0,
; so the result is 42 or 41. Checked lowering folds the not into the
; shift direction: (x asr 31) is 0/-1, then add #42.
172define i32 @add_lshr_not(i32 %x) {
173; CHECK-LABEL: add_lshr_not:
174; CHECK:       // %bb.0:
175; CHECK-NEXT:    asr w8, w0, #31
176; CHECK-NEXT:    add w0, w8, #42
177; CHECK-NEXT:    ret
178  %not = xor i32 %x, -1
179  %sh = lshr i32 %not, 31
180  %r = add i32 %sh, 41
181  ret i32 %r
182}
183
; Vector splat: lshr(~x, 31) + 42 per lane (43 for non-negative lanes,
; 42 for negative). Checked lowering rewrites it as 43 + (x asr 31)
; using a shift-right-accumulate (ssra) into a splat of 43.
184define <4 x i32> @add_lshr_not_vec_splat(<4 x i32> %x) {
185; CHECK-LABEL: add_lshr_not_vec_splat:
186; CHECK:       // %bb.0:
187; CHECK-NEXT:    movi v1.4s, #43
188; CHECK-NEXT:    ssra v1.4s, v0.4s, #31
189; CHECK-NEXT:    mov v0.16b, v1.16b
190; CHECK-NEXT:    ret
191  %c = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
192  %e = lshr <4 x i32> %c, <i32 31, i32 31, i32 31, i32 31>
193  %r = add <4 x i32> %e, <i32 42, i32 42, i32 42, i32 42>
194  ret <4 x i32> %r
195}
196
; 43 - lshr(~x, 31): 42 for x >= 0, 43 for x < 0. Checked lowering
; inserts the raw sign bit into bit 0 of the constant 42 (bfxil), so
; a set sign bit turns 0x2a into 0x2b.
197define i32 @sub_lshr_not(i32 %x) {
198; CHECK-LABEL: sub_lshr_not:
199; CHECK:       // %bb.0:
200; CHECK-NEXT:    mov w8, #42 // =0x2a
201; CHECK-NEXT:    bfxil w8, w0, #31, #1
202; CHECK-NEXT:    mov w0, w8
203; CHECK-NEXT:    ret
204  %not = xor i32 %x, -1
205  %sh = lshr i32 %not, 31
206  %r = sub i32 43, %sh
207  ret i32 %r
208}
209
; Vector splat: 42 - lshr(~x, 31) per lane (41 for non-negative lanes,
; 42 for negative). Checked lowering rewrites it as 41 + lshr(x, 31)
; via an unsigned shift-right-accumulate (usra) into a splat of 41.
210define <4 x i32> @sub_lshr_not_vec_splat(<4 x i32> %x) {
211; CHECK-LABEL: sub_lshr_not_vec_splat:
212; CHECK:       // %bb.0:
213; CHECK-NEXT:    movi v1.4s, #41
214; CHECK-NEXT:    usra v1.4s, v0.4s, #31
215; CHECK-NEXT:    mov v0.16b, v1.16b
216; CHECK-NEXT:    ret
217  %c = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
218  %e = lshr <4 x i32> %c, <i32 31, i32 31, i32 31, i32 31>
219  %r = sub <4 x i32> <i32 42, i32 42, i32 42, i32 42>, %e
220  ret <4 x i32> %r
221}
222
; y - lshr(x, 31) == y + asr(x, 31), so the checked lowering is a
; single add with a shifted operand (add w0, w1, w0, asr #31).
223define i32 @sub_lshr(i32 %x, i32 %y) {
224; CHECK-LABEL: sub_lshr:
225; CHECK:       // %bb.0:
226; CHECK-NEXT:    add w0, w1, w0, asr #31
227; CHECK-NEXT:    ret
228  %sh = lshr i32 %x, 31
229  %r = sub i32 %y, %sh
230  ret i32 %r
231}
232
; Vector form of sub_lshr: y - lshr(x, 31) per lane, lowered as a
; signed shift-right-accumulate (y + asr(x, 31)) into %y.
233define <4 x i32> @sub_lshr_vec(<4 x i32> %x, <4 x i32> %y) {
234; CHECK-LABEL: sub_lshr_vec:
235; CHECK:       // %bb.0:
236; CHECK-NEXT:    ssra v1.4s, v0.4s, #31
237; CHECK-NEXT:    mov v0.16b, v1.16b
238; CHECK-NEXT:    ret
239  %sh = lshr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
240  %r = sub <4 x i32> %y, %sh
241  ret <4 x i32> %r
242}
243
; 43 - lshr(x, 31): 42 for x < 0 (43 - 1), 43 otherwise. Checked
; lowering rewrites it as (x asr 31) + 43, i.e. asr #31 then add #43.
244define i32 @sub_const_op_lshr(i32 %x) {
245; CHECK-LABEL: sub_const_op_lshr:
246; CHECK:       // %bb.0:
247; CHECK-NEXT:    asr w8, w0, #31
248; CHECK-NEXT:    add w0, w8, #43
249; CHECK-NEXT:    ret
250  %sh = lshr i32 %x, 31
251  %r = sub i32 43, %sh
252  ret i32 %r
253}
254
; Vector splat of sub_const_op_lshr: 42 - lshr(x, 31) per lane,
; lowered as a signed shift-right-accumulate (42 + asr(x, 31)) into
; a splat of 42.
255define <4 x i32> @sub_const_op_lshr_vec(<4 x i32> %x) {
256; CHECK-LABEL: sub_const_op_lshr_vec:
257; CHECK:       // %bb.0:
258; CHECK-NEXT:    movi v1.4s, #42
259; CHECK-NEXT:    ssra v1.4s, v0.4s, #31
260; CHECK-NEXT:    mov v0.16b, v1.16b
261; CHECK-NEXT:    ret
262  %sh = lshr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
263  %r = sub <4 x i32> <i32 42, i32 42, i32 42, i32 42>, %sh
264  ret <4 x i32> %r
265}
266
267