; xref: /llvm-project/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll (revision db158c7c830807caeeb0691739c41f1d522029e9)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s

; Scratch globals used by the tests below; loaded/stored (volatile) so the
; logical operations under test cannot be folded away.
@var1_32 = global i32 0
@var2_32 = global i32 0

@var1_64 = global i64 0
@var2_64 = global i64 0

; Check that 32-bit and/bic/orr/orn/eor/eon each fold a plain or shifted
; register operand (lsl/lsr/asr/ror) into a single instruction.
define void @logical_32bit() minsize {
; CHECK-LABEL: logical_32bit:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adrp x8, :got:var1_32
; CHECK-NEXT:    adrp x9, :got:var2_32
; CHECK-NEXT:    ldr x8, [x8, :got_lo12:var1_32]
; CHECK-NEXT:    ldr x9, [x9, :got_lo12:var2_32]
; CHECK-NEXT:    ldr w10, [x8]
; CHECK-NEXT:    ldr w9, [x9]
; CHECK-NEXT:    and w11, w10, w9
; CHECK-NEXT:    bic w12, w10, w9
; CHECK-NEXT:    str w11, [x8]
; CHECK-NEXT:    orr w11, w10, w9
; CHECK-NEXT:    str w12, [x8]
; CHECK-NEXT:    orn w12, w10, w9
; CHECK-NEXT:    str w11, [x8]
; CHECK-NEXT:    eor w11, w10, w9
; CHECK-NEXT:    str w12, [x8]
; CHECK-NEXT:    eon w12, w9, w10
; CHECK-NEXT:    str w11, [x8]
; CHECK-NEXT:    and w11, w10, w9, lsl #31
; CHECK-NEXT:    str w12, [x8]
; CHECK-NEXT:    bic w12, w10, w9, lsl #31
; CHECK-NEXT:    str w11, [x8]
; CHECK-NEXT:    orr w11, w10, w9, lsl #31
; CHECK-NEXT:    str w12, [x8]
; CHECK-NEXT:    orn w12, w10, w9, lsl #31
; CHECK-NEXT:    str w11, [x8]
; CHECK-NEXT:    eor w11, w10, w9, lsl #31
; CHECK-NEXT:    str w12, [x8]
; CHECK-NEXT:    eon w12, w10, w9, lsl #31
; CHECK-NEXT:    str w11, [x8]
; CHECK-NEXT:    bic w11, w10, w9, asr #10
; CHECK-NEXT:    str w12, [x8]
; CHECK-NEXT:    eor w12, w10, w9, asr #10
; CHECK-NEXT:    str w11, [x8]
; CHECK-NEXT:    orn w11, w10, w9, lsr #1
; CHECK-NEXT:    str w12, [x8]
; CHECK-NEXT:    eor w12, w10, w9, lsr #1
; CHECK-NEXT:    str w11, [x8]
; CHECK-NEXT:    eon w11, w10, w9, ror #20
; CHECK-NEXT:    and w9, w10, w9, ror #20
; CHECK-NEXT:    str w12, [x8]
; CHECK-NEXT:    str w11, [x8]
; CHECK-NEXT:    str w9, [x8]
; CHECK-NEXT:    ret
  %val1 = load i32, ptr @var1_32
  %val2 = load i32, ptr @var2_32

  ; First check basic and/bic/or/orn/eor/eon patterns with no shift
  %neg_val2 = xor i32 -1, %val2

  %and_noshift = and i32 %val1, %val2
  store volatile i32 %and_noshift, ptr @var1_32
  %bic_noshift = and i32 %neg_val2, %val1
  store volatile i32 %bic_noshift, ptr @var1_32

  %or_noshift = or i32 %val1, %val2
  store volatile i32 %or_noshift, ptr @var1_32
  %orn_noshift = or i32 %neg_val2, %val1
  store volatile i32 %orn_noshift, ptr @var1_32

  %xor_noshift = xor i32 %val1, %val2
  store volatile i32 %xor_noshift, ptr @var1_32
  %xorn_noshift = xor i32 %neg_val2, %val1
  store volatile i32 %xorn_noshift, ptr @var1_32

  ; Check the maximum shift on each
  %operand_lsl31 = shl i32 %val2, 31
  %neg_operand_lsl31 = xor i32 -1, %operand_lsl31

  %and_lsl31 = and i32 %val1, %operand_lsl31
  store volatile i32 %and_lsl31, ptr @var1_32
  %bic_lsl31 = and i32 %val1, %neg_operand_lsl31
  store volatile i32 %bic_lsl31, ptr @var1_32

  %or_lsl31 = or i32 %val1, %operand_lsl31
  store volatile i32 %or_lsl31, ptr @var1_32
  %orn_lsl31 = or i32 %val1, %neg_operand_lsl31
  store volatile i32 %orn_lsl31, ptr @var1_32

  %xor_lsl31 = xor i32 %val1, %operand_lsl31
  store volatile i32 %xor_lsl31, ptr @var1_32
  %xorn_lsl31 = xor i32 %val1, %neg_operand_lsl31
  store volatile i32 %xorn_lsl31, ptr @var1_32

  ; Check other shifts on a subset
  %operand_asr10 = ashr i32 %val2, 10
  %neg_operand_asr10 = xor i32 -1, %operand_asr10

  %bic_asr10 = and i32 %val1, %neg_operand_asr10
  store volatile i32 %bic_asr10, ptr @var1_32
  %xor_asr10 = xor i32 %val1, %operand_asr10
  store volatile i32 %xor_asr10, ptr @var1_32

  %operand_lsr1 = lshr i32 %val2, 1
  %neg_operand_lsr1 = xor i32 -1, %operand_lsr1

  %orn_lsr1 = or i32 %val1, %neg_operand_lsr1
  store volatile i32 %orn_lsr1, ptr @var1_32
  %xor_lsr1 = xor i32 %val1, %operand_lsr1
  store volatile i32 %xor_lsr1, ptr @var1_32

  ; shl 12 | lshr 20 on an i32 forms a rotate-right by 20 (ror #20 above).
  %operand_ror20_big = shl i32 %val2, 12
  %operand_ror20_small = lshr i32 %val2, 20
  %operand_ror20 = or i32 %operand_ror20_big, %operand_ror20_small
  %neg_operand_ror20 = xor i32 -1, %operand_ror20

  %xorn_ror20 = xor i32 %val1, %neg_operand_ror20
  store volatile i32 %xorn_ror20, ptr @var1_32
  %and_ror20 = and i32 %val1, %operand_ror20
  store volatile i32 %and_ror20, ptr @var1_32

  ret void
}

; 64-bit counterpart of logical_32bit: same and/bic/orr/orn/eor/eon patterns
; with plain and shifted (lsl/lsr/asr/ror) register operands.
define void @logical_64bit() minsize {
; CHECK-LABEL: logical_64bit:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adrp x8, :got:var1_64
; CHECK-NEXT:    adrp x9, :got:var2_64
; CHECK-NEXT:    ldr x8, [x8, :got_lo12:var1_64]
; CHECK-NEXT:    ldr x9, [x9, :got_lo12:var2_64]
; CHECK-NEXT:    ldr x10, [x8]
; CHECK-NEXT:    ldr x9, [x9]
; CHECK-NEXT:    and x11, x10, x9
; CHECK-NEXT:    bic x12, x10, x9
; CHECK-NEXT:    str x11, [x8]
; CHECK-NEXT:    orr x11, x10, x9
; CHECK-NEXT:    str x12, [x8]
; CHECK-NEXT:    orn x12, x10, x9
; CHECK-NEXT:    str x11, [x8]
; CHECK-NEXT:    eor x11, x10, x9
; CHECK-NEXT:    str x12, [x8]
; CHECK-NEXT:    eon x12, x9, x10
; CHECK-NEXT:    str x11, [x8]
; CHECK-NEXT:    and x11, x10, x9, lsl #63
; CHECK-NEXT:    str x12, [x8]
; CHECK-NEXT:    bic x12, x10, x9, lsl #63
; CHECK-NEXT:    str x11, [x8]
; CHECK-NEXT:    orr x11, x10, x9, lsl #63
; CHECK-NEXT:    str x12, [x8]
; CHECK-NEXT:    orn x12, x10, x9, lsl #63
; CHECK-NEXT:    str x11, [x8]
; CHECK-NEXT:    eor x11, x10, x9, lsl #63
; CHECK-NEXT:    str x12, [x8]
; CHECK-NEXT:    eon x12, x10, x9, lsl #63
; CHECK-NEXT:    str x11, [x8]
; CHECK-NEXT:    bic x11, x10, x9, asr #10
; CHECK-NEXT:    str x12, [x8]
; CHECK-NEXT:    eor x12, x10, x9, asr #10
; CHECK-NEXT:    str x11, [x8]
; CHECK-NEXT:    orn x11, x10, x9, lsr #1
; CHECK-NEXT:    str x12, [x8]
; CHECK-NEXT:    eor x12, x10, x9, lsr #1
; CHECK-NEXT:    str x11, [x8]
; CHECK-NEXT:    eon x11, x10, x9, ror #20
; CHECK-NEXT:    and x9, x10, x9, ror #20
; CHECK-NEXT:    str x12, [x8]
; CHECK-NEXT:    str x11, [x8]
; CHECK-NEXT:    str x9, [x8]
; CHECK-NEXT:    ret
  %val1 = load i64, ptr @var1_64
  %val2 = load i64, ptr @var2_64

  ; First check basic and/bic/or/orn/eor/eon patterns with no shift
  %neg_val2 = xor i64 -1, %val2

  %and_noshift = and i64 %val1, %val2
  store volatile i64 %and_noshift, ptr @var1_64
  %bic_noshift = and i64 %neg_val2, %val1
  store volatile i64 %bic_noshift, ptr @var1_64

  %or_noshift = or i64 %val1, %val2
  store volatile i64 %or_noshift, ptr @var1_64
  %orn_noshift = or i64 %neg_val2, %val1
  store volatile i64 %orn_noshift, ptr @var1_64

  %xor_noshift = xor i64 %val1, %val2
  store volatile i64 %xor_noshift, ptr @var1_64
  %xorn_noshift = xor i64 %neg_val2, %val1
  store volatile i64 %xorn_noshift, ptr @var1_64

  ; Check the maximum shift on each
  %operand_lsl63 = shl i64 %val2, 63
  %neg_operand_lsl63 = xor i64 -1, %operand_lsl63

  %and_lsl63 = and i64 %val1, %operand_lsl63
  store volatile i64 %and_lsl63, ptr @var1_64
  %bic_lsl63 = and i64 %val1, %neg_operand_lsl63
  store volatile i64 %bic_lsl63, ptr @var1_64

  %or_lsl63 = or i64 %val1, %operand_lsl63
  store volatile i64 %or_lsl63, ptr @var1_64
  %orn_lsl63 = or i64 %val1, %neg_operand_lsl63
  store volatile i64 %orn_lsl63, ptr @var1_64

  %xor_lsl63 = xor i64 %val1, %operand_lsl63
  store volatile i64 %xor_lsl63, ptr @var1_64
  %xorn_lsl63 = xor i64 %val1, %neg_operand_lsl63
  store volatile i64 %xorn_lsl63, ptr @var1_64

  ; Check other shifts on a subset
  %operand_asr10 = ashr i64 %val2, 10
  %neg_operand_asr10 = xor i64 -1, %operand_asr10

  %bic_asr10 = and i64 %val1, %neg_operand_asr10
  store volatile i64 %bic_asr10, ptr @var1_64
  %xor_asr10 = xor i64 %val1, %operand_asr10
  store volatile i64 %xor_asr10, ptr @var1_64

  %operand_lsr1 = lshr i64 %val2, 1
  %neg_operand_lsr1 = xor i64 -1, %operand_lsr1

  %orn_lsr1 = or i64 %val1, %neg_operand_lsr1
  store volatile i64 %orn_lsr1, ptr @var1_64
  %xor_lsr1 = xor i64 %val1, %operand_lsr1
  store volatile i64 %xor_lsr1, ptr @var1_64

  ; Construct a rotate-right from a bunch of other logical
  ; operations. DAGCombiner should ensure we get the ROTR during
  ; selection
  %operand_ror20_big = shl i64 %val2, 44
  %operand_ror20_small = lshr i64 %val2, 20
  %operand_ror20 = or i64 %operand_ror20_big, %operand_ror20_small
  %neg_operand_ror20 = xor i64 -1, %operand_ror20

  %xorn_ror20 = xor i64 %val1, %neg_operand_ror20
  store volatile i64 %xorn_ror20, ptr @var1_64
  %and_ror20 = and i64 %val1, %operand_ror20
  store volatile i64 %and_ror20, ptr @var1_64

  ret void
}

; Check flag-setting forms: and+compare-with-zero should select TST (including
; the shifted-register form); the asr #12 case keeps a separate AND + CMP.
define void @flag_setting() {
; CHECK-LABEL: flag_setting:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adrp x8, :got:var1_64
; CHECK-NEXT:    adrp x10, :got:var2_64
; CHECK-NEXT:    ldr x8, [x8, :got_lo12:var1_64]
; CHECK-NEXT:    ldr x10, [x10, :got_lo12:var2_64]
; CHECK-NEXT:    ldr x9, [x8]
; CHECK-NEXT:    ldr x10, [x10]
; CHECK-NEXT:    tst x9, x10
; CHECK-NEXT:    b.gt .LBB2_4
; CHECK-NEXT:  // %bb.1: // %test2
; CHECK-NEXT:    tst x9, x10, lsl #63
; CHECK-NEXT:    b.lt .LBB2_4
; CHECK-NEXT:  // %bb.2: // %test3
; CHECK-NEXT:    and x10, x9, x10, asr #12
; CHECK-NEXT:    cmp x10, #1
; CHECK-NEXT:    b.ge .LBB2_4
; CHECK-NEXT:  // %bb.3: // %other_exit
; CHECK-NEXT:    str x9, [x8]
; CHECK-NEXT:  .LBB2_4: // %common.ret
; CHECK-NEXT:    ret
  %val1 = load i64, ptr @var1_64
  %val2 = load i64, ptr @var2_64

  %simple_and = and i64 %val1, %val2
  %tst1 = icmp sgt i64 %simple_and, 0
  br i1 %tst1, label %ret, label %test2, !prof !1

test2:
  %shifted_op = shl i64 %val2, 63
  %shifted_and = and i64 %val1, %shifted_op
  %tst2 = icmp slt i64 %shifted_and, 0
  br i1 %tst2, label %ret, label %test3, !prof !1

test3:
  %asr_op = ashr i64 %val2, 12
  %asr_and = and i64 %asr_op, %val1
  %tst3 = icmp sgt i64 %asr_and, 0
  br i1 %tst3, label %ret, label %other_exit, !prof !1

other_exit:
  store volatile i64 %val1, ptr @var1_64
  ret void
ret:
  ret void
}

; add (shl a), (shl b): the operands may be swapped so one shift folds into
; the ADD as a shifted-register operand.
define i64 @add_swap_rhs_lhs_i64(i64 %0, i64 %1) {
; CHECK-LABEL: add_swap_rhs_lhs_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl x8, x0, #8
; CHECK-NEXT:    add x0, x8, x1, lsl #3
; CHECK-NEXT:    ret
  %3 = shl i64 %0, 8
  %4 = shl i64 %1, 3
  %5 = add i64 %4, %3
  ret i64 %5
}

; As above, but %5 has a second use (the store), so its shift must stay a
; separate instruction; only the single-use shift folds into the ADD.
define i64 @add_swap_no_op_i64(i64 %0, i64 %1, ptr %2) {
; CHECK-LABEL: add_swap_no_op_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl x8, x1, #3
; CHECK-NEXT:    add x0, x8, x0, lsl #8
; CHECK-NEXT:    str x8, [x2]
; CHECK-NEXT:    ret
  %4 = shl i64 %0, 8
  %5 = shl i64 %1, 3
  store i64 %5, ptr %2
  %6 = add i64 %5, %4
  ret i64 %6
}

; 32-bit variant of add_swap_rhs_lhs_i64.
define i32 @add_swap_rhs_lhs_i32(i32 %0, i32 %1) {
; CHECK-LABEL: add_swap_rhs_lhs_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl w8, w0, #8
; CHECK-NEXT:    add w0, w8, w1, lsl #3
; CHECK-NEXT:    ret
  %3 = shl i32 %0, 8
  %4 = shl i32 %1, 3
  %5 = add i32 %4, %3
  ret i32 %5
}

; 32-bit variant of add_swap_no_op_i64: the multi-use shift stays separate.
define i32 @add_swap_no_op_i32(i32 %0, i32 %1, ptr %2) {
; CHECK-LABEL: add_swap_no_op_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl w8, w1, #3
; CHECK-NEXT:    add w0, w8, w0, lsl #8
; CHECK-NEXT:    str w8, [x2]
; CHECK-NEXT:    ret
  %4 = shl i32 %0, 8
  %5 = shl i32 %1, 3
  store i32 %5, ptr %2
  %6 = add i32 %5, %4
  ret i32 %6
}

; Equal branch weights for the !prof annotations in @flag_setting.
!1 = !{!"branch_weights", i32 1, i32 1}