; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s

; First, a simple example from Clang. The registers could plausibly be
; different, but probably won't be.

%struct.foo = type { i8, [2 x i8], i8 }

define [1 x i64] @from_clang([1 x i64] %f.coerce, i32 %n) nounwind readnone {
; CHECK-LABEL: from_clang:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #135 // =0x87
; CHECK-NEXT:    and x9, x0, #0xffffff00
; CHECK-NEXT:    and w8, w0, w8
; CHECK-NEXT:    bfi w8, w1, #3, #4
; CHECK-NEXT:    orr x0, x8, x9
; CHECK-NEXT:    ret
entry:
  %f.coerce.fca.0.extract = extractvalue [1 x i64] %f.coerce, 0
  %tmp.sroa.0.0.extract.trunc = trunc i64 %f.coerce.fca.0.extract to i32
  %bf.value = shl i32 %n, 3
  %0 = and i32 %bf.value, 120
  %f.sroa.0.0.insert.ext.masked = and i32 %tmp.sroa.0.0.extract.trunc, 135
  %1 = or i32 %f.sroa.0.0.insert.ext.masked, %0
  %f.sroa.0.0.extract.trunc = zext i32 %1 to i64
  %tmp1.sroa.1.1.insert.insert = and i64 %f.coerce.fca.0.extract, 4294967040
  %tmp1.sroa.0.0.insert.insert = or i64 %f.sroa.0.0.extract.trunc, %tmp1.sroa.1.1.insert.insert
  %.fca.0.insert = insertvalue [1 x i64] undef, i64 %tmp1.sroa.0.0.insert.insert, 0
  ret [1 x i64] %.fca.0.insert
}

define void @test_whole32(ptr %existing, ptr %new) {
; CHECK-LABEL: test_whole32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    ldr w9, [x1]
; CHECK-NEXT:    bfi w8, w9, #26, #5
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i32, ptr %existing
  %oldval_keep = and i32 %oldval, 2214592511 ; = 0x83ffffff

  %newval = load volatile i32, ptr %new
  %newval_shifted = shl i32 %newval, 26
  %newval_masked = and i32 %newval_shifted, 2080374784 ; = 0x7c000000

  %combined = or i32 %oldval_keep, %newval_masked
  store volatile i32 %combined, ptr %existing

  ret void
}

define void @test_whole64(ptr %existing, ptr %new) {
; CHECK-LABEL: test_whole64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    ldr x9, [x1]
; CHECK-NEXT:    bfi x8, x9, #26, #14
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i64, ptr %existing
  %oldval_keep = and i64 %oldval, 18446742974265032703 ; = 0xffffff0003ffffff

  %newval = load volatile i64, ptr %new
  %newval_shifted = shl i64 %newval, 26
  %newval_masked = and i64 %newval_shifted, 1099444518912 ; = 0xfffc000000

  %combined = or i64 %oldval_keep, %newval_masked
  store volatile i64 %combined, ptr %existing

  ret void
}

define void @test_whole32_from64(ptr %existing, ptr %new) {
; CHECK-LABEL: test_whole32_from64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    ldr x9, [x1]
; CHECK-NEXT:    and x8, x8, #0xffff0000
; CHECK-NEXT:    bfxil x8, x9, #0, #16
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i64, ptr %existing
  %oldval_keep = and i64 %oldval, 4294901760 ; = 0xffff0000

  %newval = load volatile i64, ptr %new
  %newval_masked = and i64 %newval, 65535 ; = 0xffff

  %combined = or i64 %oldval_keep, %newval_masked
  store volatile i64 %combined, ptr %existing

  ret void
}

define void @test_32bit_masked(ptr %existing, ptr %new) {
; CHECK-LABEL: test_32bit_masked:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w9, [x0]
; CHECK-NEXT:    mov w8, #135 // =0x87
; CHECK-NEXT:    ldr w10, [x1]
; CHECK-NEXT:    and w8, w9, w8
; CHECK-NEXT:    bfi w8, w10, #3, #4
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i32, ptr %existing
  %oldval_keep = and i32 %oldval, 135 ; = 0x87

  %newval = load volatile i32, ptr %new
  %newval_shifted = shl i32 %newval, 3
  %newval_masked = and i32 %newval_shifted, 120 ; = 0x78

  %combined = or i32 %oldval_keep, %newval_masked
  store volatile i32 %combined, ptr %existing

  ret void
}

define void @test_64bit_masked(ptr %existing, ptr %new) {
; CHECK-LABEL: test_64bit_masked:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    ldr x9, [x1]
; CHECK-NEXT:    and x8, x8, #0xff00000000
; CHECK-NEXT:    bfi x8, x9, #40, #8
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i64, ptr %existing
  %oldval_keep = and i64 %oldval, 1095216660480 ; = 0xff00000000

  %newval = load volatile i64, ptr %new
  %newval_shifted = shl i64 %newval, 40
  %newval_masked = and i64 %newval_shifted, 280375465082880 ; = 0xff0000000000

  %combined = or i64 %newval_masked, %oldval_keep
  store volatile i64 %combined, ptr %existing

  ret void
}

; The mask is too complicated for a literal ANDwwi, so make sure other avenues are tried.
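; (A rough check of why: 0x287 = 0b10_1000_0111 is not a single run of
; contiguous ones, even allowing rotation, so it cannot be encoded as an
; AArch64 logical immediate and has to be materialized with a mov, as the
; CHECK lines below show.)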
define void @test_32bit_complexmask(ptr %existing, ptr %new) {
; CHECK-LABEL: test_32bit_complexmask:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w9, [x0]
; CHECK-NEXT:    mov w8, #647 // =0x287
; CHECK-NEXT:    ldr w10, [x1]
; CHECK-NEXT:    and w8, w9, w8
; CHECK-NEXT:    bfi w8, w10, #3, #4
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i32, ptr %existing
  %oldval_keep = and i32 %oldval, 647 ; = 0x287

  %newval = load volatile i32, ptr %new
  %newval_shifted = shl i32 %newval, 3
  %newval_masked = and i32 %newval_shifted, 120 ; = 0x78

  %combined = or i32 %oldval_keep, %newval_masked
  store volatile i32 %combined, ptr %existing

  ret void
}

; Neither mask is a contiguous set of 1s, so BFI can't be used.
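; (Concretely: 0x87 = 0b1000_0111 and 0x278 = 0b10_0111_1000 both have holes
; in their set bits, so neither side of the or looks like "keep everything
; except one contiguous field", and codegen falls back to mov/and/orr.)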
define void @test_32bit_badmask(ptr %existing, ptr %new) {
; CHECK-LABEL: test_32bit_badmask:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    ldr w9, [x1]
; CHECK-NEXT:    mov w10, #632 // =0x278
; CHECK-NEXT:    mov w11, #135 // =0x87
; CHECK-NEXT:    and w9, w10, w9, lsl #3
; CHECK-NEXT:    and w8, w8, w11
; CHECK-NEXT:    orr w8, w8, w9
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i32, ptr %existing
  %oldval_keep = and i32 %oldval, 135 ; = 0x87

  %newval = load volatile i32, ptr %new
  %newval_shifted = shl i32 %newval, 3
  %newval_masked = and i32 %newval_shifted, 632 ; = 0x278

  %combined = or i32 %oldval_keep, %newval_masked
  store volatile i32 %combined, ptr %existing

  ret void
}

; Ditto
define void @test_64bit_badmask(ptr %existing, ptr %new) {
; CHECK-LABEL: test_64bit_badmask:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    ldr x9, [x1]
; CHECK-NEXT:    mov w10, #664 // =0x298
; CHECK-NEXT:    mov w11, #135 // =0x87
; CHECK-NEXT:    and x9, x10, x9, lsl #3
; CHECK-NEXT:    and x8, x8, x11
; CHECK-NEXT:    orr x8, x8, x9
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i64, ptr %existing
  %oldval_keep = and i64 %oldval, 135 ; = 0x87

  %newval = load volatile i64, ptr %new
  %newval_shifted = shl i64 %newval, 3
  %newval_masked = and i64 %newval_shifted, 664 ; = 0x298

  %combined = or i64 %oldval_keep, %newval_masked
  store volatile i64 %combined, ptr %existing

  ret void
}

; Bitfield insert where there's a left-over shr needed at the beginning
; (e.g. result of str.bf1 = str.bf2)
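; (Sketch of the fold: the field sits at bits [26,31), but the source was only
; shifted left by 12, so the inserted bits come from newval[14..18] and a
; leading 'lsr w9, w9, #14' (26 - 12 = 14) is expected before the bfi.)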
define void @test_32bit_with_shr(ptr %existing, ptr %new) {
; CHECK-LABEL: test_32bit_with_shr:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    ldr w9, [x1]
; CHECK-NEXT:    lsr w9, w9, #14
; CHECK-NEXT:    bfi w8, w9, #26, #5
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i32, ptr %existing
  %oldval_keep = and i32 %oldval, 2214592511 ; = 0x83ffffff

  %newval = load i32, ptr %new
  %newval_shifted = shl i32 %newval, 12
  %newval_masked = and i32 %newval_shifted, 2080374784 ; = 0x7c000000

  %combined = or i32 %oldval_keep, %newval_masked
  store volatile i32 %combined, ptr %existing

  ret void
}

; Bitfield insert where the second or operand is a better match to be folded into the BFM.
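; (Both masks are contiguous here, but only 0x00ff0000 lines up as a shifted
; field, so the second operand is the one folded into the insertion
; (bfi #16, #8) while 0x0000ffff survives as the and on the destination.)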
define void @test_32bit_opnd1_better(ptr %existing, ptr %new) {
; CHECK-LABEL: test_32bit_opnd1_better:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    ldr w9, [x1]
; CHECK-NEXT:    and w8, w8, #0xffff
; CHECK-NEXT:    bfi w8, w9, #16, #8
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i32, ptr %existing
  %oldval_keep = and i32 %oldval, 65535 ; = 0x0000ffff

  %newval = load i32, ptr %new
  %newval_shifted = shl i32 %newval, 16
  %newval_masked = and i32 %newval_shifted, 16711680 ; = 0x00ff0000

  %combined = or i32 %oldval_keep, %newval_masked
  store volatile i32 %combined, ptr %existing

  ret void
}

; Tests the case where all the bits from one operand are not useful.
define i32 @test_nouseful_bits(i8 %a, i32 %b) {
; CHECK-LABEL: test_nouseful_bits:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w8, w0, #0xff
; CHECK-NEXT:    lsl w8, w8, #8
; CHECK-NEXT:    mov w9, w8
; CHECK-NEXT:    bfxil w9, w0, #0, #8
; CHECK-NEXT:    orr w0, w8, w9, lsl #16
; CHECK-NEXT:    ret
  %conv = zext i8 %a to i32     ;   0  0  0  A
  %shl = shl i32 %b, 8          ;   B2 B1 B0 0
  %or = or i32 %conv, %shl      ;   B2 B1 B0 A
  %shl.1 = shl i32 %or, 8       ;   B1 B0 A  0
  %or.1 = or i32 %conv, %shl.1  ;   B1 B0 A  A
  %shl.2 = shl i32 %or.1, 8     ;   B0 A  A  0
  %or.2 = or i32 %conv, %shl.2  ;   B0 A  A  A
  %shl.3 = shl i32 %or.2, 8     ;   A  A  A  0
  %or.3 = or i32 %conv, %shl.3  ;   A  A  A  A
  %shl.4 = shl i32 %or.3, 8     ;   A  A  A  0
  ret i32 %shl.4
}

define void @test_nouseful_strb(ptr %ptr32, ptr %ptr8, i32 %x) {
; CHECK-LABEL: test_nouseful_strb:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    bfxil w8, w2, #16, #3
; CHECK-NEXT:    strb w8, [x1]
; CHECK-NEXT:    ret
entry:
  %0 = load i32, ptr %ptr32, align 8
  %and = and i32 %0, -8
  %shr = lshr i32 %x, 16
  %and1 = and i32 %shr, 7
  %or = or i32 %and, %and1
  %trunc = trunc i32 %or to i8
  store i8 %trunc, ptr %ptr8
  ret void
}

define void @test_nouseful_strh(ptr %ptr32, ptr %ptr16, i32 %x) {
; CHECK-LABEL: test_nouseful_strh:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    bfxil w8, w2, #16, #4
; CHECK-NEXT:    strh w8, [x1]
; CHECK-NEXT:    ret
entry:
  %0 = load i32, ptr %ptr32, align 8
  %and = and i32 %0, -16
  %shr = lshr i32 %x, 16
  %and1 = and i32 %shr, 15
  %or = or i32 %and, %and1
  %trunc = trunc i32 %or to i16
  store i16 %trunc, ptr %ptr16
  ret void
}

define void @test_nouseful_sturb(ptr %ptr32, ptr %ptr8, i32 %x) {
; CHECK-LABEL: test_nouseful_sturb:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    bfxil w8, w2, #16, #3
; CHECK-NEXT:    sturb w8, [x1, #-1]
; CHECK-NEXT:    ret
entry:
  %0 = load i32, ptr %ptr32, align 8
  %and = and i32 %0, -8
  %shr = lshr i32 %x, 16
  %and1 = and i32 %shr, 7
  %or = or i32 %and, %and1
  %trunc = trunc i32 %or to i8
  %gep = getelementptr i8, ptr %ptr8, i64 -1
  store i8 %trunc, ptr %gep
  ret void
}

define void @test_nouseful_sturh(ptr %ptr32, ptr %ptr16, i32 %x) {
; CHECK-LABEL: test_nouseful_sturh:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    bfxil w8, w2, #16, #4
; CHECK-NEXT:    sturh w8, [x1, #-2]
; CHECK-NEXT:    ret
entry:
  %0 = load i32, ptr %ptr32, align 8
  %and = and i32 %0, -16
  %shr = lshr i32 %x, 16
  %and1 = and i32 %shr, 15
  %or = or i32 %and, %and1
  %trunc = trunc i32 %or to i16
  %gep = getelementptr i16, ptr %ptr16, i64 -1
  store i16 %trunc, ptr %gep
  ret void
}

; The next set of tests generate a BFXIL from 'or (and X, Mask0Imm),
; (and Y, Mask1Imm)' iff Mask0Imm and ~Mask1Imm are equivalent and one of the
; MaskImms is a shifted mask (e.g., 0x000ffff0).
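;
; Worked instance of the rule, using test_or_and_and1 below:
;   Mask0Imm  = 0xffff000f (on %a), Mask1Imm = 0x0000fff0 (on %b)
;   ~Mask1Imm = 0xffff000f == Mask0Imm, and Mask1Imm is 12 contiguous ones
;   shifted left by 4, so the or lowers to 'lsr w8, w1, #4' followed by
;   'bfi w0, w8, #4, #12'.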

define i32 @test_or_and_and1(i32 %a, i32 %b) {
; CHECK-LABEL: test_or_and_and1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    lsr w8, w1, #4
; CHECK-NEXT:    bfi w0, w8, #4, #12
; CHECK-NEXT:    ret
entry:
  %and = and i32 %a, -65521 ; = 0xffff000f
  %and1 = and i32 %b, 65520 ; = 0x0000fff0
  %or = or i32 %and1, %and
  ret i32 %or
}

define i32 @test_or_and_and2(i32 %a, i32 %b) {
; CHECK-LABEL: test_or_and_and2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    lsr w8, w0, #4
; CHECK-NEXT:    mov w0, w1
; CHECK-NEXT:    bfi w0, w8, #4, #12
; CHECK-NEXT:    ret
entry:
  %and = and i32 %a, 65520   ; = 0x0000fff0
  %and1 = and i32 %b, -65521 ; = 0xffff000f
  %or = or i32 %and1, %and
  ret i32 %or
}

define i64 @test_or_and_and3(i64 %a, i64 %b) {
; CHECK-LABEL: test_or_and_and3:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    lsr x8, x1, #16
; CHECK-NEXT:    bfi x0, x8, #16, #32
; CHECK-NEXT:    ret
entry:
  %and = and i64 %a, -281474976645121 ; = 0xffff00000000ffff
  %and1 = and i64 %b, 281474976645120 ; = 0x0000ffffffff0000
  %or = or i64 %and1, %and
  ret i64 %or
}

; Don't convert 'and' with multiple uses.
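; (The masked value is stored as well as or'd here, so it has to stay live in
; its own register anyway; folding the or into a BFI would not remove the and
; and could only add a copy.)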
define i32 @test_or_and_and4(i32 %a, i32 %b, ptr %ptr) {
; CHECK-LABEL: test_or_and_and4:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    and w8, w0, #0xffff000f
; CHECK-NEXT:    and w9, w1, #0xfff0
; CHECK-NEXT:    orr w0, w9, w8
; CHECK-NEXT:    str w8, [x2]
; CHECK-NEXT:    ret
entry:
  %and = and i32 %a, -65521
  store i32 %and, ptr %ptr, align 4
  %and2 = and i32 %b, 65520
  %or = or i32 %and2, %and
  ret i32 %or
}

; Don't convert 'and' with multiple uses.
define i32 @test_or_and_and5(i32 %a, i32 %b, ptr %ptr) {
; CHECK-LABEL: test_or_and_and5:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    and w8, w1, #0xfff0
; CHECK-NEXT:    and w9, w0, #0xffff000f
; CHECK-NEXT:    orr w0, w8, w9
; CHECK-NEXT:    str w8, [x2]
; CHECK-NEXT:    ret
entry:
  %and = and i32 %b, 65520
  store i32 %and, ptr %ptr, align 4
  %and1 = and i32 %a, -65521
  %or = or i32 %and, %and1
  ret i32 %or
}

define i32 @test1(i32 %a) {
; CHECK-LABEL: test1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #5 // =0x5
; CHECK-NEXT:    bfxil w0, w8, #0, #4
; CHECK-NEXT:    ret
  %1 = and i32 %a, -16 ; = 0xfffffff0
  %2 = or i32 %1, 5    ; = 0x00000005
  ret i32 %2
}

define i32 @test2(i32 %a) {
; CHECK-LABEL: test2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #10 // =0xa
; CHECK-NEXT:    bfi w0, w8, #22, #4
; CHECK-NEXT:    ret
  %1 = and i32 %a, -62914561 ; = 0xfc3fffff
  %2 = or i32 %1, 41943040   ; = 0x02800000
  ret i32 %2
}

define i64 @test3(i64 %a) {
; CHECK-LABEL: test3:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #5 // =0x5
; CHECK-NEXT:    bfxil x0, x8, #0, #3
; CHECK-NEXT:    ret
  %1 = and i64 %a, -8 ; = 0xfffffffffffffff8
  %2 = or i64 %1, 5   ; = 0x0000000000000005
  ret i64 %2
}

define i64 @test4(i64 %a) {
; CHECK-LABEL: test4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #9 // =0x9
; CHECK-NEXT:    bfi x0, x8, #1, #7
; CHECK-NEXT:    ret
  %1 = and i64 %a, -255 ; = 0xffffffffffffff01
  %2 = or i64 %1, 18    ; = 0x0000000000000012
  ret i64 %2
}

; Don't generate BFI/BFXIL if the immediate can be encoded in the ORR.
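; (0x6 = 0b110 is a single run of ones and therefore a valid logical
; immediate, so 'orr w0, w8, #0x6' needs no mov; contrast test1 above, where
; 0x5 = 0b101 is not encodable and a mov #5 + bfxil pair is used instead.)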
define i32 @test5(i32 %a) {
; CHECK-LABEL: test5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w8, w0, #0xfffffff0
; CHECK-NEXT:    orr w0, w8, #0x6
; CHECK-NEXT:    ret
  %1 = and i32 %a, 4294967280 ; = 0xfffffff0
  %2 = or i32 %1, 6           ; = 0x00000006
  ret i32 %2
}

; BFXIL will use the same constant as the ORR, so we don't care how the
; constant is materialized (it's an equal cost either way).
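; (Either way 0xb5ad2 takes a mov + movk pair: an orr-based lowering would be
; mov/movk/and/orr, while the bfxil form is mov/movk/bfxil, so the bfxil is
; no more expensive and folds away the separate and.)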
define i32 @test6(i32 %a) {
; CHECK-LABEL: test6:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #23250 // =0x5ad2
; CHECK-NEXT:    movk w8, #11, lsl #16
; CHECK-NEXT:    bfxil w0, w8, #0, #20
; CHECK-NEXT:    ret
  %1 = and i32 %a, 4293918720 ; = 0xfff00000
  %2 = or i32 %1, 744146      ; = 0x000b5ad2
  ret i32 %2
}

; BFIs that require the same number of instructions to materialize the
; constant as the original ORR are okay.
define i32 @test7(i32 %a) {
; CHECK-LABEL: test7:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #44393 // =0xad69
; CHECK-NEXT:    movk w8, #5, lsl #16
; CHECK-NEXT:    bfi w0, w8, #1, #19
; CHECK-NEXT:    ret
  %1 = and i32 %a, 4293918721 ; = 0xfff00001
  %2 = or i32 %1, 744146      ; = 0x000b5ad2
  ret i32 %2
}

; BFIs that require more instructions to materialize the constant than the
; original ORR are not okay. In this case we would be replacing the 'and'
; with a 'movk', which would decrease ILP while using the same number of
; instructions.
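; (Here the kept mask 0xff000000000000ff is itself a valid logical immediate,
; a wrapped run of 16 ones, so the and can issue in parallel with the
; mov/movk chain; folding the or into a bfi would serialize everything behind
; the constant materialization.)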
define i64 @test8(i64 %a) {
; CHECK-LABEL: test8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #2035482624 // =0x79530000
; CHECK-NEXT:    and x9, x0, #0xff000000000000ff
; CHECK-NEXT:    movk x8, #36694, lsl #32
; CHECK-NEXT:    orr x0, x9, x8
; CHECK-NEXT:    ret
  %1 = and i64 %a, -72057594037927681 ; = 0xff000000000000ff
  %2 = or i64 %1, 157601565442048     ; = 0x00008f5679530000
  ret i64 %2
}

; This test exposed an issue with an overly aggressive assert. The bit of code
; that is expected to catch this case is unable to deal with the trunc, which
; results in a failing check due to a mismatch between the BFI opcode and
; the expected value type of the OR.
define i32 @test9(i64 %b, i32 %e) {
; CHECK-LABEL: test9:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x0, x0, #12
; CHECK-NEXT:    lsr w8, w1, #23
; CHECK-NEXT:    bfi w0, w8, #23, #9
; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT:    ret
  %c = lshr i64 %b, 12
  %d = trunc i64 %c to i32
  %f = and i32 %d, 8388607
  %g = and i32 %e, -8388608
  %h = or i32 %g, %f
  ret i32 %h
}

define <2 x i32> @test_complex_type(ptr %addr, i64 %in, ptr %bf) {
; CHECK-LABEL: test_complex_type:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr d0, [x0], #8
; CHECK-NEXT:    orr x8, x0, x1, lsl #32
; CHECK-NEXT:    str x8, [x2]
; CHECK-NEXT:    ret
  %vec = load <2 x i32>, ptr %addr

  %vec.next = getelementptr <2 x i32>, ptr %addr, i32 1
  %lo = ptrtoint ptr %vec.next to i64

  %hi = shl i64 %in, 32
  %both = or i64 %lo, %hi
  store i64 %both, ptr %bf

  ret <2 x i32> %vec
}

define i64 @test_truncated_shift(i64 %x, i64 %y) {
; CHECK-LABEL: test_truncated_shift:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    bfi x0, x1, #25, #5
; CHECK-NEXT:    ret
entry:
  %and = and i64 %x, -1040187393
  %shl4 = shl i64 %y, 25
  %and5 = and i64 %shl4, 1040187392
  %or = or i64 %and5, %and
  ret i64 %or
}

define i64 @test_and_extended_shift_with_imm(i64 %0) {
; CHECK-LABEL: test_and_extended_shift_with_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfiz x0, x0, #7, #8
; CHECK-NEXT:    ret
  %2 = shl i64 %0, 7
  %3 = and i64 %2, 32640 ; = 0x7f80
  ret i64 %3
}

; orr with a left-shifted operand is better than bfi, since it shortens the
; data dependency chain, and orr has a smaller latency and higher throughput
; than bfm on some AArch64 processors (for the rest, orr is at least as good
; as bfm):
;
; ubfx x8, x0, #8, #7
; and x9, x0, #0x7f
; orr x0, x9, x8, lsl #7
define i64 @test_orr_not_bfxil_i64(i64 %0) {
; CHECK-LABEL: test_orr_not_bfxil_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx x8, x0, #8, #7
; CHECK-NEXT:    and x9, x0, #0x7f
; CHECK-NEXT:    orr x0, x9, x8, lsl #7
; CHECK-NEXT:    ret
  %2 = and i64 %0, 127
  %3 = lshr i64 %0, 1
  %4 = and i64 %3, 16256 ; = 0x3f80
  %5 = or i64 %4, %2
  ret i64 %5
}

; The 32-bit version of `test_orr_not_bfxil_i64`.
define i32 @test_orr_not_bfxil_i32(i32 %0) {
; CHECK-LABEL: test_orr_not_bfxil_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx w8, w0, #8, #7
; CHECK-NEXT:    and w9, w0, #0x7f
; CHECK-NEXT:    orr w0, w9, w8, lsl #7
; CHECK-NEXT:    ret
  %2 = and i32 %0, 127
  %3 = lshr i32 %0, 1
  %4 = and i32 %3, 16256 ; = 0x3f80
  %5 = or i32 %4, %2
  ret i32 %5
}

; For the or operation, one operand is a left shift of the other operand,
; so an orr with a left-shifted operand is generated (not bfi).
define i64 @test_orr_not_bfi_i64(i64 %0) {
; CHECK-LABEL: test_orr_not_bfi_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and x8, x0, #0xff
; CHECK-NEXT:    orr x0, x8, x8, lsl #8
; CHECK-NEXT:    ret
  %2 = and i64 %0, 255
  %3 = shl i64 %2, 8
  %4 = or i64 %2, %3
  ret i64 %4
}

; bfi is better than orr, since it would simplify away two instructions
; (%mask and %bit-field-pos-op).
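; (Rough count: the bfi consumes both the and producing %mask and the shl
; producing %bit-field-pos-op, i.e. two IR operations fold into one
; instruction, whereas an orr with a shifted operand could only fold the shl.)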
define i32 @test_bfi_not_orr_i32(i32 %0, i32 %1) {
; CHECK-LABEL: test_bfi_not_orr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w8, w1, #0xff
; CHECK-NEXT:    bfi w8, w0, #8, #8
; CHECK-NEXT:    mov w0, w8
; CHECK-NEXT:    ret
  %bfi_dst = and i32 %1, 255
  %mask = and i32 %0, 255
  %bit-field-pos-op = shl i32 %mask, 8
  %or_res = or i32 %bit-field-pos-op, %bfi_dst
  ret i32 %or_res
}

; orr is generated (not bfi), since both would simplify away one instruction
; (%3), while orr has shorter latency and higher throughput.
define i32 @test_orr_not_bfi_i32(i32 %0) {
; CHECK-LABEL: test_orr_not_bfi_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w8, w0, #0xff
; CHECK-NEXT:    orr w0, w8, w8, lsl #8
; CHECK-NEXT:    ret
  %2 = and i32 %0, 255
  %3 = shl i32 %2, 8
  %4 = or i32 %2, %3
  ret i32 %4
}

; bfxil is better than orr, since it would simplify away two instructions
; (%shifted-mask and %bit-field-extract-op).
define i64 @test_bfxil_not_orr_i64(i64 %0, i64 %1) {
; CHECK-LABEL: test_bfxil_not_orr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and x0, x0, #0xff000
; CHECK-NEXT:    bfxil x0, x1, #12, #8
; CHECK-NEXT:    ret
  %shifted-mask = and i64 %1, 1044480
  %bfi-dst = and i64 %0, 1044480
  %bit-field-extract-op = lshr i64 %shifted-mask, 12
  %or_res = or i64 %bit-field-extract-op, %bfi-dst
  ret i64 %or_res
}

; orr is generated (not bfxil), since one operand is the right shift of the
; other operand.
define i64 @orr_not_bfxil_test2_i64(i64 %0) {
; CHECK-LABEL: orr_not_bfxil_test2_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and x8, x0, #0xff000
; CHECK-NEXT:    orr x0, x8, x8, lsr #12
; CHECK-NEXT:    ret
  %2 = and i64 %0, 1044480 ; = 0xff000
  %3 = lshr i64 %2, 12
  %4 = or i64 %2, %3
  ret i64 %4
}

; bfxil simplifies away two instructions (those computing %shifted-mask and
; %bit-field-extract-op respectively), so it's better than orr (which
; simplifies away at most one shift).
define i32 @test_bfxil_not_orr_i32(i32 %0, i32 %1) {
; CHECK-LABEL: test_bfxil_not_orr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w0, w0, #0xff000
; CHECK-NEXT:    bfxil w0, w1, #12, #8
; CHECK-NEXT:    ret
  %shifted-mask = and i32 %1, 1044480
  %bfxil-dst = and i32 %0, 1044480
  %bit-field-extract-op = lshr i32 %shifted-mask, 12
  %or_res = or i32 %bit-field-extract-op, %bfxil-dst
  ret i32 %or_res
}

; One operand is the shift of the other operand, so orr is generated (not
; bfxil).
define i32 @orr_not_bfxil_test2_i32(i32 %0) {
; CHECK-LABEL: orr_not_bfxil_test2_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w8, w0, #0xff000
; CHECK-NEXT:    orr w0, w8, w8, lsr #12
; CHECK-NEXT:    ret
  %2 = and i32 %0, 1044480 ; = 0xff000
  %3 = lshr i32 %2, 12
  %4 = or i32 %2, %3
  ret i32 %4
}

define i16 @implicit_trunc_of_imm(ptr %p, i16 %a, i16 %b) {
; CHECK-LABEL: implicit_trunc_of_imm:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    and w8, w1, #0xffffe000
; CHECK-NEXT:    mov x9, x0
; CHECK-NEXT:    mov w10, w8
; CHECK-NEXT:    mov w0, w8
; CHECK-NEXT:    bfxil w10, w2, #0, #1
; CHECK-NEXT:    strh w10, [x9]
; CHECK-NEXT:    ret
entry:
  %and1 = and i16 %a, -8192
  %and2 = and i16 %b, 1
  %or = or i16 %and2, %and1
  store i16 %or, ptr %p
  ret i16 %and1
}
756