; xref: /llvm-project/llvm/test/CodeGen/X86/atomic-bit-test.ll (revision e6bf48d11047e970cb24554a01b65b566d6b5d22)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64

; Test globals: one per operand width exercised below.
@v16 = dso_local global i16 0, align 2
@v32 = dso_local global i32 0, align 4
@v64 = dso_local global i64 0, align 8

; atomicrmw or with a single-bit constant, masked back to that bit,
; should lower to lock btsw + setb (bit 0).
define i16 @bts1() nounwind {
; X86-LABEL: bts1:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btsw $0, v16
; X86-NEXT:    setb %al
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: bts1:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btsw $0, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw or ptr @v16, i16 1 monotonic, align 2
  %and = and i16 %0, 1
  ret i16 %and
}

; Same as bts1 but for bit 1; the masked result is rebuilt by shifting
; the carry flag (setb + addl).
define i16 @bts2() nounwind {
; X86-LABEL: bts2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btsw $1, v16
; X86-NEXT:    setb %al
; X86-NEXT:    addl %eax, %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: bts2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btsw $1, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    addl %eax, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw or ptr @v16, i16 2 monotonic, align 2
  %and = and i16 %0, 2
  ret i16 %and
}

; Highest bit of the i16 (bit 15, i.e. 32768) still uses lock btsw.
define i16 @bts15() nounwind {
; X86-LABEL: bts15:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btsw $15, v16
; X86-NEXT:    setb %al
; X86-NEXT:    shll $15, %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: bts15:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btsw $15, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    shll $15, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw or ptr @v16, i16 32768 monotonic, align 2
  %and = and i16 %0, 32768
  ret i16 %and
}

; 32-bit variant: bit 31 lowers to lock btsl on both targets.
define i32 @bts31() nounwind {
; X86-LABEL: bts31:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btsl $31, v32
; X86-NEXT:    setb %al
; X86-NEXT:    shll $31, %eax
; X86-NEXT:    retl
;
; X64-LABEL: bts31:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btsl $31, v32(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    shll $31, %eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw or ptr @v32, i32 2147483648 monotonic, align 4
  %and = and i32 %0, 2147483648
  ret i32 %and
}

; 64-bit variant: X64 uses lock btsq; i686 has no 64-bit bts, so it
; falls back to a cmpxchg8b loop.
define i64 @bts63() nounwind {
; X86-LABEL: bts63:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl $-2147483648, %esi # imm = 0x80000000
; X86-NEXT:    movl v64+4, %edx
; X86-NEXT:    movl v64, %eax
; X86-NEXT:    .p2align 4
; X86-NEXT:  .LBB4_1: # %atomicrmw.start
; X86-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    orl %esi, %ecx
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    lock cmpxchg8b v64
; X86-NEXT:    jne .LBB4_1
; X86-NEXT:  # %bb.2: # %atomicrmw.end
; X86-NEXT:    andl %esi, %edx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl
;
; X64-LABEL: bts63:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btsq $63, v64(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    shlq $63, %rax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw or ptr @v64, i64 -9223372036854775808 monotonic, align 8
  %and = and i64 %0, -9223372036854775808
  ret i64 %and
}

; atomicrmw xor with a single-bit constant lowers to lock btcw (bit 0).
define i16 @btc1() nounwind {
; X86-LABEL: btc1:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btcw $0, v16
; X86-NEXT:    setb %al
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: btc1:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btcw $0, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw xor ptr @v16, i16 1 monotonic, align 2
  %and = and i16 %0, 1
  ret i16 %and
}

; atomicrmw xor of bit 1 lowers to lock btcw $1.
define i16 @btc2() nounwind {
; X86-LABEL: btc2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btcw $1, v16
; X86-NEXT:    setb %al
; X86-NEXT:    addl %eax, %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: btc2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btcw $1, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    addl %eax, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw xor ptr @v16, i16 2 monotonic, align 2
  %and = and i16 %0, 2
  ret i16 %and
}

; xor of the i16 sign bit is currently lowered via lock xaddw (xor of
; the top bit is equivalent to adding it), not btc.
define i16 @btc15() nounwind {
; X86-LABEL: btc15:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movw $-32768, %ax # imm = 0x8000
; X86-NEXT:    lock xaddw %ax, v16
; X86-NEXT:    andl $32768, %eax # imm = 0x8000
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: btc15:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movw $-32768, %ax # imm = 0x8000
; X64-NEXT:    lock xaddw %ax, v16(%rip)
; X64-NEXT:    andl $32768, %eax # imm = 0x8000
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw xor ptr @v16, i16 32768 monotonic, align 2
  %and = and i16 %0, 32768
  ret i16 %and
}

; xor of the i32 sign bit likewise lowers to lock xaddl.
define i32 @btc31() nounwind {
; X86-LABEL: btc31:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
; X86-NEXT:    lock xaddl %eax, v32
; X86-NEXT:    andl $-2147483648, %eax # imm = 0x80000000
; X86-NEXT:    retl
;
; X64-LABEL: btc31:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
; X64-NEXT:    lock xaddl %eax, v32(%rip)
; X64-NEXT:    andl $-2147483648, %eax # imm = 0x80000000
; X64-NEXT:    retq
entry:
  %0 = atomicrmw xor ptr @v32, i32 2147483648 monotonic, align 4
  %and = and i32 %0, 2147483648
  ret i32 %and
}

; xor of the i64 sign bit: lock xaddq on X64; cmpxchg8b loop on i686.
define i64 @btc63() nounwind {
; X86-LABEL: btc63:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl $-2147483648, %esi # imm = 0x80000000
; X86-NEXT:    movl v64+4, %edx
; X86-NEXT:    movl v64, %eax
; X86-NEXT:    .p2align 4
; X86-NEXT:  .LBB9_1: # %atomicrmw.start
; X86-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    xorl %esi, %ecx
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    lock cmpxchg8b v64
; X86-NEXT:    jne .LBB9_1
; X86-NEXT:  # %bb.2: # %atomicrmw.end
; X86-NEXT:    andl %esi, %edx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl
;
; X64-LABEL: btc63:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
; X64-NEXT:    movq %rcx, %rax
; X64-NEXT:    lock xaddq %rax, v64(%rip)
; X64-NEXT:    andq %rcx, %rax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw xor ptr @v64, i64 -9223372036854775808 monotonic, align 8
  %and = and i64 %0, -9223372036854775808
  ret i64 %and
}

; atomicrmw and with an all-ones-except-one-bit mask (-2 clears bit 0),
; result masked to that bit, lowers to lock btrw.
define i16 @btr1() nounwind {
; X86-LABEL: btr1:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btrw $0, v16
; X86-NEXT:    setb %al
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: btr1:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btrw $0, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw and ptr @v16, i16 -2 monotonic, align 2
  %and = and i16 %0, 1
  ret i16 %and
}

; Clearing bit 1 (-3 mask) lowers to lock btrw $1.
define i16 @btr2() nounwind {
; X86-LABEL: btr2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btrw $1, v16
; X86-NEXT:    setb %al
; X86-NEXT:    addl %eax, %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: btr2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btrw $1, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    addl %eax, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw and ptr @v16, i16 -3 monotonic, align 2
  %and = and i16 %0, 2
  ret i16 %and
}

; Clearing bit 15 (mask 32767) lowers to lock btrw $15.
define i16 @btr15() nounwind {
; X86-LABEL: btr15:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btrw $15, v16
; X86-NEXT:    setb %al
; X86-NEXT:    shll $15, %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: btr15:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btrw $15, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    shll $15, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw and ptr @v16, i16 32767 monotonic, align 2
  %and = and i16 %0, 32768
  ret i16 %and
}

; Clearing bit 31 (mask 0x7FFFFFFF) lowers to lock btrl $31.
define i32 @btr31() nounwind {
; X86-LABEL: btr31:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btrl $31, v32
; X86-NEXT:    setb %al
; X86-NEXT:    shll $31, %eax
; X86-NEXT:    retl
;
; X64-LABEL: btr31:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btrl $31, v32(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    shll $31, %eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw and ptr @v32, i32 2147483647 monotonic, align 4
  %and = and i32 %0, 2147483648
  ret i32 %and
}

; Clearing bit 63: lock btrq on X64; cmpxchg8b loop on i686 where the
; 64-bit mask is split across %edi (low) and %esi (high).
define i64 @btr63() nounwind {
; X86-LABEL: btr63:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl $2147483647, %esi # imm = 0x7FFFFFFF
; X86-NEXT:    movl $-1, %edi
; X86-NEXT:    movl v64+4, %edx
; X86-NEXT:    movl v64, %eax
; X86-NEXT:    .p2align 4
; X86-NEXT:  .LBB14_1: # %atomicrmw.start
; X86-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    andl %edi, %ebx
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    andl %esi, %ecx
; X86-NEXT:    lock cmpxchg8b v64
; X86-NEXT:    jne .LBB14_1
; X86-NEXT:  # %bb.2: # %atomicrmw.end
; X86-NEXT:    addl $1, %edi
; X86-NEXT:    adcl $0, %esi
; X86-NEXT:    andl %edi, %eax
; X86-NEXT:    andl %esi, %edx
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl
;
; X64-LABEL: btr63:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btrq $63, v64(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    shlq $63, %rax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw and ptr @v64, i64 9223372036854775807 monotonic, align 8
  %and = and i64 %0, -9223372036854775808
  ret i64 %and
}

395define i16 @multi_use1() nounwind {
396; X86-LABEL: multi_use1:
397; X86:       # %bb.0: # %entry
398; X86-NEXT:    movzwl v16, %eax
399; X86-NEXT:    .p2align 4
400; X86-NEXT:  .LBB15_1: # %atomicrmw.start
401; X86-NEXT:    # =>This Inner Loop Header: Depth=1
402; X86-NEXT:    movl %eax, %ecx
403; X86-NEXT:    orl $1, %ecx
404; X86-NEXT:    # kill: def $ax killed $ax killed $eax
405; X86-NEXT:    lock cmpxchgw %cx, v16
406; X86-NEXT:    # kill: def $ax killed $ax def $eax
407; X86-NEXT:    jne .LBB15_1
408; X86-NEXT:  # %bb.2: # %atomicrmw.end
409; X86-NEXT:    movl %eax, %ecx
410; X86-NEXT:    andl $1, %ecx
411; X86-NEXT:    xorl $2, %eax
412; X86-NEXT:    orl %ecx, %eax
413; X86-NEXT:    # kill: def $ax killed $ax killed $eax
414; X86-NEXT:    retl
415;
416; X64-LABEL: multi_use1:
417; X64:       # %bb.0: # %entry
418; X64-NEXT:    movzwl v16(%rip), %eax
419; X64-NEXT:    .p2align 4
420; X64-NEXT:  .LBB15_1: # %atomicrmw.start
421; X64-NEXT:    # =>This Inner Loop Header: Depth=1
422; X64-NEXT:    movl %eax, %ecx
423; X64-NEXT:    orl $1, %ecx
424; X64-NEXT:    # kill: def $ax killed $ax killed $eax
425; X64-NEXT:    lock cmpxchgw %cx, v16(%rip)
426; X64-NEXT:    # kill: def $ax killed $ax def $eax
427; X64-NEXT:    jne .LBB15_1
428; X64-NEXT:  # %bb.2: # %atomicrmw.end
429; X64-NEXT:    movl %eax, %ecx
430; X64-NEXT:    andl $1, %ecx
431; X64-NEXT:    xorl $2, %eax
432; X64-NEXT:    orl %ecx, %eax
433; X64-NEXT:    # kill: def $ax killed $ax killed $eax
434; X64-NEXT:    retq
435entry:
436  %0 = atomicrmw or ptr @v16, i16 1 monotonic, align 2
437  %1 = and i16 %0, 1
438  %2 = xor i16 %0, 2
439  %3 = or i16 %1, %2
440  ret i16 %3
441}
442
443define i16 @multi_use2() nounwind {
444; X86-LABEL: multi_use2:
445; X86:       # %bb.0: # %entry
446; X86-NEXT:    xorl %eax, %eax
447; X86-NEXT:    lock btsw $0, v16
448; X86-NEXT:    setb %al
449; X86-NEXT:    leal (%eax,%eax,2), %eax
450; X86-NEXT:    # kill: def $ax killed $ax killed $eax
451; X86-NEXT:    retl
452;
453; X64-LABEL: multi_use2:
454; X64:       # %bb.0: # %entry
455; X64-NEXT:    xorl %eax, %eax
456; X64-NEXT:    lock btsw $0, v16(%rip)
457; X64-NEXT:    setb %al
458; X64-NEXT:    leal (%rax,%rax,2), %eax
459; X64-NEXT:    # kill: def $ax killed $ax killed $eax
460; X64-NEXT:    retq
461entry:
462  %0 = atomicrmw or ptr @v16, i16 1 monotonic, align 2
463  %1 = and i16 %0, 1
464  %2 = shl i16 %1, 1
465  %3 = or i16 %1, %2
466  ret i16 %3
467}
468
; Negative test: the `and` that selects the bit lives in a different
; basic block from the atomicrmw, so the bts transform does not fire.
define i16 @use_in_diff_bb() nounwind {
; X86-LABEL: use_in_diff_bb:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %esi
; X86-NEXT:    movzwl v16, %esi
; X86-NEXT:    .p2align 4
; X86-NEXT:  .LBB17_1: # %atomicrmw.start
; X86-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-NEXT:    movl %esi, %ecx
; X86-NEXT:    orl $1, %ecx
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    lock cmpxchgw %cx, v16
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    jne .LBB17_1
; X86-NEXT:  # %bb.2: # %atomicrmw.end
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    testb %al, %al
; X86-NEXT:    jne .LBB17_4
; X86-NEXT:  # %bb.3:
; X86-NEXT:    calll foo@PLT
; X86-NEXT:  .LBB17_4:
; X86-NEXT:    andl $1, %esi
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
;
; X64-LABEL: use_in_diff_bb:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movzwl v16(%rip), %ebx
; X64-NEXT:    .p2align 4
; X64-NEXT:  .LBB17_1: # %atomicrmw.start
; X64-NEXT:    # =>This Inner Loop Header: Depth=1
; X64-NEXT:    movl %ebx, %ecx
; X64-NEXT:    orl $1, %ecx
; X64-NEXT:    movl %ebx, %eax
; X64-NEXT:    lock cmpxchgw %cx, v16(%rip)
; X64-NEXT:    movl %eax, %ebx
; X64-NEXT:    jne .LBB17_1
; X64-NEXT:  # %bb.2: # %atomicrmw.end
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    testb %al, %al
; X64-NEXT:    jne .LBB17_4
; X64-NEXT:  # %bb.3:
; X64-NEXT:    callq foo@PLT
; X64-NEXT:  .LBB17_4:
; X64-NEXT:    andl $1, %ebx
; X64-NEXT:    movl %ebx, %eax
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
entry:
  %0 = atomicrmw or ptr @v16, i16 1 monotonic, align 2
  br i1 undef, label %1, label %2
1:
  call void @foo()
  br label %3
2:
  br label %3
3:
  %and = and i16 %0, 1
  ret i16 %and
}

declare void @foo()

; The compared bit feeds a select in another block; the bts transform
; fires but the and/cmp-0 fold must not, hence the explicit test.
define void @no_and_cmp0_fold() nounwind {
; X86-LABEL: no_and_cmp0_fold:
; X86:       # %bb.0: # %entry
; X86-NEXT:    lock btsl $3, v32
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    testb %al, %al
; X86-NEXT:    je .LBB18_1
; X86-NEXT:  # %bb.2: # %if.end
; X86-NEXT:    retl
; X86-NEXT:  .LBB18_1: # %if.then
;
; X64-LABEL: no_and_cmp0_fold:
; X64:       # %bb.0: # %entry
; X64-NEXT:    lock btsl $3, v32(%rip)
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    testb %al, %al
; X64-NEXT:    je .LBB18_1
; X64-NEXT:  # %bb.2: # %if.end
; X64-NEXT:    retq
; X64-NEXT:  .LBB18_1: # %if.then
entry:
  %0 = atomicrmw or ptr @v32, i32 8 monotonic, align 4
  %and = and i32 %0, 8
  %tobool = icmp ne i32 %and, 0
  br i1 undef, label %if.then, label %if.end

if.then:                                          ; preds = %entry
  unreachable

if.end:                                           ; preds = %entry
  %or.cond8 = select i1 %tobool, i1 undef, i1 false
  ret void
}

; An unrelated instruction (ctlz) sits between the atomicrmw and its
; and-use; the bts transform must still hoist across it and fire.
define i32 @split_hoist_and(i32 %0) nounwind {
; X86-LABEL: split_hoist_and:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btsl $3, v32
; X86-NEXT:    setb %al
; X86-NEXT:    shll $3, %eax
; X86-NEXT:    testl %ecx, %ecx
; X86-NEXT:    retl
;
; X64-LABEL: split_hoist_and:
; X64:       # %bb.0:
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btsl $3, v32(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    shll $3, %eax
; X64-NEXT:    retq
  %2 = atomicrmw or ptr @v32, i32 8 monotonic, align 4
  %3 = tail call i32 @llvm.ctlz.i32(i32 %0, i1 false)
  %4 = and i32 %2, 8
  ret i32 %4
}

declare i32 @llvm.ctlz.i32(i32, i1)