; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc %s -O2 -mtriple=i686-unknown-linux-gnu -o - | FileCheck %s

@f = global ptr zeroinitializer

; PR39391 - The load of %v1 should be scheduled before the zeroing of the A-D registers.

define void @g() #0 {
; CHECK-LABEL: g:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushl %ebp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    .cfi_offset %ebp, -8
; CHECK-NEXT:    movl %esp, %ebp
; CHECK-NEXT:    .cfi_def_cfa_register %ebp
; CHECK-NEXT:    pushl %ebx
; CHECK-NEXT:    pushl %esi
; CHECK-NEXT:    subl $16, %esp
; CHECK-NEXT:    .cfi_offset %esi, -16
; CHECK-NEXT:    .cfi_offset %ebx, -12
; CHECK-NEXT:    movl f, %esi
; CHECK-NEXT:    movzbl (%esi), %eax
; CHECK-NEXT:    movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    xorl %edx, %edx
; CHECK-NEXT:    xorl %ecx, %ecx
; CHECK-NEXT:    xorl %ebx, %ebx
; CHECK-NEXT:    lock cmpxchg8b (%esi)
; CHECK-NEXT:    cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
; CHECK-NEXT:    je .LBB0_2
; CHECK-NEXT:  # %bb.1: # %.
; CHECK-NEXT:    calll m@PLT
; CHECK-NEXT:  .LBB0_2: # %k.end
entry:
  %p = load ptr, ptr @f
  %v1 = load atomic i8, ptr %p monotonic, align 1
  %v2 = load atomic i64, ptr %p monotonic, align 8
  %j.h = icmp eq i8 %v1, 0
  br i1 %j.h, label %k.end, label %.

.:                                                ; preds = %entry
  %v3 = call i32 @m()
  unreachable

k.end:                                            ; preds = %entry
  unreachable
}

declare i32 @m()

attributes #0 = { noimplicitfloat "frame-pointer"="non-leaf" }