# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64 -machine-sink-load-instrs-threshold=2 -run-pass=machine-sink %s -o - | FileCheck %s
# RUN: llc -mtriple=x86_64 -machine-sink-load-instrs-threshold=2 -run-pass=mir-debugify,machine-sink,mir-strip-debug %s -o - | FileCheck %s

# Verify that machine-sink pass is debug invariant wrt to given input. Since
# the pass examines MemOperands the IR is required for the original bug to
# trigger.

--- |
  @e = global i32 0, align 1
  @d = global i32 0, align 1
  @f = global i32 0, align 1
  @g = global i32 0, align 1

  define i32 @l() {
  entry:
    br label %for.body

  for.body:                                         ; preds = %h.exit, %entry
    %cmp = phi i1 [ true, %entry ], [ false, %h.exit ]
    %0 = load i32, ptr @d, align 1
    %tobool61.not.i = icmp eq i32 %0, 0
    %e.promoted44.i = load i32, ptr @e, align 1
    br i1 %tobool61.not.i, label %h.exit, label %for.cond13.preheader.preheader.i

  for.cond13.preheader.preheader.i:                 ; preds = %for.body
    %1 = load i32, ptr @f, align 1
    store i32 %1, ptr @g, align 1
    br label %h.exit

  h.exit:                                           ; preds = %for.cond13.preheader.preheader.i, %for.body
    %.us-phi50.i = or i32 %e.promoted44.i, 4
    store i32 %.us-phi50.i, ptr @e, align 1
    br i1 %cmp, label %for.body, label %for.end

  for.end:                                          ; preds = %h.exit
    ret i32 undef
  }
...
---
name:            l
alignment:       16
tracksRegLiveness: true
registers:
  - { id: 0, class: gr8 }
  - { id: 1, class: gr32 }
  - { id: 2, class: gr8 }
  - { id: 3, class: gr64 }
  - { id: 4, class: gr64 }
  - { id: 5, class: gr64 }
  - { id: 6, class: gr32 }
  - { id: 7, class: gr64 }
  - { id: 8, class: gr8 }
  - { id: 9, class: gr32 }
  - { id: 10, class: gr64 }
  - { id: 11, class: gr32 }
  - { id: 12, class: gr32 }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: l
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT:   [[MOV8ri:%[0-9]+]]:gr8 = MOV8ri 1
  ; CHECK-NEXT:   [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @d, $noreg :: (load (s64) from got)
  ; CHECK-NEXT:   [[MOV64rm1:%[0-9]+]]:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @e, $noreg :: (load (s64) from got)
  ; CHECK-NEXT:   [[MOV64rm2:%[0-9]+]]:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @f, $noreg :: (load (s64) from got)
  ; CHECK-NEXT:   [[MOV64rm3:%[0-9]+]]:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @g, $noreg :: (load (s64) from got)
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.1.for.body:
  ; CHECK-NEXT:   successors: %bb.3(0x30000000), %bb.2(0x50000000)
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gr8 = PHI [[MOV8ri]], %bb.0, %8, %bb.3
  ; CHECK-NEXT:   CMP32mi [[MOV64rm]], 1, $noreg, 0, $noreg, 0, implicit-def $eflags :: (dereferenceable load (s32) from @d, align 1)
  ; CHECK-NEXT:   JCC_1 %bb.3, 4, implicit $eflags
  ; CHECK-NEXT:   JMP_1 %bb.2
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.2.for.cond13.preheader.preheader.i:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT:   [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[MOV64rm2]], 1, $noreg, 0, $noreg :: (dereferenceable load (s32) from @f, align 1)
  ; CHECK-NEXT:   MOV32mr [[MOV64rm3]], 1, $noreg, 0, $noreg, killed [[MOV32rm]] :: (store (s32) into @g, align 1)
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.3.h.exit:
  ; CHECK-NEXT:   successors: %bb.1(0x7c000000), %bb.4(0x04000000)
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT:   [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV64rm1]], 1, $noreg, 0, $noreg :: (dereferenceable load (s32) from @e, align 1)
  ; CHECK-NEXT:   [[OR32ri8_:%[0-9]+]]:gr32 = OR32ri8 [[MOV32rm1]], 4, implicit-def dead $eflags
  ; CHECK-NEXT:   MOV32mr [[MOV64rm1]], 1, $noreg, 0, $noreg, killed [[OR32ri8_]] :: (store (s32) into @e, align 1)
  ; CHECK-NEXT:   [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def dead $eflags
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gr8 = COPY [[MOV32r0_]].sub_8bit
  ; CHECK-NEXT:   TEST8ri [[PHI]], 1, implicit-def $eflags
  ; CHECK-NEXT:   JCC_1 %bb.1, 5, implicit $eflags
  ; CHECK-NEXT:   JMP_1 %bb.4
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.4.for.end:
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF
  ; CHECK-NEXT:   $eax = COPY [[DEF]]
  ; CHECK-NEXT:   RET 0, $eax
  bb.0.entry:
    %2:gr8 = MOV8ri 1
    %3:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @d, $noreg :: (load (s64) from got)
    %4:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @e, $noreg :: (load (s64) from got)
    %5:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @f, $noreg :: (load (s64) from got)
    %7:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @g, $noreg :: (load (s64) from got)

  bb.1.for.body:
    successors: %bb.3(0x30000000), %bb.2(0x50000000)

    %0:gr8 = PHI %2, %bb.0, %8, %bb.3
    CMP32mi %3, 1, $noreg, 0, $noreg, 0, implicit-def $eflags :: (dereferenceable load (s32) from @d, align 1)
    %1:gr32 = MOV32rm %4, 1, $noreg, 0, $noreg :: (dereferenceable load (s32) from @e, align 1)
    JCC_1 %bb.3, 4, implicit $eflags
    JMP_1 %bb.2

  bb.2.for.cond13.preheader.preheader.i:
    %6:gr32 = MOV32rm %5, 1, $noreg, 0, $noreg :: (dereferenceable load (s32) from @f, align 1)
    MOV32mr %7, 1, $noreg, 0, $noreg, killed %6 :: (store (s32) into @g, align 1)

  bb.3.h.exit:
    successors: %bb.1(0x7c000000), %bb.4(0x04000000)

    %9:gr32 = OR32ri8 %1, 4, implicit-def dead $eflags
    MOV32mr %4, 1, $noreg, 0, $noreg, killed %9 :: (store (s32) into @e, align 1)
    %11:gr32 = MOV32r0 implicit-def dead $eflags
    %8:gr8 = COPY %11.sub_8bit
    TEST8ri %0, 1, implicit-def $eflags
    JCC_1 %bb.1, 5, implicit $eflags
    JMP_1 %bb.4

  bb.4.for.end:
    %12:gr32 = IMPLICIT_DEF
    $eax = COPY %12
    RET 0, $eax
...