# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+lob -run-pass=arm-mve-vpt-opts --verify-machineinstrs %s -o - | FileCheck %s
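# A call in the loop body (the memset below is lowered to a call to
# __aeabi_memclr4, which clobbers LR) means the low-overhead loop cannot be
# kept, so arm-mve-vpt-opts reverts the pseudos early: t2DoLoopStart becomes
# tMOVr, t2LoopDec becomes t2SUBri, and t2LoopEnd becomes t2CMPri + t2Bcc.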

--- |
  @d = local_unnamed_addr global i32 0, align 4
  @c = local_unnamed_addr global [1 x i32] zeroinitializer, align 4

  define i32 @e() optsize {
  entry:
    %.pr = load i32, ptr @d, align 4
    %cmp13 = icmp sgt i32 %.pr, -1
    br i1 %cmp13, label %for.cond1.preheader.preheader, label %for.end9

  for.cond1.preheader.preheader:                    ; preds = %entry
    %0 = add i32 %.pr, 1
    %1 = call i32 @llvm.start.loop.iterations.i32(i32 %0)
    br label %for.cond1.preheader

  for.cond1.preheader:                              ; preds = %for.cond1.preheader.preheader, %for.cond1.preheader
    %2 = phi i32 [ %1, %for.cond1.preheader.preheader ], [ %3, %for.cond1.preheader ]
    call void @llvm.memset.p0.i32(ptr nonnull align 4 dereferenceable(24) @c, i8 0, i32 24, i1 false)
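    ; This memset is lowered to the tBL to &__aeabi_memclr4 in the MIR body
    ; below, which is what forces the early reversion.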
    %3 = call i32 @llvm.loop.decrement.reg.i32(i32 %2, i32 1)
    %4 = icmp ne i32 %3, 0
    br i1 %4, label %for.cond1.preheader, label %for.cond.for.end9_crit_edge

  for.cond.for.end9_crit_edge:                      ; preds = %for.cond1.preheader
    store i32 -1, ptr @d, align 4
    br label %for.end9

  for.end9:                                         ; preds = %for.cond.for.end9_crit_edge, %entry
    ret i32 undef
  }

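  ; llvm.start.loop.iterations and llvm.loop.decrement.reg are the
  ; hardware-loop intrinsics; they correspond to the t2DoLoopStart and
  ; t2LoopDec pseudos in the MIR body below.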
  declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg)
  declare i32 @llvm.start.loop.iterations.i32(i32)
  declare i32 @llvm.loop.decrement.reg.i32(i32, i32)

...
---
name:            e
alignment:       2
exposesReturnsTwice: false
legalized:       false
regBankSelected: false
selected:        false
failedISel:      false
tracksRegLiveness: true
hasWinCFI:       false
registers:
  - { id: 0, class: gprnopc, preferred-register: '' }
  - { id: 1, class: gpr, preferred-register: '' }
  - { id: 2, class: gprlr, preferred-register: '' }
  - { id: 3, class: gpr, preferred-register: '' }
  - { id: 4, class: rgpr, preferred-register: '' }
  - { id: 5, class: rgpr, preferred-register: '' }
  - { id: 6, class: gprlr, preferred-register: '' }
  - { id: 7, class: rgpr, preferred-register: '' }
  - { id: 8, class: rgpr, preferred-register: '' }
  - { id: 9, class: gprlr, preferred-register: '' }
  - { id: 10, class: gprlr, preferred-register: '' }
  - { id: 11, class: rgpr, preferred-register: '' }
  - { id: 12, class: rgpr, preferred-register: '' }
  - { id: 13, class: gpr, preferred-register: '' }
liveins:         []
body:             |
  ; CHECK-LABEL: name: e
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.1(0x50000000), %bb.4(0x30000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[t2MOVi32imm:%[0-9]+]]:rgpr = t2MOVi32imm @d
  ; CHECK-NEXT:   [[t2LDRi12_:%[0-9]+]]:gprnopc = t2LDRi12 [[t2MOVi32imm]], 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
  ; CHECK-NEXT:   t2CMPri [[t2LDRi12_]], 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
  ; CHECK-NEXT:   t2Bcc %bb.4, 4 /* CC::mi */, $cpsr
  ; CHECK-NEXT:   t2B %bb.1, 14 /* CC::al */, $noreg
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1.for.cond1.preheader.preheader:
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[t2ADDri:%[0-9]+]]:rgpr = t2ADDri [[t2LDRi12_]], 1, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT:   [[tMOVr:%[0-9]+]]:gprlr = tMOVr killed [[t2ADDri]], 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY [[tMOVr]]
  ; CHECK-NEXT:   [[t2MOVi32imm1:%[0-9]+]]:rgpr = t2MOVi32imm @c
  ; CHECK-NEXT:   [[t2MOVi:%[0-9]+]]:rgpr = t2MOVi 24, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.for.cond1.preheader:
  ; CHECK-NEXT:   successors: %bb.2(0x7c000000), %bb.3(0x04000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gprlr = PHI [[COPY]], %bb.1, %3, %bb.2
  ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
  ; CHECK-NEXT:   $r0 = COPY [[t2MOVi32imm1]]
  ; CHECK-NEXT:   $r1 = COPY [[t2MOVi]]
  ; CHECK-NEXT:   tBL 14 /* CC::al */, $noreg, &__aeabi_memclr4, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit $r1, implicit-def $sp
  ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
  ; CHECK-NEXT:   [[t2SUBri:%[0-9]+]]:gprlr = t2SUBri [[PHI]], 1, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY [[t2SUBri]]
  ; CHECK-NEXT:   t2CMPri [[t2SUBri]], 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
  ; CHECK-NEXT:   t2Bcc %bb.2, 1 /* CC::ne */, $cpsr
  ; CHECK-NEXT:   t2B %bb.3, 14 /* CC::al */, $noreg
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.for.cond.for.end9_crit_edge:
  ; CHECK-NEXT:   successors: %bb.4(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[t2MOVi1:%[0-9]+]]:rgpr = t2MOVi -1, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT:   t2STRi12 killed [[t2MOVi1]], [[t2MOVi32imm]], 0, 14 /* CC::al */, $noreg :: (store (s32) into @d)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.4.for.end9:
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:gpr = IMPLICIT_DEF
  ; CHECK-NEXT:   $r0 = COPY [[DEF]]
  ; CHECK-NEXT:   tBX_RET 14 /* CC::al */, $noreg, implicit $r0
  bb.0.entry:
    successors: %bb.1(0x50000000), %bb.4(0x30000000)

    %4:rgpr = t2MOVi32imm @d
    %0:gprnopc = t2LDRi12 %4, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
    t2CMPri %0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
    t2Bcc %bb.4, 4 /* CC::mi */, $cpsr
    t2B %bb.1, 14 /* CC::al */, $noreg

  bb.1.for.cond1.preheader.preheader:
    successors: %bb.2(0x80000000)

    %5:rgpr = t2ADDri %0, 1, 14 /* CC::al */, $noreg, $noreg
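    ; Because of the call in bb.2, t2DoLoopStart is reverted to a plain tMOVr
    ; (see the CHECK lines above).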
    %6:gprlr = t2DoLoopStart killed %5
    %1:gpr = COPY %6
    %7:rgpr = t2MOVi32imm @c
    %8:rgpr = t2MOVi 24, 14 /* CC::al */, $noreg, $noreg

  bb.2.for.cond1.preheader:
    successors: %bb.2(0x7c000000), %bb.3(0x04000000)

    %2:gprlr = PHI %1, %bb.1, %3, %bb.2
    ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
    $r0 = COPY %7
    $r1 = COPY %8
    tBL 14 /* CC::al */, $noreg, &__aeabi_memclr4, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit $r1, implicit-def $sp
    ADJCALLSTACKUP 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
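    ; The tBL above clobbers LR (implicit-def dead $lr), so t2LoopDec and
    ; t2LoopEnd are reverted to t2SUBri and t2CMPri + t2Bcc instead of
    ; forming an LE loop.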
    %9:gprlr = t2LoopDec %2, 1
    %3:gpr = COPY %9
    t2LoopEnd %9, %bb.2, implicit-def dead $cpsr
    t2B %bb.3, 14 /* CC::al */, $noreg

  bb.3.for.cond.for.end9_crit_edge:
    successors: %bb.4(0x80000000)

    %12:rgpr = t2MOVi -1, 14 /* CC::al */, $noreg, $noreg
    t2STRi12 killed %12, %4, 0, 14 /* CC::al */, $noreg :: (store (s32) into @d)

  bb.4.for.end9:
    %13:gpr = IMPLICIT_DEF
    $r0 = COPY %13
    tBX_RET 14 /* CC::al */, $noreg, implicit $r0

...