xref: /llvm-project/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-1.mir (revision 59c6bd156cc8b42758ce90909615748e21c6eee2)
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - --verify-machineinstrs | FileCheck %s

# Test that VPNOTs cannot be within a tail predicated loop.

6--- |
7  define dso_local void @inloop_vpnot(ptr nocapture readonly %a, ptr nocapture readonly %b, ptr nocapture readonly %c, ptr nocapture readonly %d, ptr nocapture %e, i32 %N) local_unnamed_addr #0 {
8  entry:
9    %cmp9 = icmp eq i32 %N, 0
10    %tmp = add i32 %N, 3
11    %tmp1 = lshr i32 %tmp, 2
12    %tmp2 = shl nuw i32 %tmp1, 2
13    %tmp3 = add i32 %tmp2, -4
14    %tmp4 = lshr i32 %tmp3, 2
15    %tmp5 = add nuw nsw i32 %tmp4, 1
16    br i1 %cmp9, label %for.cond.cleanup, label %vector.ph
17
18  vector.ph:                                        ; preds = %entry
19    %start = call i32 @llvm.start.loop.iterations.i32(i32 %tmp5)
20    br label %vector.body
21
22  vector.body:                                      ; preds = %vector.body, %vector.ph
23    %lsr.iv1 = phi i32 [ %lsr.iv.next, %vector.body ], [ %start, %vector.ph ]
24    %lsr.iv.e = phi ptr [ %scevgep.e, %vector.body ], [ %e, %vector.ph ]
25    %lsr.iv.d = phi ptr [ %scevgep.d, %vector.body ], [ %d, %vector.ph ]
26    %lsr.iv.c = phi ptr [ %scevgep.c, %vector.body ], [ %c, %vector.ph ]
27    %lsr.iv18 = phi ptr [ %scevgep19, %vector.body ], [ %b, %vector.ph ]
28    %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %a, %vector.ph ]
29    %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %tmp14, %vector.body ]
30    %tmp7 = phi i32 [ %N, %vector.ph ], [ %tmp9, %vector.body ]
31    %lsr.iv17 = bitcast ptr %lsr.iv to ptr
32    %lsr.iv1820 = bitcast ptr %lsr.iv18 to ptr
33    %lsr.iv1820.c = bitcast ptr %lsr.iv.c to ptr
34    %lsr.iv17.d = bitcast ptr %lsr.iv.d to ptr
35    %lsr.cast.e = bitcast ptr %lsr.iv.e to ptr
36    %tmp8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %tmp7)
37    %tmp9 = sub i32 %tmp7, 4
38    %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
39    %tmp10 = sext <4 x i16> %wide.masked.load to <4 x i32>
40    %wide.masked.load14 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %lsr.iv1820, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
41    %tmp11 = sext <4 x i16> %wide.masked.load14 to <4 x i32>
42    %wide.masked.load.c = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %lsr.iv1820.c, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
43    %sext.load.c = sext <4 x i16> %wide.masked.load.c to <4 x i32>
44    %wide.masked.load.d = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %lsr.iv17.d, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
45    %sext.load.d = sext <4 x i16> %wide.masked.load.d to <4 x i32>
46    %tmp12 = mul nsw <4 x i32> %tmp11, %tmp10
47    %mul.2 = mul nsw <4 x i32> %sext.load.c, %sext.load.d
48    %tmp13 = add <4 x i32> %tmp12, %mul.2
49    %tmp14 = add <4 x i32> %tmp13, %vec.phi
50    %vpnot = xor <4 x i1> %tmp8, <i1 true, i1 true, i1 true, i1 true>
51    call void @llvm.masked.store.v4i32.p0(<4 x i32> %tmp14, ptr %lsr.cast.e, i32 4, <4 x i1> %vpnot)
52    %scevgep = getelementptr i16, ptr %lsr.iv, i32 4
53    %scevgep19 = getelementptr i16, ptr %lsr.iv18, i32 4
54    %scevgep.c = getelementptr i16, ptr %lsr.iv.c, i32 4
55    %scevgep.d = getelementptr i16, ptr %lsr.iv.d, i32 4
56    %scevgep.e = getelementptr i32, ptr %lsr.iv.e, i32 4
57    %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv1, i32 1)
58    %tmp16 = icmp ne i32 %tmp15, 0
59    %lsr.iv.next = add nsw i32 %lsr.iv1, -1
60    br i1 %tmp16, label %vector.body, label %for.cond.cleanup
61
62  for.cond.cleanup:                                 ; preds = %vector.body, %entry
63    ret void
64  }
65  declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32 immarg, <4 x i1>, <4 x i16>) #1
66  declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>) #2
67  declare i32 @llvm.start.loop.iterations.i32(i32) #3
68  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
69  declare <4 x i1> @llvm.arm.mve.vctp32(i32) #4
70
71...
---
name:            inloop_vpnot
alignment:       2
exposesReturnsTwice: false
legalized:       false
regBankSelected: false
selected:        false
failedISel:      false
tracksRegLiveness: true
hasWinCFI:       false
registers:       []
liveins:
  - { reg: '$r0', virtual-reg: '' }
  - { reg: '$r1', virtual-reg: '' }
  - { reg: '$r2', virtual-reg: '' }
  - { reg: '$r3', virtual-reg: '' }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap:     false
  hasPatchPoint:   false
  stackSize:       16
  offsetAdjustment: 0
  maxAlignment:    4
  adjustsStack:    false
  hasCalls:        false
  stackProtector:  ''
  maxCallFrameSize: 0
  cvBytesOfCalleeSavedRegisters: 0
  hasOpaqueSPAdjustment: false
  hasVAStart:      false
  hasMustTailInVarArgFunc: false
  localFrameSize:  0
  savePoint:       ''
  restorePoint:    ''
fixedStack:
  - { id: 0, type: default, offset: 4, size: 4, alignment: 4, stack-id: default,
      isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 1, type: default, offset: 0, size: 4, alignment: 8, stack-id: default,
      isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
stack:
  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 2, name: '', type: spill-slot, offset: -12, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r5', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 3, name: '', type: spill-slot, offset: -16, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
callSites:       []
constants:       []
machineFunctionInfo: {}
# NOTE(review): the autogenerated CHECK lines below keep MVE_VCTP32/MVE_VPST in
# bb.2 and only rewrite the backedge to t2LEUpdate — i.e. the pass forms a
# low-overhead loop but does NOT tail-predicate it, because of the VPNOT.
body:             |
  ; CHECK-LABEL: name: inloop_vpnot
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.3(0x30000000), %bb.1(0x50000000)
  ; CHECK-NEXT:   liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $lr, -4
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $r7, -8
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $r5, -12
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $r4, -16
  ; CHECK-NEXT:   renamable $r12 = t2LDRi12 $sp, 20, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.1)
  ; CHECK-NEXT:   t2CMPri renamable $r12, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
  ; CHECK-NEXT:   tBcc %bb.3, 0 /* CC::eq */, killed $cpsr
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1.vector.ph:
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT:   liveins: $r0, $r1, $r2, $r3, $r12
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   renamable $lr = t2ADDri renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT:   renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   renamable $lr = t2BICri killed renamable $lr, 3, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT:   renamable $r5 = tLDRspi $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
  ; CHECK-NEXT:   renamable $lr = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT:   renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
  ; CHECK-NEXT:   renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT:   $r4 = tMOVr killed $lr, 14 /* CC::al */, $noreg
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.vector.body:
  ; CHECK-NEXT:   successors: %bb.2(0x7c000000), %bb.3(0x04000000)
  ; CHECK-NEXT:   liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r12
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
  ; CHECK-NEXT:   MVE_VPST 4, implicit $vpr
  ; CHECK-NEXT:   renamable $r3, renamable $q1 = MVE_VLDRHS32_post killed renamable $r3, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17.d, align 2)
  ; CHECK-NEXT:   renamable $r2, renamable $q2 = MVE_VLDRHS32_post killed renamable $r2, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820.c, align 2)
  ; CHECK-NEXT:   renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
  ; CHECK-NEXT:   MVE_VPST 4, implicit $vpr
  ; CHECK-NEXT:   renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
  ; CHECK-NEXT:   renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
  ; CHECK-NEXT:   renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
  ; CHECK-NEXT:   $lr = tMOVr $r4, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
  ; CHECK-NEXT:   renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT:   renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
  ; CHECK-NEXT:   renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg, $noreg
  ; CHECK-NEXT:   MVE_VPST 8, implicit $vpr
  ; CHECK-NEXT:   renamable $r5 = MVE_VSTRWU32_post renamable $q0, killed renamable $r5, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.cast.e, align 4)
  ; CHECK-NEXT:   dead $lr = t2LEUpdate killed renamable $lr, %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.for.cond.cleanup:
  ; CHECK-NEXT:   tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc
  bb.0.entry:
    successors: %bb.3(0x30000000), %bb.1(0x50000000)
    liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r7, $lr

    frame-setup tPUSH 14, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
    frame-setup CFI_INSTRUCTION def_cfa_offset 16
    frame-setup CFI_INSTRUCTION offset $lr, -4
    frame-setup CFI_INSTRUCTION offset $r7, -8
    frame-setup CFI_INSTRUCTION offset $r5, -12
    frame-setup CFI_INSTRUCTION offset $r4, -16
    renamable $r12 = t2LDRi12 $sp, 20, 14, $noreg :: (load (s32) from %fixed-stack.0)
    t2CMPri renamable $r12, 0, 14, $noreg, implicit-def $cpsr
    tBcc %bb.3, 0, killed $cpsr

  bb.1.vector.ph:
    successors: %bb.2(0x80000000)
    liveins: $r0, $r1, $r2, $r3, $r12

    renamable $lr = t2ADDri renamable $r12, 3, 14, $noreg, $noreg
    renamable $r4, dead $cpsr = tMOVi8 1, 14, $noreg
    renamable $lr = t2BICri killed renamable $lr, 3, 14, $noreg, $noreg
    renamable $r5 = tLDRspi $sp, 4, 14, $noreg :: (load (s32) from %fixed-stack.1, align 8)
    renamable $lr = t2SUBri killed renamable $lr, 4, 14, $noreg, $noreg
    renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
    renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14, $noreg, $noreg
    $lr = t2DoLoopStart renamable $lr
    $r4 = tMOVr killed $lr, 14, $noreg

  bb.2.vector.body:
    successors: %bb.2(0x7c000000), %bb.3(0x04000000)
    liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r12

    renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
    MVE_VPST 4, implicit $vpr
    renamable $r3, renamable $q1 = MVE_VLDRHS32_post killed renamable $r3, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17.d, align 2)
    renamable $r2, renamable $q2 = MVE_VLDRHS32_post killed renamable $r2, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820.c, align 2)
    renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
    MVE_VPST 4, implicit $vpr
    renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
    renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
    renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
    $lr = tMOVr $r4, 14, $noreg
    renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
    renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14, $noreg
    renamable $r12 = t2SUBri killed renamable $r12, 4, 14, $noreg, $noreg
    renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
    ; The VPNOT (inverted VCTP mask) feeding the predicated store below is what
    ; the pass must detect to reject tail-predication of this loop.
    renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg, $noreg
    MVE_VPST 8, implicit $vpr
    renamable $r5 = MVE_VSTRWU32_post renamable $q0, killed renamable $r5, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.cast.e, align 4)
    renamable $lr = t2LoopDec killed renamable $lr, 1
    t2LoopEnd killed renamable $lr, %bb.2, implicit-def dead $cpsr
    tB %bb.3, 14, $noreg

  bb.3.for.cond.cleanup:
    tPOP_RET 14, $noreg, def $r4, def $r5, def $r7, def $pc

...
