1# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py 2# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - --verify-machineinstrs | FileCheck %s 3 4--- | 5 define dso_local arm_aapcs_vfpcc void @test1(ptr noalias nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) local_unnamed_addr { 6 entry: 7 %cmp30 = icmp eq i32 %N, 0 8 %0 = add i32 %N, 3 9 %1 = lshr i32 %0, 2 10 %2 = shl nuw i32 %1, 2 11 %3 = add i32 %2, -4 12 %4 = lshr i32 %3, 2 13 %5 = add nuw nsw i32 %4, 1 14 br i1 %cmp30, label %for.cond.cleanup6, label %vector.ph 15 16 vector.ph: ; preds = %entry 17 %start1 = call i32 @llvm.start.loop.iterations.i32(i32 %5) 18 br label %vector.body 19 20 vector.body: ; preds = %vector.body, %vector.ph 21 %lsr.iv68 = phi ptr [ %scevgep69, %vector.body ], [ %a, %vector.ph ] 22 %lsr.iv65 = phi ptr [ %scevgep66, %vector.body ], [ %c, %vector.ph ] 23 %lsr.iv62 = phi ptr [ %scevgep63, %vector.body ], [ %b, %vector.ph ] 24 %6 = phi i32 [ %start1, %vector.ph ], [ %11, %vector.body ] 25 %7 = phi i32 [ %N, %vector.ph ], [ %9, %vector.body ] 26 %lsr.iv6870 = bitcast ptr %lsr.iv68 to ptr 27 %lsr.iv6567 = bitcast ptr %lsr.iv65 to ptr 28 %lsr.iv6264 = bitcast ptr %lsr.iv62 to ptr 29 %8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %7) 30 %9 = sub i32 %7, 4 31 %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv6264, i32 4, <4 x i1> %8, <4 x i32> undef) 32 %wide.masked.load35 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv6567, i32 4, <4 x i1> %8, <4 x i32> undef) 33 %10 = mul nsw <4 x i32> %wide.masked.load35, %wide.masked.load 34 call void @llvm.masked.store.v4i32.p0(<4 x i32> %10, ptr %lsr.iv6870, i32 4, <4 x i1> %8) 35 %scevgep63 = getelementptr i32, ptr %lsr.iv62, i32 4 36 %scevgep66 = getelementptr i32, ptr %lsr.iv65, i32 4 37 %scevgep69 = getelementptr i32, ptr %lsr.iv68, i32 4 38 %11 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %6, i32 1) 39 %12 = icmp ne i32 %11, 
0 40 br i1 %12, label %vector.body, label %for.cond4.preheader 41 42 for.cond4.preheader: ; preds = %vector.body 43 %13 = icmp eq i32 %N, 0 44 %14 = add i32 %N, 3 45 %15 = lshr i32 %14, 2 46 %16 = shl nuw i32 %15, 2 47 %17 = add i32 %16, -4 48 %18 = lshr i32 %17, 2 49 %19 = add nuw nsw i32 %18, 1 50 br i1 %13, label %for.cond.cleanup6, label %vector.ph39 51 52 vector.ph39: ; preds = %for.cond4.preheader 53 %start2 = call i32 @llvm.start.loop.iterations.i32(i32 %19) 54 br label %vector.body38 55 56 vector.body38: ; preds = %vector.body38, %vector.ph39 57 %lsr.iv59 = phi ptr [ %scevgep60, %vector.body38 ], [ %a, %vector.ph39 ] 58 %lsr.iv56 = phi ptr [ %scevgep57, %vector.body38 ], [ %c, %vector.ph39 ] 59 %lsr.iv = phi ptr [ %scevgep, %vector.body38 ], [ %b, %vector.ph39 ] 60 %20 = phi i32 [ %start2, %vector.ph39 ], [ %26, %vector.body38 ] 61 %21 = phi i32 [ %N, %vector.ph39 ], [ %23, %vector.body38 ] 62 %lsr.iv5961 = bitcast ptr %lsr.iv59 to ptr 63 %lsr.iv5658 = bitcast ptr %lsr.iv56 to ptr 64 %lsr.iv55 = bitcast ptr %lsr.iv to ptr 65 %22 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %21) 66 %23 = sub i32 %21, 4 67 %wide.masked.load52 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv55, i32 4, <4 x i1> %22, <4 x i32> undef) 68 %wide.masked.load53 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv5658, i32 4, <4 x i1> %22, <4 x i32> undef) 69 %24 = xor <4 x i32> %wide.masked.load53, %wide.masked.load52 70 %wide.masked.load54 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv5961, i32 4, <4 x i1> %22, <4 x i32> undef) 71 %25 = add nsw <4 x i32> %wide.masked.load54, %24 72 call void @llvm.masked.store.v4i32.p0(<4 x i32> %25, ptr %lsr.iv5961, i32 4, <4 x i1> %22) 73 %scevgep = getelementptr i32, ptr %lsr.iv, i32 4 74 %scevgep57 = getelementptr i32, ptr %lsr.iv56, i32 4 75 %scevgep60 = getelementptr i32, ptr %lsr.iv59, i32 4 76 %26 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %20, i32 1) 77 %27 = icmp ne i32 %26, 0 78 br i1 %27, label %vector.body38, 
label %for.cond.cleanup6 79 80 for.cond.cleanup6: ; preds = %vector.body38, %entry, %for.cond4.preheader 81 ret void 82 } 83 define dso_local arm_aapcs_vfpcc void @test2(ptr noalias nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) local_unnamed_addr { 84 entry: 85 %div = lshr i32 %N, 1 86 %cmp30 = icmp eq i32 %div, 0 87 %0 = add nuw i32 %div, 3 88 %1 = lshr i32 %0, 2 89 %2 = shl nuw i32 %1, 2 90 %3 = add i32 %2, -4 91 %4 = lshr i32 %3, 2 92 %5 = add nuw nsw i32 %4, 1 93 br i1 %cmp30, label %for.cond4.preheader, label %vector.ph 94 95 vector.ph: ; preds = %entry 96 %start1 = call i32 @llvm.start.loop.iterations.i32(i32 %5) 97 br label %vector.body 98 99 vector.body: ; preds = %vector.body, %vector.ph 100 %lsr.iv68 = phi ptr [ %scevgep69, %vector.body ], [ %a, %vector.ph ] 101 %lsr.iv65 = phi ptr [ %scevgep66, %vector.body ], [ %c, %vector.ph ] 102 %lsr.iv62 = phi ptr [ %scevgep63, %vector.body ], [ %b, %vector.ph ] 103 %6 = phi i32 [ %start1, %vector.ph ], [ %11, %vector.body ] 104 %7 = phi i32 [ %div, %vector.ph ], [ %9, %vector.body ] 105 %lsr.iv6870 = bitcast ptr %lsr.iv68 to ptr 106 %lsr.iv6567 = bitcast ptr %lsr.iv65 to ptr 107 %lsr.iv6264 = bitcast ptr %lsr.iv62 to ptr 108 %8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %7) 109 %9 = sub i32 %7, 4 110 %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv6264, i32 4, <4 x i1> %8, <4 x i32> undef) 111 %wide.masked.load35 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv6567, i32 4, <4 x i1> %8, <4 x i32> undef) 112 %10 = mul nsw <4 x i32> %wide.masked.load35, %wide.masked.load 113 call void @llvm.masked.store.v4i32.p0(<4 x i32> %10, ptr %lsr.iv6870, i32 4, <4 x i1> %8) 114 %scevgep63 = getelementptr i32, ptr %lsr.iv62, i32 4 115 %scevgep66 = getelementptr i32, ptr %lsr.iv65, i32 4 116 %scevgep69 = getelementptr i32, ptr %lsr.iv68, i32 4 117 %11 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %6, i32 1) 118 %12 = icmp ne i32 %11, 0 119 br i1 %12, label 
%vector.body, label %for.cond4.preheader 120 121 for.cond4.preheader: ; preds = %vector.body, %entry 122 %cmp528 = icmp eq i32 %N, 0 123 %13 = add i32 %N, 3 124 %14 = lshr i32 %13, 2 125 %15 = shl nuw i32 %14, 2 126 %16 = add i32 %15, -4 127 %17 = lshr i32 %16, 2 128 %18 = add nuw nsw i32 %17, 1 129 br i1 %cmp528, label %for.cond.cleanup6, label %vector.ph39 130 131 vector.ph39: ; preds = %for.cond4.preheader 132 %start2 = call i32 @llvm.start.loop.iterations.i32(i32 %18) 133 br label %vector.body38 134 135 vector.body38: ; preds = %vector.body38, %vector.ph39 136 %lsr.iv59 = phi ptr [ %scevgep60, %vector.body38 ], [ %a, %vector.ph39 ] 137 %lsr.iv56 = phi ptr [ %scevgep57, %vector.body38 ], [ %c, %vector.ph39 ] 138 %lsr.iv = phi ptr [ %scevgep, %vector.body38 ], [ %b, %vector.ph39 ] 139 %19 = phi i32 [ %start2, %vector.ph39 ], [ %25, %vector.body38 ] 140 %20 = phi i32 [ %N, %vector.ph39 ], [ %22, %vector.body38 ] 141 %lsr.iv5961 = bitcast ptr %lsr.iv59 to ptr 142 %lsr.iv5658 = bitcast ptr %lsr.iv56 to ptr 143 %lsr.iv55 = bitcast ptr %lsr.iv to ptr 144 %21 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %20) 145 %22 = sub i32 %20, 4 146 %wide.masked.load52 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv55, i32 4, <4 x i1> %21, <4 x i32> undef) 147 %wide.masked.load53 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv5658, i32 4, <4 x i1> %21, <4 x i32> undef) 148 %23 = xor <4 x i32> %wide.masked.load53, %wide.masked.load52 149 %wide.masked.load54 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv5961, i32 4, <4 x i1> %21, <4 x i32> undef) 150 %24 = add nsw <4 x i32> %wide.masked.load54, %23 151 call void @llvm.masked.store.v4i32.p0(<4 x i32> %24, ptr %lsr.iv5961, i32 4, <4 x i1> %21) 152 %scevgep = getelementptr i32, ptr %lsr.iv, i32 4 153 %scevgep57 = getelementptr i32, ptr %lsr.iv56, i32 4 154 %scevgep60 = getelementptr i32, ptr %lsr.iv59, i32 4 155 %25 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %19, i32 1) 156 %26 = icmp ne i32 %25, 0 157 br 
i1 %26, label %vector.body38, label %for.cond.cleanup6 158 159 for.cond.cleanup6: ; preds = %vector.body38, %for.cond4.preheader 160 ret void 161 } 162 define dso_local arm_aapcs_vfpcc void @test3(ptr noalias nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) local_unnamed_addr { 163 entry: 164 %cmp54 = icmp eq i32 %N, 0 165 %0 = add i32 %N, 3 166 %1 = lshr i32 %0, 2 167 %2 = shl nuw i32 %1, 2 168 %3 = add i32 %2, -4 169 %4 = lshr i32 %3, 2 170 %5 = add nuw nsw i32 %4, 1 171 br i1 %cmp54, label %for.cond.cleanup17, label %vector.ph 172 173 vector.ph: ; preds = %entry 174 %start1 = call i32 @llvm.start.loop.iterations.i32(i32 %5) 175 br label %vector.body 176 177 vector.body: ; preds = %vector.body, %vector.ph 178 %lsr.iv123 = phi ptr [ %scevgep124, %vector.body ], [ %a, %vector.ph ] 179 %lsr.iv120 = phi ptr [ %scevgep121, %vector.body ], [ %c, %vector.ph ] 180 %lsr.iv117 = phi ptr [ %scevgep118, %vector.body ], [ %b, %vector.ph ] 181 %6 = phi i32 [ %start1, %vector.ph ], [ %11, %vector.body ] 182 %7 = phi i32 [ %N, %vector.ph ], [ %9, %vector.body ] 183 %lsr.iv123125 = bitcast ptr %lsr.iv123 to ptr 184 %lsr.iv120122 = bitcast ptr %lsr.iv120 to ptr 185 %lsr.iv117119 = bitcast ptr %lsr.iv117 to ptr 186 %8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %7) 187 %9 = sub i32 %7, 4 188 %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv117119, i32 4, <4 x i1> %8, <4 x i32> undef) 189 %wide.masked.load62 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv120122, i32 4, <4 x i1> %8, <4 x i32> undef) 190 %10 = mul nsw <4 x i32> %wide.masked.load62, %wide.masked.load 191 call void @llvm.masked.store.v4i32.p0(<4 x i32> %10, ptr %lsr.iv123125, i32 4, <4 x i1> %8) 192 %scevgep118 = getelementptr i32, ptr %lsr.iv117, i32 4 193 %scevgep121 = getelementptr i32, ptr %lsr.iv120, i32 4 194 %scevgep124 = getelementptr i32, ptr %lsr.iv123, i32 4 195 %11 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %6, i32 1) 196 %12 = icmp ne i32 %11, 
0 197 br i1 %12, label %vector.body, label %for.cond4.preheader 198 199 for.cond4.preheader: ; preds = %vector.body 200 %div = lshr i32 %N, 1 201 %cmp552 = icmp eq i32 %div, 0 202 %13 = add nuw i32 %div, 3 203 %14 = lshr i32 %13, 2 204 %15 = shl nuw i32 %14, 2 205 %16 = add i32 %15, -4 206 %17 = lshr i32 %16, 2 207 %18 = add nuw nsw i32 %17, 1 208 br i1 %cmp552, label %for.cond15.preheader, label %vector.ph66 209 210 vector.ph66: ; preds = %for.cond4.preheader 211 %start2 = call i32 @llvm.start.loop.iterations.i32(i32 %18) 212 br label %vector.body65 213 214 vector.body65: ; preds = %vector.body65, %vector.ph66 215 %lsr.iv114 = phi ptr [ %scevgep115, %vector.body65 ], [ %a, %vector.ph66 ] 216 %lsr.iv111 = phi ptr [ %scevgep112, %vector.body65 ], [ %c, %vector.ph66 ] 217 %lsr.iv108 = phi ptr [ %scevgep109, %vector.body65 ], [ %b, %vector.ph66 ] 218 %19 = phi i32 [ %start2, %vector.ph66 ], [ %25, %vector.body65 ] 219 %20 = phi i32 [ %div, %vector.ph66 ], [ %22, %vector.body65 ] 220 %lsr.iv114116 = bitcast ptr %lsr.iv114 to ptr 221 %lsr.iv111113 = bitcast ptr %lsr.iv111 to ptr 222 %lsr.iv108110 = bitcast ptr %lsr.iv108 to ptr 223 %21 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %20) 224 %22 = sub i32 %20, 4 225 %wide.masked.load79 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv108110, i32 4, <4 x i1> %21, <4 x i32> undef) 226 %wide.masked.load80 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv111113, i32 4, <4 x i1> %21, <4 x i32> undef) 227 %23 = xor <4 x i32> %wide.masked.load80, %wide.masked.load79 228 %wide.masked.load81 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv114116, i32 4, <4 x i1> %21, <4 x i32> undef) 229 %24 = add nsw <4 x i32> %wide.masked.load81, %23 230 call void @llvm.masked.store.v4i32.p0(<4 x i32> %24, ptr %lsr.iv114116, i32 4, <4 x i1> %21) 231 %scevgep109 = getelementptr i32, ptr %lsr.iv108, i32 4 232 %scevgep112 = getelementptr i32, ptr %lsr.iv111, i32 4 233 %scevgep115 = getelementptr i32, ptr %lsr.iv114, i32 4 234 %25 = 
call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %19, i32 1) 235 %26 = icmp ne i32 %25, 0 236 br i1 %26, label %vector.body65, label %for.cond15.preheader 237 238 for.cond15.preheader: ; preds = %vector.body65, %for.cond4.preheader 239 %27 = icmp eq i32 %N, 0 240 %28 = add i32 %N, 3 241 %29 = lshr i32 %28, 2 242 %30 = shl nuw i32 %29, 2 243 %31 = add i32 %30, -4 244 %32 = lshr i32 %31, 2 245 %33 = add nuw nsw i32 %32, 1 246 br i1 %27, label %for.cond.cleanup17, label %vector.ph85 247 248 vector.ph85: ; preds = %for.cond15.preheader 249 %start3 = call i32 @llvm.start.loop.iterations.i32(i32 %33) 250 br label %vector.body84 251 252 vector.body84: ; preds = %vector.body84, %vector.ph85 253 %lsr.iv105 = phi ptr [ %scevgep106, %vector.body84 ], [ %a, %vector.ph85 ] 254 %lsr.iv102 = phi ptr [ %scevgep103, %vector.body84 ], [ %c, %vector.ph85 ] 255 %lsr.iv = phi ptr [ %scevgep, %vector.body84 ], [ %b, %vector.ph85 ] 256 %34 = phi i32 [ %start3, %vector.ph85 ], [ %40, %vector.body84 ] 257 %35 = phi i32 [ %N, %vector.ph85 ], [ %37, %vector.body84 ] 258 %lsr.iv105107 = bitcast ptr %lsr.iv105 to ptr 259 %lsr.iv102104 = bitcast ptr %lsr.iv102 to ptr 260 %lsr.iv101 = bitcast ptr %lsr.iv to ptr 261 %36 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %35) 262 %37 = sub i32 %35, 4 263 %wide.masked.load98 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv101, i32 4, <4 x i1> %36, <4 x i32> undef) 264 %wide.masked.load99 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv102104, i32 4, <4 x i1> %36, <4 x i32> undef) 265 %wide.masked.load100 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv105107, i32 4, <4 x i1> %36, <4 x i32> undef) 266 %38 = add <4 x i32> %wide.masked.load99, %wide.masked.load98 267 %39 = sub <4 x i32> %wide.masked.load100, %38 268 call void @llvm.masked.store.v4i32.p0(<4 x i32> %39, ptr %lsr.iv105107, i32 4, <4 x i1> %36) 269 %scevgep = getelementptr i32, ptr %lsr.iv, i32 4 270 %scevgep103 = getelementptr i32, ptr %lsr.iv102, i32 4 271 %scevgep106 = 
getelementptr i32, ptr %lsr.iv105, i32 4 272 %40 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %34, i32 1) 273 %41 = icmp ne i32 %40, 0 274 br i1 %41, label %vector.body84, label %for.cond.cleanup17 275 276 for.cond.cleanup17: ; preds = %vector.body84, %entry, %for.cond15.preheader 277 ret void 278 } 279 declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>) 280 declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>) 281 declare i32 @llvm.start.loop.iterations.i32(i32) 282 declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) 283 declare <4 x i1> @llvm.arm.mve.vctp32(i32) 284 285... 286--- 287name: test1 288alignment: 2 289exposesReturnsTwice: false 290legalized: false 291regBankSelected: false 292selected: false 293failedISel: false 294tracksRegLiveness: true 295hasWinCFI: false 296registers: [] 297liveins: 298 - { reg: '$r0', virtual-reg: '' } 299 - { reg: '$r1', virtual-reg: '' } 300 - { reg: '$r2', virtual-reg: '' } 301 - { reg: '$r3', virtual-reg: '' } 302frameInfo: 303 isFrameAddressTaken: false 304 isReturnAddressTaken: false 305 hasStackMap: false 306 hasPatchPoint: false 307 stackSize: 24 308 offsetAdjustment: -16 309 maxAlignment: 4 310 adjustsStack: false 311 hasCalls: false 312 stackProtector: '' 313 maxCallFrameSize: 0 314 cvBytesOfCalleeSavedRegisters: 0 315 hasOpaqueSPAdjustment: false 316 hasVAStart: false 317 hasMustTailInVarArgFunc: false 318 localFrameSize: 0 319 savePoint: '' 320 restorePoint: '' 321fixedStack: [] 322stack: 323 - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4, 324 stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false, 325 debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } 326 - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4, 327 stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true, 328 debug-info-variable: '', debug-info-expression: '', 
debug-info-location: '' } 329 - { id: 2, name: '', type: spill-slot, offset: -12, size: 4, alignment: 4, 330 stack-id: default, callee-saved-register: '$r6', callee-saved-restored: true, 331 debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } 332 - { id: 3, name: '', type: spill-slot, offset: -16, size: 4, alignment: 4, 333 stack-id: default, callee-saved-register: '$r5', callee-saved-restored: true, 334 debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } 335 - { id: 4, name: '', type: spill-slot, offset: -20, size: 4, alignment: 4, 336 stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true, 337 debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } 338 - { id: 5, name: '', type: spill-slot, offset: -24, size: 4, alignment: 4, 339 stack-id: default, callee-saved-register: '$r8', callee-saved-restored: true, 340 debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } 341callSites: [] 342constants: [] 343machineFunctionInfo: {} 344body: | 345 ; CHECK-LABEL: name: test1 346 ; CHECK: bb.0.entry: 347 ; CHECK-NEXT: successors: %bb.6(0x30000000), %bb.1(0x50000000) 348 ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8 349 ; CHECK-NEXT: {{ $}} 350 ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp 351 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 20 352 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4 353 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8 354 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -12 355 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -16 356 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -20 357 ; CHECK-NEXT: dead $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg 358 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $r7, 8 359 ; CHECK-NEXT: early-clobber $sp = frame-setup 
t2STR_PRE killed $r8, $sp, -4, 14 /* CC::al */, $noreg 360 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r8, -24 361 ; CHECK-NEXT: tCMPi8 renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr 362 ; CHECK-NEXT: tBcc %bb.6, 0 /* CC::eq */, killed $cpsr 363 ; CHECK-NEXT: {{ $}} 364 ; CHECK-NEXT: bb.1.vector.ph: 365 ; CHECK-NEXT: successors: %bb.2(0x80000000) 366 ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3 367 ; CHECK-NEXT: {{ $}} 368 ; CHECK-NEXT: $r8 = tMOVr $r0, 14 /* CC::al */, $noreg 369 ; CHECK-NEXT: $r5 = tMOVr $r2, 14 /* CC::al */, $noreg 370 ; CHECK-NEXT: $r4 = tMOVr $r3, 14 /* CC::al */, $noreg 371 ; CHECK-NEXT: $r6 = tMOVr $r1, 14 /* CC::al */, $noreg 372 ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r4 373 ; CHECK-NEXT: {{ $}} 374 ; CHECK-NEXT: bb.2.vector.body: 375 ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000) 376 ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r5, $r6, $r8 377 ; CHECK-NEXT: {{ $}} 378 ; CHECK-NEXT: renamable $r6, renamable $q0 = MVE_VLDRWU32_post killed renamable $r6, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv6264, align 4) 379 ; CHECK-NEXT: renamable $r5, renamable $q1 = MVE_VLDRWU32_post killed renamable $r5, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv6567, align 4) 380 ; CHECK-NEXT: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0 381 ; CHECK-NEXT: renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv6870, align 4) 382 ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2 383 ; CHECK-NEXT: {{ $}} 384 ; CHECK-NEXT: bb.3.for.cond4.preheader: 385 ; CHECK-NEXT: successors: %bb.6(0x30000000), %bb.4(0x50000000) 386 ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3 387 ; CHECK-NEXT: {{ $}} 388 ; CHECK-NEXT: tCBZ $r3, %bb.6 389 ; CHECK-NEXT: {{ $}} 390 ; CHECK-NEXT: bb.4.vector.ph39: 391 ; CHECK-NEXT: successors: %bb.5(0x80000000) 392 ; 
CHECK-NEXT: liveins: $r0, $r1, $r2, $r3 393 ; CHECK-NEXT: {{ $}} 394 ; CHECK-NEXT: $r12 = tMOVr $r0, 14 /* CC::al */, $noreg 395 ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r3 396 ; CHECK-NEXT: {{ $}} 397 ; CHECK-NEXT: bb.5.vector.body38: 398 ; CHECK-NEXT: successors: %bb.5(0x7c000000), %bb.6(0x04000000) 399 ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r12 400 ; CHECK-NEXT: {{ $}} 401 ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv55, align 4) 402 ; CHECK-NEXT: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv5658, align 4) 403 ; CHECK-NEXT: renamable $r12, renamable $q2 = MVE_VLDRWU32_post killed renamable $r12, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv5961, align 4) 404 ; CHECK-NEXT: renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0 405 ; CHECK-NEXT: renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0 406 ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv5961, align 4) 407 ; CHECK-NEXT: $r0 = tMOVr $r12, 14 /* CC::al */, $noreg 408 ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.5 409 ; CHECK-NEXT: {{ $}} 410 ; CHECK-NEXT: bb.6.for.cond.cleanup6: 411 ; CHECK-NEXT: $r8, $sp = t2LDR_POST $sp, 4, 14 /* CC::al */, $noreg 412 ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc 413 bb.0.entry: 414 successors: %bb.6(0x30000000), %bb.1(0x50000000) 415 liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $lr, $r8 416 417 frame-setup tPUSH 14, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp 418 frame-setup CFI_INSTRUCTION def_cfa_offset 20 419 frame-setup CFI_INSTRUCTION offset $lr, -4 420 frame-setup CFI_INSTRUCTION 
offset $r7, -8 421 frame-setup CFI_INSTRUCTION offset $r6, -12 422 frame-setup CFI_INSTRUCTION offset $r5, -16 423 frame-setup CFI_INSTRUCTION offset $r4, -20 424 $r7 = frame-setup tADDrSPi $sp, 3, 14, $noreg 425 frame-setup CFI_INSTRUCTION def_cfa $r7, 8 426 early-clobber $sp = frame-setup t2STR_PRE killed $r8, $sp, -4, 14, $noreg 427 frame-setup CFI_INSTRUCTION offset $r8, -24 428 tCMPi8 renamable $r3, 0, 14, $noreg, implicit-def $cpsr 429 tBcc %bb.6, 0, killed $cpsr 430 431 bb.1.vector.ph: 432 successors: %bb.2(0x80000000) 433 liveins: $r0, $r1, $r2, $r3 434 435 renamable $r6, dead $cpsr = tADDi3 renamable $r3, 3, 14, $noreg 436 $r8 = tMOVr $r0, 14, $noreg 437 renamable $r6 = t2BICri killed renamable $r6, 3, 14, $noreg, $noreg 438 $r5 = tMOVr $r2, 14, $noreg 439 renamable $r12 = t2SUBri killed renamable $r6, 4, 14, $noreg, $noreg 440 renamable $r6, dead $cpsr = tMOVi8 1, 14, $noreg 441 $r4 = tMOVr $r3, 14, $noreg 442 renamable $lr = nuw nsw t2ADDrs killed renamable $r6, renamable $r12, 19, 14, $noreg, $noreg 443 $r6 = tMOVr $r1, 14, $noreg 444 $lr = t2DoLoopStart renamable $lr 445 446 bb.2.vector.body: 447 successors: %bb.2(0x7c000000), %bb.3(0x04000000) 448 liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8, $r12 449 450 renamable $vpr = MVE_VCTP32 renamable $r4, 0, $noreg, $noreg 451 MVE_VPST 4, implicit $vpr 452 renamable $r6, renamable $q0 = MVE_VLDRWU32_post killed renamable $r6, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv6264, align 4) 453 renamable $r5, renamable $q1 = MVE_VLDRWU32_post killed renamable $r5, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv6567, align 4) 454 renamable $r4, dead $cpsr = tSUBi8 killed renamable $r4, 4, 14, $noreg 455 renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0 456 MVE_VPST 8, implicit $vpr 457 renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 1, killed renamable $vpr, $noreg :: (store (s128) 
into %ir.lsr.iv6870, align 4) 458 renamable $lr = t2LoopDec killed renamable $lr, 1 459 t2LoopEnd renamable $lr, %bb.2, implicit-def dead $cpsr 460 tB %bb.3, 14, $noreg 461 462 bb.3.for.cond4.preheader: 463 successors: %bb.6(0x30000000), %bb.4(0x50000000) 464 liveins: $r0, $r1, $r2, $r3, $r12 465 466 tCBZ $r3, %bb.6 467 468 bb.4.vector.ph39: 469 successors: %bb.5(0x80000000) 470 liveins: $r0, $r1, $r2, $r3, $r12 471 472 renamable $r6, dead $cpsr = tMOVi8 1, 14, $noreg 473 renamable $lr = nuw nsw t2ADDrs killed renamable $r6, killed renamable $r12, 19, 14, $noreg, $noreg 474 $r12 = tMOVr $r0, 14, $noreg 475 $lr = t2DoLoopStart renamable $lr 476 477 bb.5.vector.body38: 478 successors: %bb.5(0x7c000000), %bb.6(0x04000000) 479 liveins: $lr, $r0, $r1, $r2, $r3, $r12 480 481 renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg 482 MVE_VPST 2, implicit $vpr 483 renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv55, align 4) 484 renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv5658, align 4) 485 renamable $r12, renamable $q2 = MVE_VLDRWU32_post killed renamable $r12, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv5961, align 4) 486 renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0 487 renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14, $noreg 488 renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0 489 MVE_VPST 8, implicit $vpr 490 MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv5961, align 4) 491 renamable $lr = t2LoopDec killed renamable $lr, 1 492 $r0 = tMOVr $r12, 14, $noreg 493 t2LoopEnd renamable $lr, %bb.5, implicit-def dead $cpsr 494 tB %bb.6, 14, $noreg 495 496 
bb.6.for.cond.cleanup6: 497 $r8, $sp = t2LDR_POST $sp, 4, 14, $noreg 498 tPOP_RET 14, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc 499 500... 501--- 502name: test2 503alignment: 2 504exposesReturnsTwice: false 505legalized: false 506regBankSelected: false 507selected: false 508failedISel: false 509tracksRegLiveness: true 510hasWinCFI: false 511registers: [] 512liveins: 513 - { reg: '$r0', virtual-reg: '' } 514 - { reg: '$r1', virtual-reg: '' } 515 - { reg: '$r2', virtual-reg: '' } 516 - { reg: '$r3', virtual-reg: '' } 517frameInfo: 518 isFrameAddressTaken: false 519 isReturnAddressTaken: false 520 hasStackMap: false 521 hasPatchPoint: false 522 stackSize: 24 523 offsetAdjustment: -16 524 maxAlignment: 4 525 adjustsStack: false 526 hasCalls: false 527 stackProtector: '' 528 maxCallFrameSize: 0 529 cvBytesOfCalleeSavedRegisters: 0 530 hasOpaqueSPAdjustment: false 531 hasVAStart: false 532 hasMustTailInVarArgFunc: false 533 localFrameSize: 0 534 savePoint: '' 535 restorePoint: '' 536fixedStack: [] 537stack: 538 - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4, 539 stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false, 540 debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } 541 - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4, 542 stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true, 543 debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } 544 - { id: 2, name: '', type: spill-slot, offset: -12, size: 4, alignment: 4, 545 stack-id: default, callee-saved-register: '$r6', callee-saved-restored: true, 546 debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } 547 - { id: 3, name: '', type: spill-slot, offset: -16, size: 4, alignment: 4, 548 stack-id: default, callee-saved-register: '$r5', callee-saved-restored: true, 549 debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } 
550 - { id: 4, name: '', type: spill-slot, offset: -20, size: 4, alignment: 4, 551 stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true, 552 debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } 553 - { id: 5, name: '', type: spill-slot, offset: -24, size: 4, alignment: 4, 554 stack-id: default, callee-saved-register: '$r8', callee-saved-restored: true, 555 debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } 556callSites: [] 557constants: [] 558machineFunctionInfo: {} 559body: | 560 ; CHECK-LABEL: name: test2 561 ; CHECK: bb.0.entry: 562 ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000) 563 ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8 564 ; CHECK-NEXT: {{ $}} 565 ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp 566 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 20 567 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4 568 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8 569 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -12 570 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -16 571 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -20 572 ; CHECK-NEXT: dead $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg 573 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $r7, 8 574 ; CHECK-NEXT: early-clobber $sp = frame-setup t2STR_PRE killed $r8, $sp, -4, 14 /* CC::al */, $noreg 575 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r8, -24 576 ; CHECK-NEXT: renamable $r6, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg 577 ; CHECK-NEXT: t2CMPrs killed renamable $r6, renamable $r3, 11, 14 /* CC::al */, $noreg, implicit-def $cpsr 578 ; CHECK-NEXT: tBcc %bb.3, 0 /* CC::eq */, killed $cpsr 579 ; CHECK-NEXT: {{ $}} 580 ; CHECK-NEXT: bb.1.vector.ph: 581 ; CHECK-NEXT: successors: %bb.2(0x80000000) 582 ; CHECK-NEXT: liveins: $r0, $r1, $r2, 
$r3 583 ; CHECK-NEXT: {{ $}} 584 ; CHECK-NEXT: renamable $r4, dead $cpsr = tLSRri renamable $r3, 1, 14 /* CC::al */, $noreg 585 ; CHECK-NEXT: $r8 = tMOVr $r0, 14 /* CC::al */, $noreg 586 ; CHECK-NEXT: $r5 = tMOVr $r1, 14 /* CC::al */, $noreg 587 ; CHECK-NEXT: $r6 = tMOVr $r2, 14 /* CC::al */, $noreg 588 ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r4 589 ; CHECK-NEXT: {{ $}} 590 ; CHECK-NEXT: bb.2.vector.body: 591 ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000) 592 ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r5, $r6, $r8 593 ; CHECK-NEXT: {{ $}} 594 ; CHECK-NEXT: renamable $r5, renamable $q0 = MVE_VLDRWU32_post killed renamable $r5, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv6264, align 4) 595 ; CHECK-NEXT: renamable $r6, renamable $q1 = MVE_VLDRWU32_post killed renamable $r6, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv6567, align 4) 596 ; CHECK-NEXT: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0 597 ; CHECK-NEXT: renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv6870, align 4) 598 ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2 599 ; CHECK-NEXT: {{ $}} 600 ; CHECK-NEXT: bb.3.for.cond4.preheader: 601 ; CHECK-NEXT: successors: %bb.6(0x30000000), %bb.4(0x50000000) 602 ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3 603 ; CHECK-NEXT: {{ $}} 604 ; CHECK-NEXT: tCBZ $r3, %bb.6 605 ; CHECK-NEXT: {{ $}} 606 ; CHECK-NEXT: bb.4.vector.ph39: 607 ; CHECK-NEXT: successors: %bb.5(0x80000000) 608 ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3 609 ; CHECK-NEXT: {{ $}} 610 ; CHECK-NEXT: $r4 = tMOVr $r0, 14 /* CC::al */, $noreg 611 ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r3 612 ; CHECK-NEXT: {{ $}} 613 ; CHECK-NEXT: bb.5.vector.body38: 614 ; CHECK-NEXT: successors: %bb.5(0x7c000000), %bb.6(0x04000000) 615 ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r4 616 ; CHECK-NEXT: {{ 
$}} 617 ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv55, align 4) 618 ; CHECK-NEXT: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv5658, align 4) 619 ; CHECK-NEXT: renamable $r4, renamable $q2 = MVE_VLDRWU32_post killed renamable $r4, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv5961, align 4) 620 ; CHECK-NEXT: renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0 621 ; CHECK-NEXT: renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0 622 ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv5961, align 4) 623 ; CHECK-NEXT: $r0 = tMOVr $r4, 14 /* CC::al */, $noreg 624 ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.5 625 ; CHECK-NEXT: {{ $}} 626 ; CHECK-NEXT: bb.6.for.cond.cleanup6: 627 ; CHECK-NEXT: $r8, $sp = t2LDR_POST $sp, 4, 14 /* CC::al */, $noreg 628 ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc 629 bb.0.entry: 630 successors: %bb.3(0x30000000), %bb.1(0x50000000) 631 liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $lr, $r8 632 633 frame-setup tPUSH 14, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp 634 frame-setup CFI_INSTRUCTION def_cfa_offset 20 635 frame-setup CFI_INSTRUCTION offset $lr, -4 636 frame-setup CFI_INSTRUCTION offset $r7, -8 637 frame-setup CFI_INSTRUCTION offset $r6, -12 638 frame-setup CFI_INSTRUCTION offset $r5, -16 639 frame-setup CFI_INSTRUCTION offset $r4, -20 640 $r7 = frame-setup tADDrSPi $sp, 3, 14, $noreg 641 frame-setup CFI_INSTRUCTION def_cfa $r7, 8 642 early-clobber $sp = frame-setup t2STR_PRE killed $r8, $sp, -4, 14, $noreg 643 frame-setup CFI_INSTRUCTION offset $r8, -24 644 
renamable $r6, dead $cpsr = tMOVi8 0, 14, $noreg 645 renamable $r12 = t2MOVi 1, 14, $noreg, $noreg 646 t2CMPrs killed renamable $r6, renamable $r3, 11, 14, $noreg, implicit-def $cpsr 647 tBcc %bb.3, 0, killed $cpsr 648 649 bb.1.vector.ph: 650 successors: %bb.2(0x80000000) 651 liveins: $r0, $r1, $r2, $r3, $r12 652 653 renamable $r6, dead $cpsr = tMOVi8 3, 14, $noreg 654 renamable $r4, dead $cpsr = tLSRri renamable $r3, 1, 14, $noreg 655 renamable $r6 = nuw t2ADDrs killed renamable $r6, renamable $r3, 11, 14, $noreg, $noreg 656 $r8 = tMOVr $r0, 14, $noreg 657 renamable $r6 = t2BICri killed renamable $r6, 3, 14, $noreg, $noreg 658 $r5 = tMOVr $r1, 14, $noreg 659 renamable $r6, dead $cpsr = tSUBi8 killed renamable $r6, 4, 14, $noreg 660 renamable $lr = nuw nsw t2ADDrs renamable $r12, killed renamable $r6, 19, 14, $noreg, $noreg 661 $r6 = tMOVr $r2, 14, $noreg 662 $lr = t2DoLoopStart renamable $lr 663 664 bb.2.vector.body: 665 successors: %bb.2(0x7c000000), %bb.3(0x04000000) 666 liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8, $r12 667 668 renamable $vpr = MVE_VCTP32 renamable $r4, 0, $noreg, $noreg 669 MVE_VPST 4, implicit $vpr 670 renamable $r5, renamable $q0 = MVE_VLDRWU32_post killed renamable $r5, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv6264, align 4) 671 renamable $r6, renamable $q1 = MVE_VLDRWU32_post killed renamable $r6, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv6567, align 4) 672 renamable $r4, dead $cpsr = tSUBi8 killed renamable $r4, 4, 14, $noreg 673 renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0 674 MVE_VPST 8, implicit $vpr 675 renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv6870, align 4) 676 renamable $lr = t2LoopDec killed renamable $lr, 1 677 t2LoopEnd renamable $lr, %bb.2, implicit-def dead $cpsr 678 tB %bb.3, 14, $noreg 679 680 
bb.3.for.cond4.preheader: 681 successors: %bb.6(0x30000000), %bb.4(0x50000000) 682 liveins: $r0, $r1, $r2, $r3, $r12 683 684 tCBZ $r3, %bb.6 685 686 bb.4.vector.ph39: 687 successors: %bb.5(0x80000000) 688 liveins: $r0, $r1, $r2, $r3, $r12 689 690 renamable $r6, dead $cpsr = tADDi3 renamable $r3, 3, 14, $noreg 691 $r4 = tMOVr $r0, 14, $noreg 692 renamable $r6 = t2BICri killed renamable $r6, 3, 14, $noreg, $noreg 693 renamable $r6, dead $cpsr = tSUBi8 killed renamable $r6, 4, 14, $noreg 694 renamable $lr = nuw nsw t2ADDrs killed renamable $r12, killed renamable $r6, 19, 14, $noreg, $noreg 695 $lr = t2DoLoopStart renamable $lr 696 697 bb.5.vector.body38: 698 successors: %bb.5(0x7c000000), %bb.6(0x04000000) 699 liveins: $lr, $r0, $r1, $r2, $r3, $r4 700 701 renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg 702 MVE_VPST 2, implicit $vpr 703 renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv55, align 4) 704 renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv5658, align 4) 705 renamable $r4, renamable $q2 = MVE_VLDRWU32_post killed renamable $r4, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv5961, align 4) 706 renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0 707 renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14, $noreg 708 renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0 709 MVE_VPST 8, implicit $vpr 710 MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv5961, align 4) 711 renamable $lr = t2LoopDec killed renamable $lr, 1 712 $r0 = tMOVr $r4, 14, $noreg 713 t2LoopEnd renamable $lr, %bb.5, implicit-def dead $cpsr 714 tB %bb.6, 14, $noreg 715 716 bb.6.for.cond.cleanup6: 717 $r8, 
    $sp = t2LDR_POST $sp, 4, 14, $noreg
    tPOP_RET 14, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc

...
---
# test3: three back-to-back tail-predicated vector loops (bb.2, bb.5, bb.8).
# The arm-low-overhead-loops pass should rewrite each
# t2DoLoopStart / t2LoopDec / t2LoopEnd triple into MVE_DLSTP_32 / MVE_LETP
# and drop the now-redundant MVE_VCTP32 / MVE_VPST predication (predicate
# operands become 0, $noreg in the CHECK lines below).
name:            test3
alignment:       2
exposesReturnsTwice: false
legalized:       false
regBankSelected: false
selected:        false
failedISel:      false
tracksRegLiveness: true
hasWinCFI:       false
registers:       []
liveins:
  - { reg: '$r0', virtual-reg: '' }
  - { reg: '$r1', virtual-reg: '' }
  - { reg: '$r2', virtual-reg: '' }
  - { reg: '$r3', virtual-reg: '' }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap:     false
  hasPatchPoint:   false
  stackSize:       32
  offsetAdjustment: -24
  maxAlignment:    4
  adjustsStack:    false
  hasCalls:        false
  stackProtector:  ''
  maxCallFrameSize: 0
  cvBytesOfCalleeSavedRegisters: 0
  hasOpaqueSPAdjustment: false
  hasVAStart:      false
  hasMustTailInVarArgFunc: false
  localFrameSize:  0
  savePoint:       ''
  restorePoint:    ''
fixedStack:      []
stack:
  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 2, name: '', type: spill-slot, offset: -12, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r6', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 3, name: '', type: spill-slot, offset: -16, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r5', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 4, name: '', type: spill-slot, offset: -20, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 5, name: '', type: spill-slot, offset: -24, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r10', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 6, name: '', type: spill-slot, offset: -28, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r9', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 7, name: '', type: spill-slot, offset: -32, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r8', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
callSites:       []
constants:       []
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: test3
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.9(0x30000000), %bb.1(0x50000000)
  ; CHECK-NEXT:   liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8, $r9, $r10
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT:   frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 20
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $lr, -4
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $r7, -8
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $r6, -12
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $r5, -16
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $r4, -20
  ; CHECK-NEXT:   dead $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa $r7, 8
  ; CHECK-NEXT:   $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r8, killed $r9, killed $r10
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $r10, -24
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $r9, -28
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $r8, -32
  ; CHECK-NEXT:   tCMPi8 renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
  ; CHECK-NEXT:   tBcc %bb.9, 0 /* CC::eq */, killed $cpsr
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.1.vector.ph:
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT:   liveins: $r0, $r1, $r2, $r3
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT:   $r8 = tMOVr $r0, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   $r5 = tMOVr $r2, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   $r4 = tMOVr $r3, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   $r6 = tMOVr $r1, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   $lr = MVE_DLSTP_32 killed renamable $r4
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.2.vector.body:
  ; CHECK-NEXT:   successors: %bb.2(0x7c000000), %bb.3(0x04000000)
  ; CHECK-NEXT:   liveins: $lr, $r0, $r1, $r2, $r3, $r5, $r6, $r8
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT:   renamable $r6, renamable $q0 = MVE_VLDRWU32_post killed renamable $r6, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv117119, align 4)
  ; CHECK-NEXT:   renamable $r5, renamable $q1 = MVE_VLDRWU32_post killed renamable $r5, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv120122, align 4)
  ; CHECK-NEXT:   renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
  ; CHECK-NEXT:   renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv123125, align 4)
  ; CHECK-NEXT:   $lr = MVE_LETP killed renamable $lr, %bb.2
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.3.for.cond4.preheader:
  ; CHECK-NEXT:   successors: %bb.6(0x30000000), %bb.4(0x50000000)
  ; CHECK-NEXT:   liveins: $r0, $r1, $r2, $r3
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT:   renamable $r6, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   t2CMPrs killed renamable $r6, renamable $r3, 11, 14 /* CC::al */, $noreg, implicit-def $cpsr
  ; CHECK-NEXT:   tBcc %bb.6, 0 /* CC::eq */, killed $cpsr
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.4.vector.ph66:
  ; CHECK-NEXT:   successors: %bb.5(0x80000000)
  ; CHECK-NEXT:   liveins: $r0, $r1, $r2, $r3
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT:   renamable $r5, dead $cpsr = tLSRri renamable $r3, 1, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   $r10 = tMOVr $r0, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   $r9 = tMOVr $r2, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   $r4 = tMOVr $r1, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   $r6 = tMOVr $r0, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   $lr = MVE_DLSTP_32 killed renamable $r5
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.5.vector.body65:
  ; CHECK-NEXT:   successors: %bb.5(0x7c000000), %bb.6(0x04000000)
  ; CHECK-NEXT:   liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r6, $r9, $r10
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT:   renamable $r4, renamable $q0 = MVE_VLDRWU32_post killed renamable $r4, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv108110, align 4)
  ; CHECK-NEXT:   renamable $r9, renamable $q1 = MVE_VLDRWU32_post killed renamable $r9, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv111113, align 4)
  ; CHECK-NEXT:   renamable $r6, renamable $q2 = MVE_VLDRWU32_post killed renamable $r6, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv114116, align 4)
  ; CHECK-NEXT:   renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
  ; CHECK-NEXT:   renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
  ; CHECK-NEXT:   MVE_VSTRWU32 killed renamable $q0, killed renamable $r10, 0, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv114116, align 4)
  ; CHECK-NEXT:   $r10 = tMOVr $r6, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   $lr = MVE_LETP killed renamable $lr, %bb.5
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.6.for.cond15.preheader:
  ; CHECK-NEXT:   successors: %bb.9(0x30000000), %bb.7(0x50000000)
  ; CHECK-NEXT:   liveins: $r0, $r1, $r2, $r3
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT:   tCBZ $r3, %bb.9
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.7.vector.ph85:
  ; CHECK-NEXT:   successors: %bb.8(0x80000000)
  ; CHECK-NEXT:   liveins: $r0, $r1, $r2, $r3
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT:   $r5 = tMOVr $r0, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   $lr = MVE_DLSTP_32 killed renamable $r3
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.8.vector.body84:
  ; CHECK-NEXT:   successors: %bb.8(0x7c000000), %bb.9(0x04000000)
  ; CHECK-NEXT:   liveins: $lr, $r0, $r1, $r2, $r5
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT:   renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv101, align 4)
  ; CHECK-NEXT:   renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv102104, align 4)
  ; CHECK-NEXT:   renamable $r5, renamable $q2 = MVE_VLDRWU32_post killed renamable $r5, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv105107, align 4)
  ; CHECK-NEXT:   renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
  ; CHECK-NEXT:   renamable $q0 = MVE_VSUBi32 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
  ; CHECK-NEXT:   MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv105107, align 4)
  ; CHECK-NEXT:   $r0 = tMOVr $r5, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   $lr = MVE_LETP killed renamable $lr, %bb.8
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.9.for.cond.cleanup17:
  ; CHECK-NEXT:   $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r8, def $r9, def $r10
  ; CHECK-NEXT:   tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
  ; Entry: spill r4-r6/lr and r8-r10, then branch straight to cleanup (bb.9)
  ; when r3 == 0.
  bb.0.entry:
    successors: %bb.9(0x30000000), %bb.1(0x50000000)
    liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $lr, $r8, $r9, $r10

    frame-setup tPUSH 14, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
    frame-setup CFI_INSTRUCTION def_cfa_offset 20
    frame-setup CFI_INSTRUCTION offset $lr, -4
    frame-setup CFI_INSTRUCTION offset $r7, -8
    frame-setup CFI_INSTRUCTION offset $r6, -12
    frame-setup CFI_INSTRUCTION offset $r5, -16
    frame-setup CFI_INSTRUCTION offset $r4, -20
    $r7 = frame-setup tADDrSPi $sp, 3, 14, $noreg
    frame-setup CFI_INSTRUCTION def_cfa $r7, 8
    $sp = frame-setup t2STMDB_UPD $sp, 14, $noreg, killed $r8, killed $r9, killed $r10
    frame-setup CFI_INSTRUCTION offset $r10, -24
    frame-setup CFI_INSTRUCTION offset $r9, -28
    frame-setup CFI_INSTRUCTION offset $r8, -32
    tCMPi8 renamable $r3, 0, 14, $noreg, implicit-def $cpsr
    tBcc %bb.9, 0, killed $cpsr

  ; Loop 1 setup: element count in r4 (copy of r3); iteration count
  ; ((((r3 + 3) & ~3) - 4) >> 2) + 1 built into lr; the intermediate
  ; ((r3 + 3) & ~3) - 4 is also kept in r12 for reuse by loop 3 (bb.7).
  bb.1.vector.ph:
    successors: %bb.2(0x80000000)
    liveins: $r0, $r1, $r2, $r3

    renamable $r6, dead $cpsr = tADDi3 renamable $r3, 3, 14, $noreg
    $r8 = tMOVr $r0, 14, $noreg
    renamable $r6 = t2BICri killed renamable $r6, 3, 14, $noreg, $noreg
    $r5 = tMOVr $r2, 14, $noreg
    renamable $r12 = t2SUBri killed renamable $r6, 4, 14, $noreg, $noreg
    renamable $r6, dead $cpsr = tMOVi8 1, 14, $noreg
    $r4 = tMOVr $r3, 14, $noreg
    renamable $lr = nuw nsw t2ADDrs killed renamable $r6, renamable $r12, 19, 14, $noreg, $noreg
    $r6 = tMOVr $r1, 14, $noreg
    $lr = t2DoLoopStart renamable $lr

  ; Loop 1: predicated mul-store (loads via r6/r5, store via r8).  The
  ; VCTP32/VPST + t2LoopDec/t2LoopEnd pattern here is expected to collapse
  ; into MVE_DLSTP_32/MVE_LETP (see CHECK lines for bb.2 above).
  bb.2.vector.body:
    successors: %bb.2(0x7c000000), %bb.3(0x04000000)
    liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8, $r12

    renamable $vpr = MVE_VCTP32 renamable $r4, 0, $noreg, $noreg
    MVE_VPST 4, implicit $vpr
    renamable $r6, renamable $q0 = MVE_VLDRWU32_post killed renamable $r6, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv117119, align 4)
    renamable $r5, renamable $q1 = MVE_VLDRWU32_post killed renamable $r5, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv120122, align 4)
    renamable $r4, dead $cpsr = tSUBi8 killed renamable $r4, 4, 14, $noreg
    renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
    MVE_VPST 8, implicit $vpr
    renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv123125, align 4)
    renamable $lr = t2LoopDec killed renamable $lr, 1
    t2LoopEnd renamable $lr, %bb.2, implicit-def dead $cpsr
    tB %bb.3, 14, $noreg

  ; Skip loop 2 when r3 >> 1 == 0 (compared via t2CMPrs shift-form);
  ; r8 = 1 is materialized here for the loop 2/3 iteration-count adds.
  bb.3.for.cond4.preheader:
    successors: %bb.6(0x30000000), %bb.4(0x50000000)
    liveins: $r0, $r1, $r2, $r3, $r12

    renamable $r6, dead $cpsr = tMOVi8 0, 14, $noreg
    renamable $r8 = t2MOVi 1, 14, $noreg, $noreg
    t2CMPrs killed renamable $r6, renamable $r3, 11, 14, $noreg, implicit-def $cpsr
    tBcc %bb.6, 0, killed $cpsr

  ; Loop 2 setup: element count r5 = r3 >> 1; iteration count in lr.
  bb.4.vector.ph66:
    successors: %bb.5(0x80000000)
    liveins: $r0, $r1, $r2, $r3, $r8, $r12

    renamable $r6, dead $cpsr = tMOVi8 3, 14, $noreg
    renamable $r5, dead $cpsr = tLSRri renamable $r3, 1, 14, $noreg
    renamable $r6 = nuw t2ADDrs killed renamable $r6, renamable $r3, 11, 14, $noreg, $noreg
    $r10 = tMOVr $r0, 14, $noreg
    renamable $r6 = t2BICri killed renamable $r6, 3, 14, $noreg, $noreg
    $r9 = tMOVr $r2, 14, $noreg
    renamable $r6, dead $cpsr = tSUBi8 killed renamable $r6, 4, 14, $noreg
    $r4 = tMOVr $r1, 14, $noreg
    renamable $lr = nuw nsw t2ADDrs renamable $r8, killed renamable $r6, 19, 14, $noreg, $noreg
    $r6 = tMOVr $r0, 14, $noreg
    $lr = t2DoLoopStart renamable $lr

  ; Loop 2: predicated veor + vadd, storing in-place via r10 (trailing r6
  ; copy advances the store pointer).  Same DLSTP/LETP conversion expected.
  bb.5.vector.body65:
    successors: %bb.5(0x7c000000), %bb.6(0x04000000)
    liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8, $r9, $r10, $r12

    renamable $vpr = MVE_VCTP32 renamable $r5, 0, $noreg, $noreg
    MVE_VPST 2, implicit $vpr
    renamable $r4, renamable $q0 = MVE_VLDRWU32_post killed renamable $r4, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv108110, align 4)
    renamable $r9, renamable $q1 = MVE_VLDRWU32_post killed renamable $r9, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv111113, align 4)
    renamable $r6, renamable $q2 = MVE_VLDRWU32_post killed renamable $r6, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv114116, align 4)
    renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
    renamable $r5, dead $cpsr = tSUBi8 killed renamable $r5, 4, 14, $noreg
    renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
    MVE_VPST 8, implicit $vpr
    MVE_VSTRWU32 killed renamable $q0, killed renamable $r10, 0, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv114116, align 4)
    renamable $lr = t2LoopDec killed renamable $lr, 1
    $r10 = tMOVr $r6, 14, $noreg
    t2LoopEnd renamable $lr, %bb.5, implicit-def dead $cpsr
    tB %bb.6, 14, $noreg

  bb.6.for.cond15.preheader:
    successors: %bb.9(0x30000000), %bb.7(0x50000000)
    liveins: $r0, $r1, $r2, $r3, $r8, $r12

    tCBZ $r3, %bb.9

  ; Loop 3 setup: reuses r12 (computed back in bb.1) and r8 (= 1) to form
  ; the iteration count — a cross-loop reuse the pass must keep correct
  ; when it removes the lr-based counters.
  bb.7.vector.ph85:
    successors: %bb.8(0x80000000)
    liveins: $r0, $r1, $r2, $r3, $r8, $r12

    renamable $lr = nuw nsw t2ADDrs killed renamable $r8, killed renamable $r12, 19, 14, $noreg, $noreg
    $r5 = tMOVr $r0, 14, $noreg
    $lr = t2DoLoopStart renamable $lr

  ; Loop 3: predicated vadd + vsub over r3 elements, storing via r0.
  bb.8.vector.body84:
    successors: %bb.8(0x7c000000), %bb.9(0x04000000)
    liveins: $lr, $r0, $r1, $r2, $r3, $r5

    renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
    MVE_VPST 2, implicit $vpr
    renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv101, align 4)
    renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv102104, align 4)
    renamable $r5, renamable $q2 = MVE_VLDRWU32_post killed renamable $r5, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv105107, align 4)
    renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
    renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14, $noreg
    renamable $q0 = MVE_VSUBi32 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
    MVE_VPST 8, implicit $vpr
    MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv105107, align 4)
    renamable $lr = t2LoopDec killed renamable $lr, 1
    $r0 = tMOVr $r5, 14, $noreg
    t2LoopEnd renamable $lr, %bb.8, implicit-def dead $cpsr
    tB %bb.9, 14, $noreg

  ; Restore r8-r10, then pop r4-r7/pc.
  bb.9.for.cond.cleanup17:
    $sp = t2LDMIA_UPD $sp, 14, $noreg, def $r8, def $r9, def $r10
    tPOP_RET 14, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc

...