; RUN: llc < %s -mcpu=cortex-a9 -verify-coalescing -verify-machineinstrs | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
target triple = "thumbv7-apple-ios0.0.0"

; CHECK: f
; The vld2 and vst2 are not aligned wrt each other, the second Q loaded is the
; first one stored.
; The coalescer must find a super-register larger than QQ to eliminate the copy
; setting up the vst2 data.
; CHECK: vld2
; CHECK-NOT: vorr
; CHECK-NOT: vmov
; CHECK: vst2
define void @f(ptr %p, i32 %c) nounwind ssp {
entry:
  %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32.p0(ptr %p, i32 4)
  %vld221 = extractvalue { <4 x float>, <4 x float> } %vld2, 1
  %add.ptr = getelementptr inbounds float, ptr %p, i32 8
  tail call void @llvm.arm.neon.vst2.p0.v4f32(ptr %add.ptr, <4 x float> %vld221, <4 x float> undef, i32 4)
  ret void
}

; CHECK: f1
; FIXME: This function still has copies.
define void @f1(ptr %p, i32 %c) nounwind ssp {
entry:
  %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32.p0(ptr %p, i32 4)
  %vld221 = extractvalue { <4 x float>, <4 x float> } %vld2, 1
  %add.ptr = getelementptr inbounds float, ptr %p, i32 8
  %vld22 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32.p0(ptr %add.ptr, i32 4)
  %vld2215 = extractvalue { <4 x float>, <4 x float> } %vld22, 0
  tail call void @llvm.arm.neon.vst2.p0.v4f32(ptr %add.ptr, <4 x float> %vld221, <4 x float> %vld2215, i32 4)
  ret void
}

; CHECK: f2
; FIXME: This function still has copies.
define void @f2(ptr %p, i32 %c) nounwind ssp {
entry:
  %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32.p0(ptr %p, i32 4)
  %vld224 = extractvalue { <4 x float>, <4 x float> } %vld2, 1
  br label %do.body

do.body:                                          ; preds = %do.body, %entry
  %qq0.0.1.0 = phi <4 x float> [ %vld224, %entry ], [ %vld2216, %do.body ]
  %c.addr.0 = phi i32 [ %c, %entry ], [ %dec, %do.body ]
  %p.addr.0 = phi ptr [ %p, %entry ], [ %add.ptr, %do.body ]
  %add.ptr = getelementptr inbounds float, ptr %p.addr.0, i32 8
  %vld22 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32.p0(ptr %add.ptr, i32 4)
  %vld2215 = extractvalue { <4 x float>, <4 x float> } %vld22, 0
  %vld2216 = extractvalue { <4 x float>, <4 x float> } %vld22, 1
  tail call void @llvm.arm.neon.vst2.p0.v4f32(ptr %add.ptr, <4 x float> %qq0.0.1.0, <4 x float> %vld2215, i32 4)
  %dec = add nsw i32 %c.addr.0, -1
  %tobool = icmp eq i32 %dec, 0
  br i1 %tobool, label %do.end, label %do.body

do.end:                                           ; preds = %do.body
  ret void
}

declare { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32.p0(ptr, i32) nounwind readonly
declare void @llvm.arm.neon.vst2.p0.v4f32(ptr, <4 x float>, <4 x float>, i32) nounwind

; CHECK: f3
; This function has lane insertions that span basic blocks.
; The trivial REG_SEQUENCE lowering can't handle that, but the coalescer can.
;
; void f3(ptr p, ptr q) {
;   float32x2_t x;
;   x[1] = p[3];
;   if (q)
;     x[0] = q[0] + q[1];
;   else
;     x[0] = p[2];
;   vst1_f32(p+4, x);
; }
;
; CHECK-NOT: vmov
; CHECK-NOT: vorr
define void @f3(ptr %p, ptr %q) nounwind ssp {
entry:
  %arrayidx = getelementptr inbounds float, ptr %p, i32 3
  %0 = load float, ptr %arrayidx, align 4
  %vecins = insertelement <2 x float> undef, float %0, i32 1
  %tobool = icmp eq ptr %q, null
  br i1 %tobool, label %if.else, label %if.then

if.then:                                          ; preds = %entry
  %1 = load float, ptr %q, align 4
  %arrayidx2 = getelementptr inbounds float, ptr %q, i32 1
  %2 = load float, ptr %arrayidx2, align 4
  %add = fadd float %1, %2
  %vecins3 = insertelement <2 x float> %vecins, float %add, i32 0
  br label %if.end

if.else:                                          ; preds = %entry
  %arrayidx4 = getelementptr inbounds float, ptr %p, i32 2
  %3 = load float, ptr %arrayidx4, align 4
  %vecins5 = insertelement <2 x float> %vecins, float %3, i32 0
  br label %if.end

if.end:                                           ; preds = %if.else, %if.then
  %x.0 = phi <2 x float> [ %vecins3, %if.then ], [ %vecins5, %if.else ]
  %add.ptr = getelementptr inbounds float, ptr %p, i32 4
  tail call void @llvm.arm.neon.vst1.p0.v2f32(ptr %add.ptr, <2 x float> %x.0, i32 4)
  ret void
}

declare void @llvm.arm.neon.vst1.p0.v2f32(ptr, <2 x float>, i32) nounwind
declare <2 x float> @llvm.arm.neon.vld1.v2f32.p0(ptr, i32) nounwind readonly

; CHECK: f4
; This function inserts a lane into a fully defined vector.
; The destination lane isn't read, so the subregs can coalesce.
; CHECK-NOT: vmov
; CHECK-NOT: vorr
define void @f4(ptr %p, ptr %q) nounwind ssp {
entry:
  %vld1 = tail call <2 x float> @llvm.arm.neon.vld1.v2f32.p0(ptr %p, i32 4)
  %tobool = icmp eq ptr %q, null
  br i1 %tobool, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  %0 = load float, ptr %q, align 4
  %arrayidx1 = getelementptr inbounds float, ptr %q, i32 1
  %1 = load float, ptr %arrayidx1, align 4
  %add = fadd float %0, %1
  %vecins = insertelement <2 x float> %vld1, float %add, i32 1
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  %x.0 = phi <2 x float> [ %vecins, %if.then ], [ %vld1, %entry ]
  tail call void @llvm.arm.neon.vst1.p0.v2f32(ptr %p, <2 x float> %x.0, i32 4)
  ret void
}

; CHECK: f5
; Coalesce vector lanes through phis.
; CHECK: vmov.f32 {{.*}}, #1.0
; CHECK-NOT: vmov
; CHECK-NOT: vorr
; CHECK: bx
; We may leave the last insertelement in the if.end block.
; It is inserting the %add value into a dead lane, but %add causes interference
; in the entry block, and we don't do dead lane checks across basic blocks.
define void @f5(ptr %p, ptr %q) nounwind ssp {
entry:
  %vld1 = tail call <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr %p, i32 4)
  %vecext = extractelement <4 x float> %vld1, i32 0
  %vecext1 = extractelement <4 x float> %vld1, i32 1
  %vecext2 = extractelement <4 x float> %vld1, i32 2
  %vecext3 = extractelement <4 x float> %vld1, i32 3
  %add = fadd float %vecext3, 1.000000e+00
  %tobool = icmp eq ptr %q, null
  br i1 %tobool, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  %arrayidx = getelementptr inbounds float, ptr %q, i32 1
  %0 = load float, ptr %arrayidx, align 4
  %add4 = fadd float %vecext, %0
  %1 = load float, ptr %q, align 4
  %add6 = fadd float %vecext1, %1
  %arrayidx7 = getelementptr inbounds float, ptr %q, i32 2
  %2 = load float, ptr %arrayidx7, align 4
  %add8 = fadd float %vecext2, %2
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  %a.0 = phi float [ %add4, %if.then ], [ %vecext, %entry ]
  %b.0 = phi float [ %add6, %if.then ], [ %vecext1, %entry ]
  %c.0 = phi float [ %add8, %if.then ], [ %vecext2, %entry ]
  %vecinit = insertelement <4 x float> undef, float %a.0, i32 0
  %vecinit9 = insertelement <4 x float> %vecinit, float %b.0, i32 1
  %vecinit10 = insertelement <4 x float> %vecinit9, float %c.0, i32 2
  %vecinit11 = insertelement <4 x float> %vecinit10, float %add, i32 3
  tail call void @llvm.arm.neon.vst1.p0.v4f32(ptr %p, <4 x float> %vecinit11, i32 4)
  ret void
}

declare <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr, i32) nounwind readonly

declare void @llvm.arm.neon.vst1.p0.v4f32(ptr, <4 x float>, i32) nounwind

; CHECK: pr13999
define void @pr13999() nounwind readonly {
entry:
  br i1 true, label %outer_loop, label %loop.end

outer_loop:
  %d = phi double [ 0.0, %entry ], [ %add, %after_inner_loop ]
  %0 = insertelement <2 x double> <double 0.0, double 0.0>, double %d, i32 0
  br i1 undef, label %after_inner_loop, label %inner_loop

inner_loop:
  br i1 true, label %after_inner_loop, label %inner_loop

after_inner_loop:
  %1 = phi <2 x double> [ %0, %outer_loop ], [ <double 0.0, double 0.0>, %inner_loop ]
  %2 = extractelement <2 x double> %1, i32 1
  %add = fadd double 1.0, %2
  br i1 false, label %loop.end, label %outer_loop

loop.end:
  %d.end = phi double [ 0.0, %entry ], [ %add, %after_inner_loop ]
  ret void
}

; CHECK: pr14078
define arm_aapcs_vfpcc i32 @pr14078(ptr nocapture %arg, ptr nocapture %arg1, i32 %arg2) nounwind uwtable readonly {
bb:
  br i1 undef, label %bb31, label %bb3

bb3:                                              ; preds = %bb12, %bb
  %tmp = shufflevector <2 x i64> undef, <2 x i64> undef, <1 x i32> zeroinitializer
  %tmp4 = bitcast <1 x i64> %tmp to <2 x float>
  %tmp5 = shufflevector <2 x float> %tmp4, <2 x float> undef, <4 x i32> zeroinitializer
  %tmp6 = bitcast <4 x float> %tmp5 to <2 x i64>
  %tmp7 = shufflevector <2 x i64> %tmp6, <2 x i64> undef, <1 x i32> zeroinitializer
  %tmp8 = bitcast <1 x i64> %tmp7 to <2 x float>
  %tmp9 = tail call <2 x float> @baz(<2 x float> <float 0xFFFFFFFFE0000000, float 0.000000e+00>, <2 x float> %tmp8, <2 x float> zeroinitializer) nounwind
  br i1 undef, label %bb10, label %bb12

bb10:                                             ; preds = %bb3
  %tmp11 = load <4 x float>, ptr undef, align 8
  br label %bb12

bb12:                                             ; preds = %bb10, %bb3
  %tmp13 = shufflevector <2 x float> %tmp9, <2 x float> zeroinitializer, <2 x i32> <i32 0, i32 2>
  %tmp14 = bitcast <2 x float> %tmp13 to <1 x i64>
  %tmp15 = shufflevector <1 x i64> %tmp14, <1 x i64> zeroinitializer, <2 x i32> <i32 0, i32 1>
  %tmp16 = bitcast <2 x i64> %tmp15 to <4 x float>
  %tmp17 = fmul <4 x float> zeroinitializer, %tmp16
  %tmp18 = bitcast <4 x float> %tmp17 to <2 x i64>
  %tmp19 = shufflevector <2 x i64> %tmp18, <2 x i64> undef, <1 x i32> zeroinitializer
  %tmp20 = bitcast <1 x i64> %tmp19 to <2 x float>
  %tmp21 = tail call <2 x float> @baz67(<2 x float> %tmp20, <2 x float> undef) nounwind
  %tmp22 = tail call <2 x float> @baz67(<2 x float> %tmp21, <2 x float> %tmp21) nounwind
  %tmp23 = shufflevector <2 x float> %tmp22, <2 x float> undef, <4 x i32> zeroinitializer
  %tmp24 = bitcast <4 x float> %tmp23 to <2 x i64>
  %tmp25 = shufflevector <2 x i64> %tmp24, <2 x i64> undef, <1 x i32> zeroinitializer
  %tmp26 = bitcast <1 x i64> %tmp25 to <2 x float>
  %tmp27 = extractelement <2 x float> %tmp26, i32 0
  %tmp28 = fcmp olt float %tmp27, 0.000000e+00
  %tmp29 = select i1 %tmp28, i32 0, i32 undef
  %tmp30 = icmp ult i32 undef, %arg2
  br i1 %tmp30, label %bb3, label %bb31

bb31:                                             ; preds = %bb12, %bb
  %tmp32 = phi i32 [ 1, %bb ], [ %tmp29, %bb12 ]
  ret i32 %tmp32
}

declare <2 x float> @baz(<2 x float>, <2 x float>, <2 x float>) nounwind readnone

declare <2 x float> @baz67(<2 x float>, <2 x float>) nounwind readnone

%struct.wombat.5 = type { %struct.quux, %struct.quux, %struct.quux, %struct.quux }
%struct.quux = type { <4 x float> }

; CHECK: pr14079
define linkonce_odr arm_aapcs_vfpcc %struct.wombat.5 @pr14079(ptr nocapture %arg, ptr nocapture %arg1, ptr nocapture %arg2) nounwind uwtable inlinehint {
bb:
  %tmp = shufflevector <2 x i64> zeroinitializer, <2 x i64> undef, <1 x i32> zeroinitializer
  %tmp3 = bitcast <1 x i64> %tmp to <2 x float>
  %tmp4 = shufflevector <2 x float> %tmp3, <2 x float> zeroinitializer, <2 x i32> <i32 1, i32 3>
  %tmp5 = shufflevector <2 x float> %tmp4, <2 x float> undef, <2 x i32> <i32 1, i32 3>
  %tmp6 = bitcast <2 x float> %tmp5 to <1 x i64>
  %tmp7 = shufflevector <1 x i64> undef, <1 x i64> %tmp6, <2 x i32> <i32 0, i32 1>
  %tmp8 = bitcast <2 x i64> %tmp7 to <4 x float>
  %tmp9 = shufflevector <2 x i64> zeroinitializer, <2 x i64> undef, <1 x i32> <i32 1>
  %tmp10 = bitcast <1 x i64> %tmp9 to <2 x float>
  %tmp11 = shufflevector <2 x float> %tmp10, <2 x float> undef, <2 x i32> <i32 0, i32 2>
  %tmp12 = shufflevector <2 x float> %tmp11, <2 x float> undef, <2 x i32> <i32 0, i32 2>
  %tmp13 = bitcast <2 x float> %tmp12 to <1 x i64>
  %tmp14 = shufflevector <1 x i64> %tmp13, <1 x i64> undef, <2 x i32> <i32 0, i32 1>
  %tmp15 = bitcast <2 x i64> %tmp14 to <4 x float>
  %tmp16 = insertvalue %struct.wombat.5 undef, <4 x float> %tmp8, 1, 0
  %tmp17 = insertvalue %struct.wombat.5 %tmp16, <4 x float> %tmp15, 2, 0
  %tmp18 = insertvalue %struct.wombat.5 %tmp17, <4 x float> undef, 3, 0
  ret %struct.wombat.5 %tmp18
}

; CHECK: adjustCopiesBackFrom
; The shuffle in if.else3 must be preserved even though adjustCopiesBackFrom
; is tempted to remove it.
; CHECK: vorr d
define internal void @adjustCopiesBackFrom(ptr noalias nocapture sret(<2 x i64>) %agg.result, <2 x i64> %in) {
entry:
  %0 = extractelement <2 x i64> %in, i32 0
  %cmp = icmp slt i64 %0, 1
  %.in = select i1 %cmp, <2 x i64> <i64 0, i64 undef>, <2 x i64> %in
  %1 = extractelement <2 x i64> %in, i32 1
  %cmp1 = icmp slt i64 %1, 1
  br i1 %cmp1, label %if.then2, label %if.else3

if.then2:                                         ; preds = %entry
  %2 = insertelement <2 x i64> %.in, i64 0, i32 1
  br label %if.end4

if.else3:                                         ; preds = %entry
  %3 = shufflevector <2 x i64> %.in, <2 x i64> %in, <2 x i32> <i32 0, i32 3>
  br label %if.end4

if.end4:                                          ; preds = %if.else3, %if.then2
  %result.2 = phi <2 x i64> [ %2, %if.then2 ], [ %3, %if.else3 ]
  store <2 x i64> %result.2, ptr %agg.result, align 128
  ret void
}

; <rdar://problem/12758887>
; RegisterCoalescer::updateRegDefsUses() could visit an instruction more than
; once under rare circumstances. When widening a register from QPR to DTriple
; with the original virtual register in dsub_1_dsub_2, the double rewrite would
; produce an invalid sub-register.
;
; This is because dsub_1_dsub_2 is not an idempotent sub-register index.
; It will translate %vr:dsub_0 -> %vr:dsub_1.
define hidden fastcc void @radar12758887() nounwind optsize ssp {
entry:
  br i1 undef, label %for.body, label %for.end70

for.body:                                         ; preds = %for.end, %entry
  br i1 undef, label %for.body29, label %for.end

for.body29:                                       ; preds = %for.body29, %for.body
  %0 = load <2 x double>, ptr null, align 1
  %splat40 = shufflevector <2 x double> %0, <2 x double> undef, <2 x i32> zeroinitializer
  %mul41 = fmul <2 x double> undef, %splat40
  %add42 = fadd <2 x double> undef, %mul41
  %splat44 = shufflevector <2 x double> %0, <2 x double> undef, <2 x i32> <i32 1, i32 1>
  %mul45 = fmul <2 x double> undef, %splat44
  %add46 = fadd <2 x double> undef, %mul45
  br i1 undef, label %for.end, label %for.body29

for.end:                                          ; preds = %for.body29, %for.body
  %accumR2.0.lcssa = phi <2 x double> [ zeroinitializer, %for.body ], [ %add42, %for.body29 ]
  %accumI2.0.lcssa = phi <2 x double> [ zeroinitializer, %for.body ], [ %add46, %for.body29 ]
  %1 = shufflevector <2 x double> %accumI2.0.lcssa, <2 x double> undef, <2 x i32> <i32 1, i32 0>
  %add58 = fadd <2 x double> undef, %1
  %mul61 = fmul <2 x double> %add58, undef
  %add63 = fadd <2 x double> undef, %mul61
  %add64 = fadd <2 x double> undef, %add63
  %add67 = fadd <2 x double> undef, %add64
  store <2 x double> %add67, ptr undef, align 1
  br i1 undef, label %for.end70, label %for.body

for.end70:                                        ; preds = %for.end, %entry
  ret void
}