; RUN: llc -mtriple=aarch64-linux-gnu -mcpu=cortex-a57 -verify-machineinstrs < %s | FileCheck %s

; This file checks a bug in the MachineCopyPropagation pass. The last COPY will
; be incorrectly removed if the machine instructions are as follows:
;   %q5_q6 = COPY %q2_q3
;   %d5 =
;   %d3 =
;   %d3 = COPY %d6
; This is caused by a bug in function SourceNoLongerAvailable(), which fails to
; remove the relationship of D6 and "%q5_q6 = COPY %q2_q3".

@failed = internal unnamed_addr global i1 false

; The ld2 must survive copy propagation; no stray kill-marker COPY comments
; should remain for the D registers involved.
; CHECK-LABEL: foo:
; CHECK: ld2
; CHECK-NOT: // kill: def D{{[0-9]+}} killed D{{[0-9]+}}
define void @foo(<2 x i32> %shuffle251, <8 x i8> %vtbl1.i, ptr %t2, <2 x i32> %vrsubhn_v2.i1364) {
entry:
  %val0 = alloca [2 x i64], align 8
  %val1 = alloca <2 x i64>, align 16
  %vmull = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> <i32 -1, i32 -1>, <2 x i32> %shuffle251)
  %vgetq_lane = extractelement <2 x i64> %vmull, i32 0
  %cmp = icmp eq i64 %vgetq_lane, 1
  br i1 %cmp, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  store i1 true, ptr @failed, align 1
  br label %if.end

if.end:                                           ; preds = %if.then, %entry
  tail call void @f2()
  %sqdmull = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> <i16 1, i16 0, i16 0, i16 0>, <4 x i16> <i16 2, i16 0, i16 0, i16 0>)
  %sqadd = tail call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> zeroinitializer, <4 x i32> %sqdmull)
  %shuffle = shufflevector <4 x i32> %sqadd, <4 x i32> undef, <2 x i32> zeroinitializer
  %0 = mul <2 x i32> %shuffle, <i32 -1, i32 0>
  %sub = add <2 x i32> %0, <i32 1, i32 0>
  %sext = sext <2 x i32> %sub to <2 x i64>
  %vset_lane603 = shufflevector <2 x i64> %sext, <2 x i64> undef, <1 x i32> zeroinitializer
  call void @llvm.aarch64.neon.st2lane.v2i64.p0(<2 x i64> zeroinitializer, <2 x i64> zeroinitializer, i64 1, ptr %val0)
  call void @llvm.aarch64.neon.st2lane.v1i64.p0(<1 x i64> <i64 4096>, <1 x i64> <i64 -1>, i64 0, ptr %t2)
  %vld2_lane = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0(<1 x i64> <i64 11>, <1 x i64> <i64 11>, i64 0, ptr %t2)
  %vld2_lane.0.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2_lane, 0
  %vld2_lane.1.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2_lane, 1
  %vld2_lane1 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0(<1 x i64> %vld2_lane.0.extract, <1 x i64> %vld2_lane.1.extract, i64 0, ptr %val0)
  %vld2_lane1.0.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2_lane1, 0
  %vld2_lane1.1.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2_lane1, 1
  call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> %vld2_lane1.0.extract, <1 x i64> %vld2_lane1.1.extract, ptr %val1)
  %t4 = load <2 x i64>, ptr %val1, align 16
  %vsubhn = sub <2 x i64> <i64 11, i64 0>, %t4
  %vsubhn1 = lshr <2 x i64> %vsubhn, <i64 32, i64 32>
  %vsubhn2 = trunc <2 x i64> %vsubhn1 to <2 x i32>
  %neg = xor <2 x i32> %vsubhn2, <i32 -1, i32 -1>
  %sqadd1 = call <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64> <i64 -1>, <1 x i64> <i64 1>)
  %sqadd2 = call <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64> %vset_lane603, <1 x i64> %sqadd1)
  %sqadd3 = call <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64> <i64 1>, <1 x i64> %sqadd2)
  %shuffle.i = shufflevector <2 x i32> <i32 undef, i32 0>, <2 x i32> %vrsubhn_v2.i1364, <2 x i32> <i32 1, i32 3>
  %cmp.i = icmp uge <2 x i32> %shuffle.i, %neg
  %sext.i = sext <2 x i1> %cmp.i to <2 x i32>
  %vpadal = call <1 x i64> @llvm.aarch64.neon.uaddlp.v1i64.v2i32(<2 x i32> %sext.i)
  %t5 = sub <1 x i64> %vpadal, %sqadd3
  %vget_lane1 = extractelement <1 x i64> %t5, i32 0
  %cmp2 = icmp eq i64 %vget_lane1, 15
  br i1 %cmp2, label %if.end2, label %if.then2

if.then2:                                         ; preds = %if.end
  store i1 true, ptr @failed, align 1
  br label %if.end2

if.end2:                                          ; preds = %if.then2, %if.end
  call void @f2()
  %vext = shufflevector <8 x i8> <i8 undef, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> %vtbl1.i, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  %t6 = bitcast <8 x i8> %vext to <2 x i32>
  call void @f0(<2 x i32> %t6)
  ret void
}

declare void @f0(<2 x i32>)

declare <8 x i8> @f1()

declare void @f2()

declare <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16>, <4 x i16>)

declare void @llvm.aarch64.neon.st2lane.v2i64.p0(<2 x i64>, <2 x i64>, i64, ptr nocapture)

declare void @llvm.aarch64.neon.st2lane.v1i64.p0(<1 x i64>, <1 x i64>, i64, ptr nocapture)

declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0(<1 x i64>, <1 x i64>, i64, ptr)

declare void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64>, <1 x i64>, ptr nocapture)

declare <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64>, <1 x i64>)

declare <1 x i64> @llvm.aarch64.neon.uaddlp.v1i64.v2i32(<2 x i32>)

declare <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32>, <4 x i32>)

declare <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32>, <2 x i32>)