; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: opt < %s -passes=loop-vectorize,simplifycfg,instcombine -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"
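;; Both tests check that the loop vectorizer, while folding the loop tail
;; with an active lane mask, calls the masked SVE variants declared via the
;; "vector-function-abi-variant" attribute even though the two parameters
;; have different widths. The VF is determined by the widest parameter type:
;; <vscale x 4> lanes for i32/i8 and <vscale x 2> lanes for float/double.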

define void @test_big_little_params(ptr readonly %a, ptr readonly %b, ptr noalias %c) #0 {
; CHECK-LABEL: define void @test_big_little_params
; CHECK-SAME: (ptr readonly [[A:%.*]], ptr readonly [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 2
; CHECK-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 1025)
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP0]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT:    [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP1]], i32 1, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i8> poison)
; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> @foo_vector(<vscale x 4 x i32> [[WIDE_MASKED_LOAD]], <vscale x 4 x i8> [[WIDE_MASKED_LOAD1]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
; CHECK-NEXT:    call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP2]], ptr [[TMP3]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT:    [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT]], i64 1025)
; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
; CHECK-NEXT:    br i1 [[TMP6]], label [[VECTOR_BODY]], label [[EXIT:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %gep_s = getelementptr i32, ptr %a, i64 %iv
  %load_s = load i32, ptr %gep_s
  %gep_b = getelementptr i8, ptr %b, i64 %iv
  %load_b = load i8, ptr %gep_b
  %call = call i32 @foo_big_little(i32 %load_s, i8 %load_b) #1
  %arrayidx = getelementptr inbounds i32, ptr %c, i64 %iv
  store i32 %call, ptr %arrayidx
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1025
  br i1 %exitcond, label %exit, label %for.body

exit:
  ret void
}

define void @test_little_big_params(ptr readonly %a, ptr readonly %b, ptr noalias %c) #0 {
; CHECK-LABEL: define void @test_little_big_params
; CHECK-SAME: (ptr readonly [[A:%.*]], ptr readonly [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 1
; CHECK-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025)
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x float> @llvm.masked.load.nxv2f32.p0(ptr [[TMP0]], i32 4, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x float> poison)
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT:    [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP1]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x double> poison)
; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x double> @bar_vector(<vscale x 2 x float> [[WIDE_MASKED_LOAD]], <vscale x 2 x double> [[WIDE_MASKED_LOAD1]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds double, ptr [[C]], i64 [[INDEX]]
; CHECK-NEXT:    call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP2]], ptr [[TMP3]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT:    [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025)
; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
; CHECK-NEXT:    br i1 [[TMP6]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %gep_f = getelementptr float, ptr %a, i64 %iv
  %load_f = load float, ptr %gep_f
  %gep_d = getelementptr double, ptr %b, i64 %iv
  %load_d = load double, ptr %gep_d
  %call = call double @bar_little_big(float %load_f, double %load_d) #2
  %arrayidx = getelementptr inbounds double, ptr %c, i64 %iv
  store double %call, ptr %arrayidx
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1025
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup:
  ret void
}

;; TODO: Test uniform and linear parameters when they are properly supported,
;;       especially a variant with no vector parameters so the return type
;;       must be used to find the VF.

;; Scalar functions
declare i32 @foo_big_little(i32, i8)
declare double @bar_little_big(float, double)

;; Vector function variants
declare <vscale x 4 x i32> @foo_vector(<vscale x 4 x i32>, <vscale x 4 x i8>, <vscale x 4 x i1>)
declare <vscale x 2 x double> @bar_vector(<vscale x 2 x float>, <vscale x 2 x double>, <vscale x 2 x i1>)

attributes #0 = { "target-features"="+sve" vscale_range(1,16) }
attributes #1 = { nounwind "vector-function-abi-variant"="_ZGVsMxvv_foo_big_little(foo_vector)" }
attributes #2 = { nounwind "vector-function-abi-variant"="_ZGVsMxvv_bar_little_big(bar_vector)" }
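
;; A sketch of the uniform-parameter case from the TODO above, kept as a
;; comment since such variants are not yet supported. @baz, @baz_vector, and
;; attribute group #3 are hypothetical names; the 'u' in the VFABI mangling
;; marks a parameter that stays scalar (uniform) in the vector variant.
;;
;; declare i32 @baz(i32, i64)
;; declare <vscale x 4 x i32> @baz_vector(<vscale x 4 x i32>, i64, <vscale x 4 x i1>)
;; attributes #3 = { nounwind "vector-function-abi-variant"="_ZGVsMxvu_baz(baz_vector)" }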