; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d,+zfh | FileCheck %s

define void @get_lane_mask() {
; CHECK-LABEL: 'get_lane_mask'
; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %mask_nxv16i1_i64 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %mask_nxv8i1_i64 = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %mask_nxv4i1_i64 = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %mask_nxv2i1_i64 = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %mask_nxv1i1_i64 = call <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %mask_nxv16i1_i32 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %mask_nxv8i1_i32 = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %mask_nxv4i1_i32 = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %mask_nxv2i1_i32 = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %mask_nxv1i1_i32 = call <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %mask_nxv32i1_i64 = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %mask_nxv16i1_i16 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %mask_v16i1_i64 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %mask_v8i1_i64 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %mask_v4i1_i64 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %mask_v2i1_i64 = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %mask_v16i1_i32 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %mask_v8i1_i32 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %mask_v4i1_i32 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %mask_v2i1_i32 = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32 undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %mask_v32i1_i64 = call <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64 undef, i64 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %mask_v16i1_i16 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i16(i16 undef, i16 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  %mask_nxv16i1_i64 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 undef, i64 undef)
  %mask_nxv8i1_i64 = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 undef, i64 undef)
  %mask_nxv4i1_i64 = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 undef, i64 undef)
  %mask_nxv2i1_i64 = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 undef, i64 undef)
  %mask_nxv1i1_i64 = call <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i64(i64 undef, i64 undef)

  %mask_nxv16i1_i32 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 undef, i32 undef)
  %mask_nxv8i1_i32 = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32 undef, i32 undef)
  %mask_nxv4i1_i32 = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 undef, i32 undef)
  %mask_nxv2i1_i32 = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i32(i32 undef, i32 undef)
  %mask_nxv1i1_i32 = call <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i32(i32 undef, i32 undef)

  %mask_nxv32i1_i64 = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64 undef, i64 undef)
  %mask_nxv16i1_i16 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i16(i16 undef, i16 undef)

  %mask_v16i1_i64 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 undef, i64 undef)
  %mask_v8i1_i64 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64 undef, i64 undef)
  %mask_v4i1_i64 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 undef, i64 undef)
  %mask_v2i1_i64 = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 undef, i64 undef)

  %mask_v16i1_i32 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 undef, i32 undef)
  %mask_v8i1_i32 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 undef, i32 undef)
  %mask_v4i1_i32 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 undef, i32 undef)
  %mask_v2i1_i32 = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32 undef, i32 undef)

  %mask_v32i1_i64 = call <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64 undef, i64 undef)
  %mask_v16i1_i16 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i16(i16 undef, i16 undef)

  ret void
}

declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64, i64)
declare <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64, i64)
declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64, i64)
declare <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64, i64)
declare <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i64(i64, i64)
declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32, i32)
declare <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32, i32)
declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i32(i32, i32)
declare <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i32(i32, i32)
declare <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64, i64)
declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i16(i16, i16)
declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64, i64)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64, i64)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64, i64)
declare <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64, i64)
declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32, i32)
declare <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64, i64)
declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i16(i16, i16)