; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+zve64x \
; RUN:   -verify-machineinstrs < %s | FileCheck %s

declare {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, i32, i32)
declare {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 16 x i1>, i32, i32, i32)

define void @test_vlseg2ff_dead_value(ptr %base, i32 %vl, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_dead_value:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vlseg2e16ff.v v8, (a0)
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    sw a0, 0(a2)
; CHECK-NEXT:    ret
entry:
  %0 = tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr %base, i32 %vl, i32 4)
  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} %0, 1
  store i32 %1, ptr %outvl
  ret void
}

define void @test_vlseg2ff_mask_dead_value(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_dead_value:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vlseg2e16ff.v v8, (a0), v0.t
; CHECK-NEXT:    csrr a0, vl
; CHECK-NEXT:    sw a0, 0(a2)
; CHECK-NEXT:    ret
entry:
  %0 = tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1, i32 4)
  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} %0, 1
  store i32 %1, ptr %outvl
  ret void
}

define <vscale x 16 x i16> @test_vlseg2ff_dead_vl(ptr %base, i32 %vl) {
; CHECK-LABEL: test_vlseg2ff_dead_vl:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vlseg2e16ff.v v4, (a0)
; CHECK-NEXT:    ret
entry:
  %0 = tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr %base, i32 %vl, i32 4)
  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} %0, 0
  %2 = call <vscale x 16 x i16> @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %1, i32 1)
  ret <vscale x 16 x i16> %2
}

define <vscale x 16 x i16> @test_vlseg2ff_mask_dead_vl(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlseg2ff_mask_dead_vl:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vmv4r.v v4, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    vlseg2e16ff.v v4, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1, i32 4)
  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} %0, 0
  %2 = call <vscale x 16 x i16> @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %1, i32 1)
  ret <vscale x 16 x i16> %2
}

define void @test_vlseg2ff_dead_all(ptr %base, i32 %vl) {
; CHECK-LABEL: test_vlseg2ff_dead_all:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vlseg2e16ff.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr %base, i32 %vl, i32 4)
  ret void
}

define void @test_vlseg2ff_mask_dead_all(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlseg2ff_mask_dead_all:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vlseg2e16ff.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1, i32 4)
  ret void
}