; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel -target-abi=lp64 < %s | FileCheck %s

; Checks that llvm.riscv.vlseg2 on a 2-field tuple of <vscale x 8 x i8> selects
; the PseudoVLSEG2E8/E16/E32/E64 pseudo matching the SEW operand (last i64 arg:
; 3=e8, 4=e16, 5=e32, 6=e64), with tail-undisturbed policy and the expected
; memory-operand alignment.

declare target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr , i64, i64)

define target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vlseg_nxv8i8(ptr %p, i64 %vl) {
  ; CHECK-LABEL: name: test_vlseg_nxv8i8
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   liveins: $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[PseudoVLSEG2E8_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E8_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 1)
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY [[PseudoVLSEG2E8_V_M1_]]
  ; CHECK-NEXT:   $v8_v9 = COPY [[COPY2]]
  ; CHECK-NEXT:   PseudoRET implicit $v8_v9
entry:
  %0 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %p, i64 %vl, i64 3)
  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0
}

define target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vlseg_nxv4i16(ptr %p, i64 %vl) {
  ; CHECK-LABEL: name: test_vlseg_nxv4i16
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   liveins: $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[PseudoVLSEG2E16_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E16_V_M1 $noreg, [[COPY1]], [[COPY]], 4 /* e16 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 2)
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY [[PseudoVLSEG2E16_V_M1_]]
  ; CHECK-NEXT:   $v8_v9 = COPY [[COPY2]]
  ; CHECK-NEXT:   PseudoRET implicit $v8_v9
entry:
  %0 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %p, i64 %vl, i64 4)
  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0
}

define target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vlseg_nxv2i32(ptr %p, i64 %vl) {
  ; CHECK-LABEL: name: test_vlseg_nxv2i32
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   liveins: $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[PseudoVLSEG2E32_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E32_V_M1 $noreg, [[COPY1]], [[COPY]], 5 /* e32 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 4)
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY [[PseudoVLSEG2E32_V_M1_]]
  ; CHECK-NEXT:   $v8_v9 = COPY [[COPY2]]
  ; CHECK-NEXT:   PseudoRET implicit $v8_v9
entry:
  %0 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %p, i64 %vl, i64 5)
  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0
}

define target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vlseg_nxv1i64(ptr %p, i64 %vl) {
  ; CHECK-LABEL: name: test_vlseg_nxv1i64
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   liveins: $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[PseudoVLSEG2E64_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E64_V_M1 $noreg, [[COPY1]], [[COPY]], 6 /* e64 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 8)
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY [[PseudoVLSEG2E64_V_M1_]]
  ; CHECK-NEXT:   $v8_v9 = COPY [[COPY2]]
  ; CHECK-NEXT:   PseudoRET implicit $v8_v9
entry:
  %0 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %p, i64 %vl, i64 6)
  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0
}