; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel -target-abi=lp64 < %s | FileCheck %s

declare target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, i64, i64)

define target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vlseg_nxv8i8(ptr %p, i64 %vl) {
  ; CHECK-LABEL: name: test_vlseg_nxv8i8
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   liveins: $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[PseudoVLSEG2E8_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E8_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 1)
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY [[PseudoVLSEG2E8_V_M1_]]
  ; CHECK-NEXT:   $v8_v9 = COPY [[COPY2]]
  ; CHECK-NEXT:   PseudoRET implicit $v8_v9
entry:
  %0 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %p, i64 %vl, i64 3)
  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0
}

define target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vlseg_nxv4i16(ptr %p, i64 %vl) {
  ; CHECK-LABEL: name: test_vlseg_nxv4i16
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   liveins: $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[PseudoVLSEG2E16_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E16_V_M1 $noreg, [[COPY1]], [[COPY]], 4 /* e16 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 2)
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY [[PseudoVLSEG2E16_V_M1_]]
  ; CHECK-NEXT:   $v8_v9 = COPY [[COPY2]]
  ; CHECK-NEXT:   PseudoRET implicit $v8_v9
entry:
  %0 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %p, i64 %vl, i64 4)
  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0
}

define target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vlseg_nxv2i32(ptr %p, i64 %vl) {
  ; CHECK-LABEL: name: test_vlseg_nxv2i32
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   liveins: $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[PseudoVLSEG2E32_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E32_V_M1 $noreg, [[COPY1]], [[COPY]], 5 /* e32 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 4)
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY [[PseudoVLSEG2E32_V_M1_]]
  ; CHECK-NEXT:   $v8_v9 = COPY [[COPY2]]
  ; CHECK-NEXT:   PseudoRET implicit $v8_v9
entry:
  %0 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %p, i64 %vl, i64 5)
  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0
}

define target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vlseg_nxv1i64(ptr %p, i64 %vl) {
  ; CHECK-LABEL: name: test_vlseg_nxv1i64
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   liveins: $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[PseudoVLSEG2E64_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E64_V_M1 $noreg, [[COPY1]], [[COPY]], 6 /* e64 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 8)
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY [[PseudoVLSEG2E64_V_M1_]]
  ; CHECK-NEXT:   $v8_v9 = COPY [[COPY2]]
  ; CHECK-NEXT:   PseudoRET implicit $v8_v9
entry:
  %0 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %p, i64 %vl, i64 6)
  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0
}
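; A note on the operand these four tests vary (explanatory comment, not part of
; the autogenerated assertions): the trailing i64 argument of the intrinsic
; encodes the element width as log2(SEW). The tests pass 3, 4, 5, and 6, and the
; selected pseudos annotate those values as e8, e16, e32, and e64 respectively,
; with the memory operand's alignment (1, 2, 4, 8) tracking the same width.
;
; To refresh the CHECK lines after editing the IR, rerun the generator named in
; the NOTE line above. A typical invocation from an LLVM source checkout (the
; path is the usual in-tree location; adjust to your layout):
;
;   llvm/utils/update_mir_test_checks.py <path-to-this-test>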